hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counter.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.Writable;
/**
* A named counter that tracks the progress of a map/reduce job.
*
* <p><code>Counters</code> represent global counters, defined either by the
* Map-Reduce framework or applications. Each <code>Counter</code> is named by
* an {@link Enum} and has a long for the value.</p>
*
* <p><code>Counters</code> are bunched into Groups, each comprising
* counters from a particular <code>Enum</code> class.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Counter extends Writable {
/**
* Set the display name of the counter
* @param displayName of the counter
* @deprecated (and no-op by default)
*/
@Deprecated
void setDisplayName(String displayName);
/**
* @return the name of the counter
*/
String getName();
/**
* Get the display name of the counter.
* @return the user facing name of the counter
*/
String getDisplayName();
/**
* What is the current value of this counter?
* @return the current value
*/
long getValue();
/**
* Set this counter to the given value
* @param value the value to set
*/
void setValue(long value);
/**
* Increment this counter by the given value
* @param incr the value to increase this counter by
*/
void increment(long incr);
/**
* Return the underlying object if this is a facade.
* @return the underlying object.
*/
@Private
Counter getUnderlyingCounter();
}
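
For illustration (not part of the file above): counters are normally obtained through a task context rather than constructed directly. A minimal sketch, assuming a hypothetical `RecordQuality` enum and relying on the `TaskAttemptContext#getCounter(Enum)` accessor shown later in this section:

```java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class QualityMapper extends Mapper<LongWritable, Text, Text, Text> {
  /** Hypothetical counter names; any application-defined enum works. */
  public enum RecordQuality { WELL_FORMED, MALFORMED }

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    if (value.toString().contains("\t")) {
      // Each enum constant is materialized as one named Counter and
      // aggregated globally across all tasks of the job.
      context.getCounter(RecordQuality.WELL_FORMED).increment(1);
      context.write(new Text("ok"), value);
    } else {
      context.getCounter(RecordQuality.MALFORMED).increment(1);
    }
  }
}
```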
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
/**
* Maps input key/value pairs to a set of intermediate key/value pairs.
*
* <p>Maps are the individual tasks which transform input records into
* intermediate records. The transformed intermediate records need not be of
* the same type as the input records. A given input pair may map to zero or
* many output pairs.</p>
*
* <p>The Hadoop Map-Reduce framework spawns one map task for each
* {@link InputSplit} generated by the {@link InputFormat} for the job.
* <code>Mapper</code> implementations can access the {@link Configuration} for
* the job via the {@link JobContext#getConfiguration()}.
*
* <p>The framework first calls
* {@link #setup(org.apache.hadoop.mapreduce.Mapper.Context)}, followed by
* {@link #map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)}
* for each key/value pair in the <code>InputSplit</code>. Finally
* {@link #cleanup(org.apache.hadoop.mapreduce.Mapper.Context)} is called.</p>
*
* <p>All intermediate values associated with a given output key are
* subsequently grouped by the framework, and passed to a {@link Reducer} to
* determine the final output. Users can control the sorting and grouping by
* specifying two key {@link RawComparator} classes.</p>
*
* <p>The <code>Mapper</code> outputs are partitioned per
* <code>Reducer</code>. Users can control which keys (and hence records) go to
* which <code>Reducer</code> by implementing a custom {@link Partitioner}.
*
* <p>Users can optionally specify a <code>combiner</code>, via
* {@link Job#setCombinerClass(Class)}, to perform local aggregation of the
* intermediate outputs, which helps to cut down the amount of data transferred
* from the <code>Mapper</code> to the <code>Reducer</code>.
*
* <p>Applications can specify if and how the intermediate
* outputs are to be compressed and which {@link CompressionCodec}s are to be
* used via the <code>Configuration</code>.</p>
*
* <p>If the job has zero
* reduces then the output of the <code>Mapper</code> is directly written
* to the {@link OutputFormat} without sorting by keys.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class TokenCounterMapper
* extends Mapper<Object, Text, Text, IntWritable>{
*
* private final static IntWritable one = new IntWritable(1);
* private Text word = new Text();
*
* public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
* StringTokenizer itr = new StringTokenizer(value.toString());
* while (itr.hasMoreTokens()) {
* word.set(itr.nextToken());
* context.write(word, one);
* }
* }
* }
* </pre></blockquote>
*
* <p>Applications may override the
* {@link #run(org.apache.hadoop.mapreduce.Mapper.Context)} method to exert
* greater control over map processing, e.g. for multi-threaded
* <code>Mapper</code>s.</p>
*
* @see InputFormat
* @see JobContext
* @see Partitioner
* @see Reducer
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
/**
* The <code>Context</code> passed on to the {@link Mapper} implementations.
*/
public abstract class Context
implements MapContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
}
/**
* Called once at the beginning of the task.
*/
protected void setup(Context context
) throws IOException, InterruptedException {
// NOTHING
}
/**
* Called once for each key/value pair in the input split. Most applications
* should override this, but the default is the identity function.
*/
@SuppressWarnings("unchecked")
protected void map(KEYIN key, VALUEIN value,
Context context) throws IOException, InterruptedException {
context.write((KEYOUT) key, (VALUEOUT) value);
}
/**
* Called once at the end of the task.
*/
protected void cleanup(Context context
) throws IOException, InterruptedException {
// NOTHING
}
/**
* Expert users can override this method for more complete control over the
* execution of the Mapper.
* @param context the context of the current task
* @throws IOException if reading input or writing output fails
*/
public void run(Context context) throws IOException, InterruptedException {
setup(context);
try {
while (context.nextKeyValue()) {
map(context.getCurrentKey(), context.getCurrentValue(), context);
}
} finally {
cleanup(context);
}
}
}
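
For illustration (not part of the file above): a driver wiring the token-counting mapper from the javadoc into a job. Hadoop ships the two javadoc examples as library classes (org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper and org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer), which this sketch uses; summing is associative, so IntSumReducer also serves as the combiner. Input and output paths are assumed to arrive as program arguments.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;

public class WordCountDriver {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "word count");
    job.setJarByClass(WordCountDriver.class);
    job.setMapperClass(TokenCounterMapper.class);   // library form of the javadoc mapper
    job.setCombinerClass(IntSumReducer.class);      // local aggregation before the shuffle
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
```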
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/OutputFormat.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
/**
* <code>OutputFormat</code> describes the output-specification for a
* Map-Reduce job.
*
* <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
* job to:<p>
* <ol>
* <li>
* Validate the output-specification of the job; e.g. check that the
* output directory doesn't already exist.
* </li>
* <li>
* Provide the {@link RecordWriter} implementation to be used to write out
* the output files of the job. Output files are stored in a
* {@link FileSystem}.
* </li>
* </ol>
*
* @see RecordWriter
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class OutputFormat<K, V> {
/**
* Get the {@link RecordWriter} for the given task.
*
* @param context the information about the current task.
* @return a {@link RecordWriter} to write the output for the job.
* @throws IOException
*/
public abstract RecordWriter<K, V>
getRecordWriter(TaskAttemptContext context
) throws IOException, InterruptedException;
/**
* Check for validity of the output-specification for the job.
*
* <p>This is to validate the output specification for the job when the job
* is submitted. Typically it checks that the output does not already exist,
* throwing an exception when it does, so that output is not
* overwritten.</p>
*
* @param context information about the job
* @throws IOException when output should not be attempted
*/
public abstract void checkOutputSpecs(JobContext context
) throws IOException,
InterruptedException;
/**
* Get the output committer for this output format. This is responsible
* for ensuring the output is committed correctly.
* @param context the task context
* @return an output committer
* @throws IOException
* @throws InterruptedException
*/
public abstract
OutputCommitter getOutputCommitter(TaskAttemptContext context
) throws IOException, InterruptedException;
}
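
For illustration (not part of the file above): a minimal sketch of this contract, an OutputFormat that discards every record. It exercises the responsibilities described above: supplying a RecordWriter, validating the (here trivially empty) output specification, and supplying an OutputCommitter, implemented here as a no-op. Class names are illustrative.

```java
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class DiscardOutputFormat<K, V> extends OutputFormat<K, V> {
  @Override
  public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) {
    return new RecordWriter<K, V>() {
      @Override public void write(K key, V value) { /* drop the record */ }
      @Override public void close(TaskAttemptContext c) { }
    };
  }

  @Override
  public void checkOutputSpecs(JobContext context) {
    // Nothing to validate: there is no output location to collide with.
  }

  @Override
  public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
    return new OutputCommitter() {  // a no-op committer: nothing to commit
      @Override public void setupJob(JobContext jobContext) { }
      @Override public void setupTask(TaskAttemptContext taskContext) { }
      @Override public boolean needsTaskCommit(TaskAttemptContext taskContext) {
        return false;
      }
      @Override public void commitTask(TaskAttemptContext taskContext) { }
      @Override public void abortTask(TaskAttemptContext taskContext) { }
    };
  }
}
```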
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.QueueACL;
import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.split.JobSplitWriter;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.base.Charsets;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class JobSubmitter {
protected static final Log LOG = LogFactory.getLog(JobSubmitter.class);
private static final String SHUFFLE_KEYGEN_ALGORITHM = "HmacSHA1";
private static final int SHUFFLE_KEY_LENGTH = 64;
private FileSystem jtFs;
private ClientProtocol submitClient;
private String submitHostName;
private String submitHostAddress;
JobSubmitter(FileSystem submitFs, ClientProtocol submitClient)
throws IOException {
this.submitClient = submitClient;
this.jtFs = submitFs;
}
/**
* Configure the user's jobconf with the command line options of
* -libjars, -files and -archives.
* @param job the job to configure
* @param jobSubmitDir the directory to which the job resources are uploaded
* @throws IOException
*/
private void copyAndConfigureFiles(Job job, Path jobSubmitDir)
throws IOException {
JobResourceUploader rUploader = new JobResourceUploader(jtFs);
rUploader.uploadFiles(job, jobSubmitDir);
// Get the working directory. If not set, sets it to filesystem working dir
// This code has been added so that the working directory is reset before
// running the job. This is necessary for backward compatibility as other systems
// might use the public API JobConf#setWorkingDirectory to reset the working
// directory.
job.getWorkingDirectory();
}
/**
* Internal method for submitting jobs to the system.
*
* <p>The job submission process involves:
* <ol>
* <li>
* Checking the input and output specifications of the job.
* </li>
* <li>
* Computing the {@link InputSplit}s for the job.
* </li>
* <li>
* Setting up the requisite accounting information for the
* {@link DistributedCache} of the job, if necessary.
* </li>
* <li>
* Copying the job's jar and configuration to the map-reduce system
* directory on the distributed file-system.
* </li>
* <li>
* Submitting the job to the <code>JobTracker</code> and optionally
* monitoring its status.
* </li>
* </ol></p>
* @param job the configuration to submit
* @param cluster the handle to the Cluster
* @throws ClassNotFoundException
* @throws InterruptedException
* @throws IOException
*/
JobStatus submitJobInternal(Job job, Cluster cluster)
throws ClassNotFoundException, InterruptedException, IOException {
//validate the jobs output specs
checkSpecs(job);
Configuration conf = job.getConfiguration();
addMRFrameworkToDistributedCache(conf);
Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster, conf);
//configure the command line options correctly on the submitting dfs
InetAddress ip = InetAddress.getLocalHost();
if (ip != null) {
submitHostAddress = ip.getHostAddress();
submitHostName = ip.getHostName();
conf.set(MRJobConfig.JOB_SUBMITHOST,submitHostName);
conf.set(MRJobConfig.JOB_SUBMITHOSTADDR,submitHostAddress);
}
JobID jobId = submitClient.getNewJobID();
job.setJobID(jobId);
Path submitJobDir = new Path(jobStagingArea, jobId.toString());
JobStatus status = null;
try {
conf.set(MRJobConfig.USER_NAME,
UserGroupInformation.getCurrentUser().getShortUserName());
conf.set("hadoop.http.filter.initializers",
"org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, submitJobDir.toString());
LOG.debug("Configuring job " + jobId + " with " + submitJobDir
+ " as the submit dir");
// get delegation token for the dir
TokenCache.obtainTokensForNamenodes(job.getCredentials(),
new Path[] { submitJobDir }, conf);
populateTokenCache(conf, job.getCredentials());
// generate a secret to authenticate shuffle transfers
if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) {
KeyGenerator keyGen;
try {
keyGen = KeyGenerator.getInstance(SHUFFLE_KEYGEN_ALGORITHM);
keyGen.init(SHUFFLE_KEY_LENGTH);
} catch (NoSuchAlgorithmException e) {
throw new IOException("Error generating shuffle secret key", e);
}
SecretKey shuffleKey = keyGen.generateKey();
TokenCache.setShuffleSecretKey(shuffleKey.getEncoded(),
job.getCredentials());
}
if (CryptoUtils.isEncryptedSpillEnabled(conf)) {
conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 1);
LOG.warn("Max job attempts set to 1 since encrypted intermediate" +
"data spill is enabled");
}
copyAndConfigureFiles(job, submitJobDir);
Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
// Create the splits for the job
LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir));
int maps = writeSplits(job, submitJobDir);
conf.setInt(MRJobConfig.NUM_MAPS, maps);
LOG.info("number of splits:" + maps);
// write "queue admins of the queue to which job is being submitted"
// to job file.
String queue = conf.get(MRJobConfig.QUEUE_NAME,
JobConf.DEFAULT_QUEUE_NAME);
AccessControlList acl = submitClient.getQueueAdmins(queue);
conf.set(toFullPropertyName(queue,
QueueACL.ADMINISTER_JOBS.getAclName()), acl.getAclString());
// removing jobtoken referrals before copying the jobconf to HDFS
// as the tasks don't need this setting, actually they may break
// because of it if present as the referral will point to a
// different job.
TokenCache.cleanUpTokenReferral(conf);
if (conf.getBoolean(
MRJobConfig.JOB_TOKEN_TRACKING_IDS_ENABLED,
MRJobConfig.DEFAULT_JOB_TOKEN_TRACKING_IDS_ENABLED)) {
// Add HDFS tracking ids
ArrayList<String> trackingIds = new ArrayList<String>();
for (Token<? extends TokenIdentifier> t :
job.getCredentials().getAllTokens()) {
trackingIds.add(t.decodeIdentifier().getTrackingId());
}
conf.setStrings(MRJobConfig.JOB_TOKEN_TRACKING_IDS,
trackingIds.toArray(new String[trackingIds.size()]));
}
// Set reservation info if it exists
ReservationId reservationId = job.getReservationId();
if (reservationId != null) {
conf.set(MRJobConfig.RESERVATION_ID, reservationId.toString());
}
// Write job file to submit dir
writeConf(conf, submitJobFile);
Limits.reset(conf);
//
// Now, actually submit the job (using the submit name)
//
printTokens(jobId, job.getCredentials());
status = submitClient.submitJob(
jobId, submitJobDir.toString(), job.getCredentials());
if (status != null) {
return status;
} else {
throw new IOException("Could not launch job");
}
} finally {
if (status == null) {
LOG.info("Cleaning up the staging area " + submitJobDir);
if (jtFs != null && submitJobDir != null)
jtFs.delete(submitJobDir, true);
}
}
}
private void checkSpecs(Job job) throws ClassNotFoundException,
InterruptedException, IOException {
JobConf jConf = (JobConf)job.getConfiguration();
// Check the output specification
if (jConf.getNumReduceTasks() == 0 ?
jConf.getUseNewMapper() : jConf.getUseNewReducer()) {
org.apache.hadoop.mapreduce.OutputFormat<?, ?> output =
ReflectionUtils.newInstance(job.getOutputFormatClass(),
job.getConfiguration());
output.checkOutputSpecs(job);
} else {
jConf.getOutputFormat().checkOutputSpecs(jtFs, jConf);
}
}
private void writeConf(Configuration conf, Path jobFile)
throws IOException {
// Write job file to JobTracker's fs
FSDataOutputStream out =
FileSystem.create(jtFs, jobFile,
new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
try {
conf.writeXml(out);
} finally {
out.close();
}
}
private void printTokens(JobID jobId,
Credentials credentials) throws IOException {
LOG.info("Submitting tokens for job: " + jobId);
for (Token<?> token: credentials.getAllTokens()) {
LOG.info(token);
}
}
@SuppressWarnings("unchecked")
private <T extends InputSplit>
int writeNewSplits(JobContext job, Path jobSubmitDir) throws IOException,
InterruptedException, ClassNotFoundException {
Configuration conf = job.getConfiguration();
InputFormat<?, ?> input =
ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
List<InputSplit> splits = input.getSplits(job);
T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);
// sort the splits into order based on size, so that the biggest
// go first
Arrays.sort(array, new SplitComparator());
JobSplitWriter.createSplitFiles(jobSubmitDir, conf,
jobSubmitDir.getFileSystem(conf), array);
return array.length;
}
private int writeSplits(org.apache.hadoop.mapreduce.JobContext job,
Path jobSubmitDir) throws IOException,
InterruptedException, ClassNotFoundException {
JobConf jConf = (JobConf)job.getConfiguration();
int maps;
if (jConf.getUseNewMapper()) {
maps = writeNewSplits(job, jobSubmitDir);
} else {
maps = writeOldSplits(jConf, jobSubmitDir);
}
return maps;
}
// Method to write splits for the old API mapper.
private int writeOldSplits(JobConf job, Path jobSubmitDir)
throws IOException {
org.apache.hadoop.mapred.InputSplit[] splits =
job.getInputFormat().getSplits(job, job.getNumMapTasks());
// sort the splits into order based on size, so that the biggest
// go first
Arrays.sort(splits, new Comparator<org.apache.hadoop.mapred.InputSplit>() {
public int compare(org.apache.hadoop.mapred.InputSplit a,
org.apache.hadoop.mapred.InputSplit b) {
try {
long left = a.getLength();
long right = b.getLength();
if (left == right) {
return 0;
} else if (left < right) {
return 1;
} else {
return -1;
}
} catch (IOException ie) {
throw new RuntimeException("Problem getting input split size", ie);
}
}
});
JobSplitWriter.createSplitFiles(jobSubmitDir, job,
jobSubmitDir.getFileSystem(job), splits);
return splits.length;
}
private static class SplitComparator implements Comparator<InputSplit> {
@Override
public int compare(InputSplit o1, InputSplit o2) {
try {
long len1 = o1.getLength();
long len2 = o2.getLength();
if (len1 < len2) {
return 1;
} else if (len1 == len2) {
return 0;
} else {
return -1;
}
} catch (IOException ie) {
throw new RuntimeException("exception in compare", ie);
} catch (InterruptedException ie) {
throw new RuntimeException("exception in compare", ie);
}
}
}
@SuppressWarnings("unchecked")
private void readTokensFromFiles(Configuration conf, Credentials credentials)
throws IOException {
// add tokens and secrets coming from a token storage file
String binaryTokenFilename =
conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
if (binaryTokenFilename != null) {
Credentials binary = Credentials.readTokenStorageFile(
FileSystem.getLocal(conf).makeQualified(
new Path(binaryTokenFilename)),
conf);
credentials.addAll(binary);
}
// add secret keys coming from a json file
String tokensFileName = conf.get("mapreduce.job.credentials.json");
if(tokensFileName != null) {
LOG.info("loading user's secret keys from " + tokensFileName);
String localFileName = new Path(tokensFileName).toUri().getPath();
boolean json_error = false;
try {
// read JSON
ObjectMapper mapper = new ObjectMapper();
Map<String, String> nm =
mapper.readValue(new File(localFileName), Map.class);
for(Map.Entry<String, String> ent: nm.entrySet()) {
credentials.addSecretKey(new Text(ent.getKey()), ent.getValue()
.getBytes(Charsets.UTF_8));
}
} catch (JsonMappingException e) {
json_error = true;
} catch (JsonParseException e) {
json_error = true;
}
if(json_error)
LOG.warn("couldn't parse Token Cache JSON file with user secret keys");
}
}
//get secret keys and tokens and store them into TokenCache
private void populateTokenCache(Configuration conf, Credentials credentials)
throws IOException{
readTokensFromFiles(conf, credentials);
// add the delegation tokens from configuration
String [] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES);
LOG.debug("adding the following namenodes' delegation tokens:" +
Arrays.toString(nameNodes));
if(nameNodes != null) {
Path [] ps = new Path[nameNodes.length];
for(int i=0; i< nameNodes.length; i++) {
ps[i] = new Path(nameNodes[i]);
}
TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
}
}
@SuppressWarnings("deprecation")
private static void addMRFrameworkToDistributedCache(Configuration conf)
throws IOException {
String framework =
conf.get(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, "");
if (!framework.isEmpty()) {
URI uri;
try {
uri = new URI(framework);
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Unable to parse '" + framework
+ "' as a URI, check the setting for "
+ MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
}
String linkedName = uri.getFragment();
// resolve any symlinks in the URI path so using a "current" symlink
// to point to a specific version shows the specific version
// in the distributed cache configuration
FileSystem fs = FileSystem.get(conf);
Path frameworkPath = fs.makeQualified(
new Path(uri.getScheme(), uri.getAuthority(), uri.getPath()));
FileContext fc = FileContext.getFileContext(frameworkPath.toUri(), conf);
frameworkPath = fc.resolvePath(frameworkPath);
uri = frameworkPath.toUri();
try {
uri = new URI(uri.getScheme(), uri.getAuthority(), uri.getPath(),
null, linkedName);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
DistributedCache.addCacheArchive(uri, conf);
}
}
}
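
For illustration (not part of the file above): readTokensFromFiles parses the file named by the mapreduce.job.credentials.json configuration key as a flat JSON object mapping secret names to string values, each entry becoming one secret key in the job's Credentials (stored as UTF-8 bytes). A hypothetical sketch producing such a file; the path and key names are made up.

```java
import java.io.FileWriter;

public class WriteJobSecrets {
  public static void main(String[] args) throws Exception {
    try (FileWriter w = new FileWriter("/tmp/job-secrets.json")) {
      // Keys are application-chosen aliases; values are the secret strings.
      w.write("{\"db.password\": \"hunter2\", \"api.token\": \"abc123\"}");
    }
    // The job would then be submitted with, e.g.:
    //   -Dmapreduce.job.credentials.json=/tmp/job-secrets.json
  }
}
```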
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/RecordReader.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The record reader breaks the data into key/value pairs for input to the
* {@link Mapper}.
* @param <KEYIN>
* @param <VALUEIN>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class RecordReader<KEYIN, VALUEIN> implements Closeable {
/**
* Called once at initialization.
* @param split the split that defines the range of records to read
* @param context the information about the task
* @throws IOException
* @throws InterruptedException
*/
public abstract void initialize(InputSplit split,
TaskAttemptContext context
) throws IOException, InterruptedException;
/**
* Read the next key, value pair.
* @return true if a key/value pair was read
* @throws IOException
* @throws InterruptedException
*/
public abstract
boolean nextKeyValue() throws IOException, InterruptedException;
/**
* Get the current key.
* @return the current key or null if there is no current key
* @throws IOException
* @throws InterruptedException
*/
public abstract
KEYIN getCurrentKey() throws IOException, InterruptedException;
/**
* Get the current value.
* @return the object that was read
* @throws IOException
* @throws InterruptedException
*/
public abstract
VALUEIN getCurrentValue() throws IOException, InterruptedException;
/**
* The current progress of the record reader through its data.
* @return a number between 0.0 and 1.0 that is the fraction of the data read
* @throws IOException
* @throws InterruptedException
*/
public abstract float getProgress() throws IOException, InterruptedException;
/**
* Close the record reader.
*/
public abstract void close() throws IOException;
}
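
For illustration (not part of the file above): a minimal sketch of a reader that emits the numbers 0..count-1 as keys with null values, showing the lifecycle contract documented above: initialize() once, nextKeyValue() until it returns false, and getCurrentKey()/getCurrentValue() between calls. The class name and fixed record count are illustrative.

```java
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class CountingRecordReader extends RecordReader<LongWritable, NullWritable> {
  private final long count = 100;  // hypothetical fixed record count
  private long next = 0;
  private final LongWritable key = new LongWritable();

  @Override
  public void initialize(InputSplit split, TaskAttemptContext context) {
    // Nothing to open: the records are synthesized.
  }

  @Override
  public boolean nextKeyValue() {
    if (next >= count) return false;
    key.set(next++);
    return true;
  }

  @Override public LongWritable getCurrentKey() { return key; }
  @Override public NullWritable getCurrentValue() { return NullWritable.get(); }
  @Override public float getProgress() { return (float) next / count; }
  @Override public void close() { }
}
```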
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/InputFormat.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
/**
* <code>InputFormat</code> describes the input-specification for a
* Map-Reduce job.
*
* <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
* job to:<p>
* <ol>
* <li>
* Validate the input-specification of the job.
* </li>
* <li>
* Split-up the input file(s) into logical {@link InputSplit}s, each of
* which is then assigned to an individual {@link Mapper}.
* </li>
* <li>
* Provide the {@link RecordReader} implementation to be used to glean
* input records from the logical <code>InputSplit</code> for processing by
* the {@link Mapper}.
* </li>
* </ol>
*
* <p>The default behavior of file-based {@link InputFormat}s, typically
* sub-classes of {@link FileInputFormat}, is to split the
* input into <i>logical</i> {@link InputSplit}s based on the total size, in
* bytes, of the input files. However, the {@link FileSystem} blocksize of
* the input files is treated as an upper bound for input splits. A lower bound
* on the split size can be set via
* <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
* mapreduce.input.fileinputformat.split.minsize</a>.</p>
*
* <p>Clearly, logical splits based on input-size are insufficient for many
* applications since record boundaries are to be respected. In such cases, the
* application has to also implement a {@link RecordReader} on whom lies the
* responsibility to respect record-boundaries and present a record-oriented
* view of the logical <code>InputSplit</code> to the individual task.
*
* @see InputSplit
* @see RecordReader
* @see FileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class InputFormat<K, V> {
/**
* Logically split the set of input files for the job.
*
* <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
* for processing.</p>
*
* <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
* input files are not physically split into chunks. E.g. a split could be a
* <i><input-file-path, start, offset></i> tuple. The InputFormat
* also creates the {@link RecordReader} to read the {@link InputSplit}.
*
* @param context the job context
* @return a list of {@link InputSplit}s for the job.
*/
public abstract
List<InputSplit> getSplits(JobContext context
) throws IOException, InterruptedException;
/**
* Create a record reader for a given split. The framework will call
* {@link RecordReader#initialize(InputSplit, TaskAttemptContext)} before
* the split is used.
* @param split the split to be read
* @param context the information about the task
* @return a new record reader
* @throws IOException
* @throws InterruptedException
*/
public abstract
RecordReader<K,V> createRecordReader(InputSplit split,
TaskAttemptContext context
) throws IOException,
InterruptedException;
}
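
For illustration (not part of the file above): a minimal, non-file InputFormat that produces a single logical split whose reader is the CountingRecordReader sketched after RecordReader.java above. The custom split implements Writable so the framework can ship it to the task; class names are illustrative.

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class CountingInputFormat extends InputFormat<LongWritable, NullWritable> {

  /** A split carrying only its nominal size; serialized via Writable. */
  public static class CountingSplit extends InputSplit implements Writable {
    private long count = 100;                 // hypothetical record count
    @Override public long getLength() { return count; }
    @Override public String[] getLocations() { return new String[0]; }
    @Override public void write(DataOutput out) throws IOException { out.writeLong(count); }
    @Override public void readFields(DataInput in) throws IOException { count = in.readLong(); }
  }

  @Override
  public List<InputSplit> getSplits(JobContext context) {
    // One split means the framework spawns exactly one map task.
    return Collections.<InputSplit>singletonList(new CountingSplit());
  }

  @Override
  public RecordReader<LongWritable, NullWritable> createRecordReader(
      InputSplit split, TaskAttemptContext context) {
    return new CountingRecordReader();  // sketched after RecordReader.java above
  }
}
```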
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.StringInterner;
/**
* Class that contains the information regarding the Job Queues which are
* maintained by the Hadoop Map/Reduce framework.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class QueueInfo implements Writable {
private String queueName = "";
//The scheduling Information object is read back as String.
//Once the scheduling information is set there is no way to recover it.
private String schedulingInfo;
private QueueState queueState;
// Jobs submitted to the queue
private JobStatus[] stats;
private List<QueueInfo> children;
private Properties props;
/**
* Default constructor for QueueInfo.
*
*/
public QueueInfo() {
// make it running by default.
this.queueState = QueueState.RUNNING;
children = new ArrayList<QueueInfo>();
props = new Properties();
}
/**
* Construct a new QueueInfo object using the queue name and the
* scheduling information passed.
*
* @param queueName Name of the job queue
* @param schedulingInfo Scheduling Information associated with the job
* queue
*/
public QueueInfo(String queueName, String schedulingInfo) {
this();
this.queueName = queueName;
this.schedulingInfo = schedulingInfo;
}
/**
* Construct a new QueueInfo from the given attributes.
* @param queueName Name of the job queue
* @param schedulingInfo Scheduling information associated with the job queue
* @param state State of the queue
* @param stats Statuses of the jobs submitted to the queue
*/
public QueueInfo(String queueName, String schedulingInfo, QueueState state,
JobStatus[] stats) {
this(queueName, schedulingInfo);
this.queueState = state;
this.stats = stats;
}
/**
* Set the name of the job queue.
*
* @param queueName Name of the job queue.
*/
protected void setQueueName(String queueName) {
this.queueName = queueName;
}
/**
* Get the name of the job queue.
*
* @return queue name
*/
public String getQueueName() {
return queueName;
}
/**
* Set the scheduling information associated with a particular job queue.
*
* @param schedulingInfo Scheduling information associated with the job queue
*/
protected void setSchedulingInfo(String schedulingInfo) {
this.schedulingInfo = schedulingInfo;
}
/**
* Gets the scheduling information associated with a particular job queue.
* If nothing is set, returns <b>"N/A"</b>.
*
* @return Scheduling information associated to particular Job Queue
*/
public String getSchedulingInfo() {
if (schedulingInfo != null) {
return schedulingInfo;
} else {
return "N/A";
}
}
/**
* Set the state of the queue
* @param state state of the queue.
*/
protected void setState(QueueState state) {
queueState = state;
}
/**
* Return the queue state
* @return the queue state.
*/
public QueueState getState() {
return queueState;
}
protected void setJobStatuses(JobStatus[] stats) {
this.stats = stats;
}
/**
* Get immediate children.
*
* @return list of QueueInfo
*/
public List<QueueInfo> getQueueChildren() {
return children;
}
protected void setQueueChildren(List<QueueInfo> children) {
this.children = children;
}
/**
* Get properties.
*
* @return Properties
*/
public Properties getProperties() {
return props;
}
protected void setProperties(Properties props) {
this.props = props;
}
/**
* Get the jobs submitted to queue
* @return list of JobStatus for the submitted jobs
*/
public JobStatus[] getJobStatuses() {
return stats;
}
@Override
public void readFields(DataInput in) throws IOException {
queueName = StringInterner.weakIntern(Text.readString(in));
queueState = WritableUtils.readEnum(in, QueueState.class);
schedulingInfo = StringInterner.weakIntern(Text.readString(in));
int length = in.readInt();
stats = new JobStatus[length];
for (int i = 0; i < length; i++) {
stats[i] = new JobStatus();
stats[i].readFields(in);
}
int count = in.readInt();
children.clear();
for (int i = 0; i < count; i++) {
QueueInfo childQueueInfo = new QueueInfo();
childQueueInfo.readFields(in);
children.add(childQueueInfo);
}
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, queueName);
WritableUtils.writeEnum(out, queueState);
if (schedulingInfo != null) {
Text.writeString(out, schedulingInfo);
} else {
Text.writeString(out, "N/A");
}
out.writeInt(stats.length);
for (JobStatus stat : stats) {
stat.write(out);
}
out.writeInt(children.size());
for(QueueInfo childQueueInfo : children) {
childQueueInfo.write(out);
}
}
}
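
For illustration (not part of the file above): a sketch round-tripping a QueueInfo through its Writable methods, as the RPC layer does when shipping queue information to a client. The four-argument constructor is used so the job-status array is non-null, which write() assumes; the queue name and scheduling string are made up.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.QueueState;

public class QueueInfoRoundTrip {
  public static void main(String[] args) throws Exception {
    QueueInfo original = new QueueInfo("analytics", "capacity: 30%",
        QueueState.RUNNING, new JobStatus[0]);

    // Serialize: name, state, scheduling info, job statuses, children.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize into a fresh instance, exactly as readFields() expects.
    QueueInfo copy = new QueueInfo();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.getQueueName() + " / " + copy.getState()
        + " / " + copy.getSchedulingInfo());  // analytics / RUNNING / capacity: 30%
  }
}
```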
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.task.annotation.Checkpointable;
import java.util.Iterator;
/**
* Reduces a set of intermediate values which share a key to a smaller set of
* values.
*
* <p><code>Reducer</code> implementations
* can access the {@link Configuration} for the job via the
* {@link JobContext#getConfiguration()} method.</p>
* <p><code>Reducer</code> has 3 primary phases:</p>
* <ol>
* <li>
*
* <b id="Shuffle">Shuffle</b>
*
* <p>The <code>Reducer</code> copies the sorted output from each
* {@link Mapper} using HTTP across the network.</p>
* </li>
*
* <li>
* <b id="Sort">Sort</b>
*
* <p>The framework merge sorts <code>Reducer</code> inputs by
* <code>key</code>s
* (since different <code>Mapper</code>s may have output the same key).</p>
*
* <p>The shuffle and sort phases occur simultaneously, i.e. while outputs are
* being fetched they are merged.</p>
*
* <b id="SecondarySort">SecondarySort</b>
*
* <p>To achieve a secondary sort on the values returned by the value
* iterator, the application should extend the key with the secondary
* key and define a grouping comparator. The keys will be sorted using the
* entire key, but will be grouped using the grouping comparator to decide
* which keys and values are sent in the same call to reduce. The grouping
* comparator is specified via
* {@link Job#setGroupingComparatorClass(Class)}. The sort order is
* controlled by
* {@link Job#setSortComparatorClass(Class)}.</p>
*
*
* For example, say that you want to find duplicate web pages and tag them
* all with the url of the "best" known example. You would set up the job
* like:
* <ul>
* <li>Map Input Key: url</li>
* <li>Map Input Value: document</li>
* <li>Map Output Key: document checksum, url pagerank</li>
* <li>Map Output Value: url</li>
* <li>Partitioner: by checksum</li>
* <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
* <li>OutputValueGroupingComparator: by checksum</li>
* </ul>
* </li>
*
* <li>
* <b id="Reduce">Reduce</b>
*
* <p>In this phase the
* {@link #reduce(Object, Iterable, org.apache.hadoop.mapreduce.Reducer.Context)}
* method is called for each <code><key, (collection of values)></code> in
* the sorted inputs.</p>
* <p>The output of the reduce task is typically written to a
* {@link RecordWriter} via
* {@link Context#write(Object, Object)}.</p>
* </li>
* </ol>
*
* <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class IntSumReducer<Key> extends Reducer<Key,IntWritable,
* Key,IntWritable> {
* private IntWritable result = new IntWritable();
*
* public void reduce(Key key, Iterable<IntWritable> values,
* Context context) throws IOException, InterruptedException {
* int sum = 0;
* for (IntWritable val : values) {
* sum += val.get();
* }
* result.set(sum);
* context.write(key, result);
* }
* }
* </pre></blockquote>
*
* @see Mapper
* @see Partitioner
*/
@Checkpointable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
/**
* The <code>Context</code> passed on to the {@link Reducer} implementations.
*/
public abstract class Context
implements ReduceContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
}
/**
* Called once at the start of the task.
*/
protected void setup(Context context
) throws IOException, InterruptedException {
// NOTHING
}
/**
* This method is called once for each key. Most applications will define
* their reduce class by overriding this method. The default implementation
* is an identity function.
*/
@SuppressWarnings("unchecked")
protected void reduce(KEYIN key, Iterable<VALUEIN> values, Context context
) throws IOException, InterruptedException {
for(VALUEIN value: values) {
context.write((KEYOUT) key, (VALUEOUT) value);
}
}
/**
* Called once at the end of the task.
*/
protected void cleanup(Context context
) throws IOException, InterruptedException {
// NOTHING
}
/**
* Advanced application writers can use the
* {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method to
* control how the reduce task works.
*/
public void run(Context context) throws IOException, InterruptedException {
setup(context);
try {
while (context.nextKey()) {
reduce(context.getCurrentKey(), context.getValues(), context);
// If a back up store is used, reset it
Iterator<VALUEIN> iter = context.getValues().iterator();
if(iter instanceof ReduceContext.ValueIterator) {
((ReduceContext.ValueIterator<VALUEIN>)iter).resetBackupStore();
}
}
} finally {
cleanup(context);
}
}
}
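
For illustration (not part of the file above): a minimal sketch of a reducer that keeps only the largest value per key. It also shows a caveat worth knowing when iterating values: the framework reuses the Writable instances handed out by the iterator, so the running maximum is copied into a primitive rather than held as an object reference. Class name is illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MaxValueReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
  private final LongWritable result = new LongWritable();

  @Override
  protected void reduce(Text key, Iterable<LongWritable> values, Context context)
      throws IOException, InterruptedException {
    long max = Long.MIN_VALUE;
    for (LongWritable v : values) {
      max = Math.max(max, v.get());  // copy the primitive, not the Writable
    }
    result.set(max);
    context.write(key, result);
  }
}
```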
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptContext.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Progressable;
/**
* The context for task attempts.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface TaskAttemptContext extends JobContext, Progressable {
/**
* Get the unique name for this task attempt.
*/
public TaskAttemptID getTaskAttemptID();
/**
* Set the current status of the task to the given string.
*/
public void setStatus(String msg);
/**
* Get the last set status message.
* @return the current status message
*/
public String getStatus();
/**
* The current progress of the task attempt.
* @return a number between 0.0 and 1.0 (inclusive) indicating the attempt's
* progress.
*/
public abstract float getProgress();
/**
* Get the {@link Counter} for the given <code>counterName</code>.
* @param counterName counter name
* @return the <code>Counter</code> for the given <code>counterName</code>
*/
public Counter getCounter(Enum<?> counterName);
/**
* Get the {@link Counter} for the given <code>groupName</code> and
* <code>counterName</code>.
* @param groupName counter group name
* @param counterName counter name
* @return the <code>Counter</code> for the given <code>groupName</code> and
* <code>counterName</code>
*/
public Counter getCounter(String groupName, String counterName);
}
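
For illustration (not part of the file above): Mapper.Context and Reducer.Context both extend this interface, so task code can publish a human-readable status and report liveness directly. A minimal sketch with illustrative names:

```java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class SlowWorkMapper extends Mapper<LongWritable, Text, Text, Text> {
  private long seen = 0;

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    if (++seen % 10_000 == 0) {
      context.setStatus("processed " + seen + " records");
      context.progress();  // tell the framework this attempt is still alive
    }
    context.write(new Text("len"), new Text(Integer.toString(value.getLength())));
  }
}
```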
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.LimitInputStream;
/**
* This class provides utilities to make it easier to work with Cryptographic
* Streams, specifically for dealing with encrypting intermediate data such
* as MapReduce spill files.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class CryptoUtils {
private static final Log LOG = LogFactory.getLog(CryptoUtils.class);
public static boolean isEncryptedSpillEnabled(Configuration conf) {
return conf.getBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA,
MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA);
}
/**
* This method creates and initializes an IV (Initialization Vector)
*
* @param conf
* @return byte[]
* @throws IOException
*/
public static byte[] createIV(Configuration conf) throws IOException {
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
if (isEncryptedSpillEnabled(conf)) {
byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
cryptoCodec.generateSecureRandom(iv);
return iv;
} else {
return null;
}
}
public static int cryptoPadding(Configuration conf) {
// Sizeof(IV) + long(start-offset)
return isEncryptedSpillEnabled(conf) ? CryptoCodec.getInstance(conf)
.getCipherSuite().getAlgorithmBlockSize() + 8 : 0;
}
private static byte[] getEncryptionKey() throws IOException {
return TokenCache.getEncryptedSpillKey(UserGroupInformation.getCurrentUser()
.getCredentials());
}
private static int getBufferSize(Configuration conf) {
return conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB,
MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB) * 1024;
}
/**
* Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
* data buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* @param conf
* @param out
* @return FSDataOutputStream
* @throws IOException
*/
public static FSDataOutputStream wrapIfNecessary(Configuration conf,
FSDataOutputStream out) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
byte[] iv = createIV(conf);
out.write(iv);
if (LOG.isDebugEnabled()) {
LOG.debug("IV written to Stream ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf),
getBufferSize(conf), getEncryptionKey(), iv);
} else {
return out;
}
}
/**
* Wraps a given InputStream with a CryptoInputStream. The size of the data
* buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* If the value of 'length' is > -1, The InputStream is additionally
* wrapped in a LimitInputStream. CryptoStreams are late buffering in nature.
* This means they will always try to read ahead if they can. The
* LimitInputStream will ensure that the CryptoStream does not read past the
* provided length from the given Input Stream.
*
* @param conf
* @param in
* @param length
* @return InputStream
* @throws IOException
*/
public static InputStream wrapIfNecessary(Configuration conf, InputStream in,
long length) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
int bufferSize = getBufferSize(conf);
if (length > -1) {
in = new LimitInputStream(in, length);
}
byte[] offsetArray = new byte[8];
IOUtils.readFully(in, offsetArray, 0, 8);
long offset = ByteBuffer.wrap(offsetArray).getLong();
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
byte[] iv =
new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
IOUtils.readFully(in, iv, 0,
cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
if (LOG.isDebugEnabled()) {
LOG.debug("IV read from ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoInputStream(in, cryptoCodec, bufferSize,
getEncryptionKey(), iv, offset + cryptoPadding(conf));
} else {
return in;
}
}
/**
* Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
* data buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* @param conf
* @param in
* @return FSDataInputStream
* @throws IOException
*/
public static FSDataInputStream wrapIfNecessary(Configuration conf,
FSDataInputStream in) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
int bufferSize = getBufferSize(conf);
// Not going to be used, but still has to be read,
// since the O/P stream always writes it.
IOUtils.readFully(in, new byte[8], 0, 8);
byte[] iv =
new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
IOUtils.readFully(in, iv, 0,
cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
if (LOG.isDebugEnabled()) {
LOG.debug("IV read from Stream ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize,
getEncryptionKey(), iv);
} else {
return in;
}
}
}
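
For illustration (not part of the file above): a hypothetical round-trip through the wrap helpers on the local file system. It assumes encrypted spills are enabled and a spill key is present in the current user's credentials, something the MR framework normally arranges; here a dummy 16-byte key is installed by hand, which assumes the TokenCache#setEncryptedSpillKey setter. Each wrapped output stream then begins with an 8-byte plaintext start offset followed by the IV, as documented above. The file path is made up.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class SpillRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);

    // The framework normally installs the spill key; install a dummy one here.
    Credentials creds = new Credentials();
    TokenCache.setEncryptedSpillKey(new byte[16], creds);  // assumed setter
    UserGroupInformation.getCurrentUser().addCredentials(creds);

    FileSystem fs = FileSystem.getLocal(conf);
    Path spill = new Path("/tmp/spill.enc");                // hypothetical path

    byte[] payload = "intermediate data".getBytes("UTF-8");
    FSDataOutputStream out = CryptoUtils.wrapIfNecessary(conf, fs.create(spill, true));
    out.write(payload);                                     // encrypted transparently
    out.close();

    FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(spill));
    byte[] roundTripped = new byte[payload.length];
    IOUtils.readFully(in, roundTripped, 0, roundTripped.length);
    in.close();
    System.out.println(new String(roundTripped, "UTF-8"));  // intermediate data
  }
}
```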
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/FileSystemCounter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public enum FileSystemCounter {
BYTES_READ,
BYTES_WRITTEN,
READ_OPS,
LARGE_READ_OPS,
WRITE_OPS,
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
// Per-job counters
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum JobCounter {
NUM_FAILED_MAPS,
NUM_FAILED_REDUCES,
NUM_KILLED_MAPS,
NUM_KILLED_REDUCES,
TOTAL_LAUNCHED_MAPS,
TOTAL_LAUNCHED_REDUCES,
OTHER_LOCAL_MAPS,
DATA_LOCAL_MAPS,
RACK_LOCAL_MAPS,
@Deprecated
SLOTS_MILLIS_MAPS,
@Deprecated
SLOTS_MILLIS_REDUCES,
@Deprecated
FALLOW_SLOTS_MILLIS_MAPS,
@Deprecated
FALLOW_SLOTS_MILLIS_REDUCES,
TOTAL_LAUNCHED_UBERTASKS,
NUM_UBER_SUBMAPS,
NUM_UBER_SUBREDUCES,
NUM_FAILED_UBERTASKS,
MILLIS_MAPS,
MILLIS_REDUCES,
VCORES_MILLIS_MAPS,
VCORES_MILLIS_REDUCES,
MB_MILLIS_MAPS,
MB_MILLIS_REDUCES
}
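// Read-side sketch (illustrative): after a job completes, these counters can
// be read back through Job#getCounters(). 'job' is hypothetical.
//
// Counters counters = job.getCounters();
// long mapMillis = counters.findCounter(JobCounter.MILLIS_MAPS).getValue();
// long launchedMaps =
//     counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue();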
| 1,631 | 28.142857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* TaskAttemptID represents the immutable and unique identifier for
* a task attempt. Each task attempt is one particular instance of a Map or
* Reduce Task identified by its TaskID.
*
 * TaskAttemptID consists of two parts. The first part is the
 * {@link TaskID} that this TaskAttemptID belongs to.
 * The second part is the task attempt number. <br>
* An example TaskAttemptID is :
* <code>attempt_200707121733_0003_m_000005_0</code> , which represents the
* zeroth task attempt for the fifth map task in the third job
* running at the jobtracker started at <code>200707121733</code>.
* <p>
 * Applications should never construct or parse TaskAttemptID strings, but
 * rather use appropriate constructors or the {@link #forName(String)}
 * method.
*
* @see JobID
* @see TaskID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TaskAttemptID extends org.apache.hadoop.mapred.ID {
protected static final String ATTEMPT = "attempt";
private TaskID taskId;
/**
* Constructs a TaskAttemptID object from given {@link TaskID}.
* @param taskId TaskID that this task belongs to
* @param id the task attempt number
*/
public TaskAttemptID(TaskID taskId, int id) {
super(id);
if(taskId == null) {
throw new IllegalArgumentException("taskId cannot be null");
}
this.taskId = taskId;
}
/**
   * Constructs a TaskAttemptID object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param type the TaskType
* @param taskId taskId number
* @param id the task attempt number
*/
public TaskAttemptID(String jtIdentifier, int jobId, TaskType type,
int taskId, int id) {
this(new TaskID(jtIdentifier, jobId, type, taskId), id);
}
/**
   * Constructs a TaskAttemptID object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param isMap whether the tip is a map
* @param taskId taskId number
* @param id the task attempt number
*/
@Deprecated
public TaskAttemptID(String jtIdentifier, int jobId, boolean isMap,
int taskId, int id) {
this(new TaskID(jtIdentifier, jobId, isMap, taskId), id);
}
public TaskAttemptID() {
taskId = new TaskID();
}
/** Returns the {@link JobID} object that this task attempt belongs to */
public JobID getJobID() {
return taskId.getJobID();
}
/** Returns the {@link TaskID} object that this task attempt belongs to */
public TaskID getTaskID() {
return taskId;
}
  /** Returns whether this TaskAttemptID is for a map task */
@Deprecated
public boolean isMap() {
return taskId.isMap();
}
/**Returns the TaskType of the TaskAttemptID */
public TaskType getTaskType() {
return taskId.getTaskType();
}
@Override
public boolean equals(Object o) {
if (!super.equals(o))
return false;
TaskAttemptID that = (TaskAttemptID)o;
return this.taskId.equals(that.taskId);
}
/**
   * Append the unique string representation to the StringBuilder.
   * @param builder the builder to append to
* @return the builder that was passed in.
*/
protected StringBuilder appendTo(StringBuilder builder) {
return taskId.appendTo(builder).append(SEPARATOR).append(id);
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
taskId.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
taskId.write(out);
}
@Override
public int hashCode() {
return taskId.hashCode() * 5 + id;
}
/**Compare TaskIds by first tipIds, then by task numbers. */
@Override
public int compareTo(ID o) {
TaskAttemptID that = (TaskAttemptID)o;
int tipComp = this.taskId.compareTo(that.taskId);
if(tipComp == 0) {
return this.id - that.id;
}
else return tipComp;
}
@Override
public String toString() {
return appendTo(new StringBuilder(ATTEMPT)).toString();
}
  /** Construct a TaskAttemptID object from a given string.
   * @return the constructed TaskAttemptID object, or null if the given String
   *         is null
* @throws IllegalArgumentException if the given string is malformed
*/
public static TaskAttemptID forName(String str
) throws IllegalArgumentException {
if(str == null)
return null;
String exceptionMsg = null;
try {
String[] parts = str.split(Character.toString(SEPARATOR));
if(parts.length == 6) {
if(parts[0].equals(ATTEMPT)) {
String type = parts[3];
TaskType t = TaskID.getTaskType(type.charAt(0));
if(t != null) {
return new org.apache.hadoop.mapred.TaskAttemptID
(parts[1],
Integer.parseInt(parts[2]),
t, Integer.parseInt(parts[4]),
Integer.parseInt(parts[5]));
} else
exceptionMsg = "Bad TaskType identifier. TaskAttemptId string : "
+ str + " is not properly formed.";
}
}
} catch (Exception ex) {
      // fall through to the error below
}
if (exceptionMsg == null) {
exceptionMsg = "TaskAttemptId string : " + str
+ " is not properly formed";
}
throw new IllegalArgumentException(exceptionMsg);
}
}
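// Round-trip sketch (illustrative values): build an attempt id from parts and
// recover an equal object from its string form, matching the format described
// in the class javadoc.
//
// TaskAttemptID id =
//     new TaskAttemptID("200707121733", 3, TaskType.MAP, 5, 0);
// // id.toString() -> "attempt_200707121733_0003_m_000005_0"
// TaskAttemptID parsed = TaskAttemptID.forName(id.toString());
// // id.equals(parsed) -> true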
| 6,372 | 30.087805 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/OutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <code>OutputCommitter</code> describes the commit of task output for a
* Map-Reduce job.
*
* <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
* the job to:<p>
* <ol>
* <li>
 *   Set up the job during initialization. For example, create the temporary
* output directory for the job during the initialization of the job.
* </li>
* <li>
* Cleanup the job after the job completion. For example, remove the
* temporary output directory after the job completion.
* </li>
* <li>
 *   Set up the task's temporary output.
* </li>
* <li>
* Check whether a task needs a commit. This is to avoid the commit
 *   procedure if a task does not need to commit.
* </li>
* <li>
* Commit of the task output.
* </li>
* <li>
* Discard the task commit.
* </li>
* </ol>
* The methods in this class can be called from several different processes and
* from several different contexts. It is important to know which process and
* which context each is called from. Each method should be marked accordingly
* in its documentation. It is also important to note that not all methods are
* guaranteed to be called once and only once. If a method is not guaranteed to
 * have this property, the output committer needs to handle it appropriately.
 * Note also that only in rare situations may a method be called multiple
 * times for the same task.
*
* @see org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
* @see JobContext
* @see TaskAttemptContext
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class OutputCommitter {
/**
   * For the framework to set up the job output during initialization. This is
* called from the application master process for the entire job. This will be
* called multiple times, once per job attempt.
*
* @param jobContext Context of the job whose output is being written.
* @throws IOException if temporary output could not be created
*/
public abstract void setupJob(JobContext jobContext) throws IOException;
/**
* For cleaning up the job's output after job completion. This is called
* from the application master process for the entire job. This may be called
* multiple times.
*
* @param jobContext Context of the job whose output is being written.
* @throws IOException
* @deprecated Use {@link #commitJob(JobContext)} and
* {@link #abortJob(JobContext, JobStatus.State)} instead.
*/
@Deprecated
public void cleanupJob(JobContext jobContext) throws IOException { }
/**
* For committing job's output after successful job completion. Note that this
* is invoked for jobs with final runstate as SUCCESSFUL. This is called
* from the application master process for the entire job. This is guaranteed
* to only be called once. If it throws an exception the entire job will
* fail.
*
* @param jobContext Context of the job whose output is being written.
* @throws IOException
*/
public void commitJob(JobContext jobContext) throws IOException {
cleanupJob(jobContext);
}
/**
* For aborting an unsuccessful job's output. Note that this is invoked for
* jobs with final runstate as {@link JobStatus.State#FAILED} or
* {@link JobStatus.State#KILLED}. This is called from the application
* master process for the entire job. This may be called multiple times.
*
* @param jobContext Context of the job whose output is being written.
* @param state final runstate of the job
* @throws IOException
*/
public void abortJob(JobContext jobContext, JobStatus.State state)
throws IOException {
cleanupJob(jobContext);
}
/**
* Sets up output for the task. This is called from each individual task's
* process that will output to HDFS, and it is called just for that task. This
* may be called multiple times for the same task, but for different task
* attempts.
*
* @param taskContext Context of the task whose output is being written.
* @throws IOException
*/
public abstract void setupTask(TaskAttemptContext taskContext)
throws IOException;
/**
* Check whether task needs a commit. This is called from each individual
* task's process that will output to HDFS, and it is called just for that
* task.
*
* @param taskContext
* @return true/false
* @throws IOException
*/
public abstract boolean needsTaskCommit(TaskAttemptContext taskContext)
throws IOException;
/**
   * To promote the task's temporary output to the final output location.
   * If {@link #needsTaskCommit(TaskAttemptContext)} returns true and this
   * task is the task that the AM determines finished first, this method
   * is called to commit an individual task's output. This marks that
   * task's output as complete, as {@link #commitJob(JobContext)} will
   * also be called later on if the entire job finished successfully. This
   * is called from a task's process. This may be called multiple times for
   * the same task, but for different task attempts. It should be very rare
   * for this to be called multiple times, and it requires odd networking
   * failures to make this happen. In the future the Hadoop framework may
   * eliminate this race.
*
* @param taskContext Context of the task whose output is being written.
* @throws IOException if commit is not successful.
*/
public abstract void commitTask(TaskAttemptContext taskContext)
throws IOException;
/**
   * Discard the task output. This is called from a task's process to clean
   * up a single task's output that has not yet been committed. This may be
* called multiple times for the same task, but for different task attempts.
*
* @param taskContext
* @throws IOException
*/
public abstract void abortTask(TaskAttemptContext taskContext)
throws IOException;
/**
* Is task output recovery supported for restarting jobs?
*
* If task output recovery is supported, job restart can be done more
* efficiently.
*
* @return <code>true</code> if task output recovery is supported,
* <code>false</code> otherwise
* @see #recoverTask(TaskAttemptContext)
* @deprecated Use {@link #isRecoverySupported(JobContext)} instead.
*/
@Deprecated
public boolean isRecoverySupported() {
return false;
}
/**
* Is task output recovery supported for restarting jobs?
*
* If task output recovery is supported, job restart can be done more
* efficiently.
*
* @param jobContext
* Context of the job whose output is being written.
* @return <code>true</code> if task output recovery is supported,
* <code>false</code> otherwise
* @throws IOException
* @see #recoverTask(TaskAttemptContext)
*/
public boolean isRecoverySupported(JobContext jobContext) throws IOException {
return isRecoverySupported();
}
/**
* Recover the task output.
*
* The retry-count for the job will be passed via the
* {@link MRJobConfig#APPLICATION_ATTEMPT_ID} key in
* {@link TaskAttemptContext#getConfiguration()} for the
* <code>OutputCommitter</code>. This is called from the application master
* process, but it is called individually for each task.
*
* If an exception is thrown the task will be attempted again.
*
   * This may be called multiple times for the same task, but from different
   * application attempts.
*
* @param taskContext Context of the task whose output is being recovered
* @throws IOException
*/
public void recoverTask(TaskAttemptContext taskContext)
throws IOException
{}
}
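// A minimal committer sketch (illustrative): for outputs that need no
// promotion step, every hook can be a no-op and needsTaskCommit can return
// false so commitTask is never invoked.
//
// class NullOutputCommitter extends OutputCommitter {
//   public void setupJob(JobContext jobContext) { }
//   public void setupTask(TaskAttemptContext taskContext) { }
//   public boolean needsTaskCommit(TaskAttemptContext taskContext) {
//     return false;
//   }
//   public void commitTask(TaskAttemptContext taskContext) { }
//   public void abortTask(TaskAttemptContext taskContext) { }
// }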
| 8,688 | 36.778261 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Enum representing queue state
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum QueueState {
STOPPED("stopped"), RUNNING("running"), UNDEFINED("undefined");
private final String stateName;
private static Map<String, QueueState> enumMap =
new HashMap<String, QueueState>();
static {
for (QueueState state : QueueState.values()) {
enumMap.put(state.getStateName(), state);
}
}
QueueState(String stateName) {
this.stateName = stateName;
}
/**
* @return the stateName
*/
public String getStateName() {
return stateName;
}
public static QueueState getState(String state) {
QueueState qState = enumMap.get(state);
if (qState == null) {
return UNDEFINED;
}
return qState;
}
@Override
public String toString() {
return stateName;
}
}
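// Lookup sketch: state names resolve through the static map built above, and
// any unknown name degrades to UNDEFINED rather than throwing.
//
// QueueState running = QueueState.getState("running");  // RUNNING
// QueueState unknown = QueueState.getState("paused");   // UNDEFINED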
| 1,862 | 26.397059 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Counters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.counters.GenericCounter;
import org.apache.hadoop.mapreduce.counters.AbstractCounterGroup;
import org.apache.hadoop.mapreduce.counters.CounterGroupBase;
import org.apache.hadoop.mapreduce.counters.FileSystemCounterGroup;
import org.apache.hadoop.mapreduce.counters.AbstractCounters;
import org.apache.hadoop.mapreduce.counters.CounterGroupFactory;
import org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup;
/**
* <p><code>Counters</code> holds per job/task counters, defined either by the
* Map-Reduce framework or applications. Each <code>Counter</code> can be of
* any {@link Enum} type.</p>
*
* <p><code>Counters</code> are bunched into {@link CounterGroup}s, each
 * comprising counters from a particular <code>Enum</code> class.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Counters extends AbstractCounters<Counter, CounterGroup> {
// Mix framework group implementation into CounterGroup interface
private static class FrameworkGroupImpl<T extends Enum<T>>
extends FrameworkCounterGroup<T, Counter> implements CounterGroup {
FrameworkGroupImpl(Class<T> cls) {
super(cls);
}
@Override
protected FrameworkCounter<T> newCounter(T key) {
return new FrameworkCounter<T>(key, getName());
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return this;
}
}
// Mix generic group implementation into CounterGroup interface
// and provide some mandatory group factory methods.
private static class GenericGroup extends AbstractCounterGroup<Counter>
implements CounterGroup {
GenericGroup(String name, String displayName, Limits limits) {
super(name, displayName, limits);
}
@Override
protected Counter newCounter(String name, String displayName, long value) {
return new GenericCounter(name, displayName, value);
}
@Override
protected Counter newCounter() {
return new GenericCounter();
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return this;
}
}
// Mix file system group implementation into the CounterGroup interface
private static class FileSystemGroup extends FileSystemCounterGroup<Counter>
implements CounterGroup {
@Override
protected Counter newCounter(String scheme, FileSystemCounter key) {
return new FSCounter(scheme, key);
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return this;
}
}
/**
* Provide factory methods for counter group factory implementation.
* See also the GroupFactory in
* {@link org.apache.hadoop.mapred.Counters mapred.Counters}
*/
private static class GroupFactory
extends CounterGroupFactory<Counter, CounterGroup> {
@Override
protected <T extends Enum<T>>
FrameworkGroupFactory<CounterGroup>
newFrameworkGroupFactory(final Class<T> cls) {
return new FrameworkGroupFactory<CounterGroup>() {
@Override public CounterGroup newGroup(String name) {
return new FrameworkGroupImpl<T>(cls); // impl in this package
}
};
}
@Override
protected CounterGroup newGenericGroup(String name, String displayName,
Limits limits) {
return new GenericGroup(name, displayName, limits);
}
@Override
protected CounterGroup newFileSystemGroup() {
return new FileSystemGroup();
}
}
private static final GroupFactory groupFactory = new GroupFactory();
/**
* Default constructor
*/
public Counters() {
super(groupFactory);
}
/**
   * Construct a Counters object from another counters object.
* @param <C> the type of counter
* @param <G> the type of counter group
* @param counters the old counters object
*/
public <C extends Counter, G extends CounterGroupBase<C>>
Counters(AbstractCounters<C, G> counters) {
super(counters, groupFactory);
}
}
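// Usage sketch (illustrative, 'counters' hypothetical): framework counters
// are addressed by enum constant, while application counters live in generic
// groups addressed by group and counter name.
//
// counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).increment(1);
// Counter skipped = counters.findCounter("MyApp", "RECORDS_SKIPPED");
// skipped.increment(10);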
| 5,032 | 31.895425 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* Status information on the current state of the Map-Reduce cluster.
*
* <p><code>ClusterMetrics</code> provides clients with information such as:
* <ol>
* <li>
* Size of the cluster.
* </li>
* <li>
* Number of blacklisted and decommissioned trackers.
* </li>
* <li>
* Slot capacity of the cluster.
* </li>
* <li>
* The number of currently occupied/reserved map and reduce slots.
* </li>
* <li>
* The number of currently running map and reduce tasks.
* </li>
* <li>
* The number of job submissions.
* </li>
* </ol>
*
* <p>Clients can query for the latest <code>ClusterMetrics</code>, via
* {@link Cluster#getClusterStatus()}.</p>
*
* @see Cluster
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ClusterMetrics implements Writable {
private int runningMaps;
private int runningReduces;
private int occupiedMapSlots;
private int occupiedReduceSlots;
private int reservedMapSlots;
private int reservedReduceSlots;
private int totalMapSlots;
private int totalReduceSlots;
private int totalJobSubmissions;
private int numTrackers;
private int numBlacklistedTrackers;
private int numGraylistedTrackers;
private int numDecommissionedTrackers;
public ClusterMetrics() {
}
public ClusterMetrics(int runningMaps, int runningReduces,
int occupiedMapSlots, int occupiedReduceSlots, int reservedMapSlots,
int reservedReduceSlots, int mapSlots, int reduceSlots,
int totalJobSubmissions, int numTrackers, int numBlacklistedTrackers,
int numDecommissionedNodes) {
this(runningMaps, runningReduces, occupiedMapSlots, occupiedReduceSlots,
reservedMapSlots, reservedReduceSlots, mapSlots, reduceSlots,
totalJobSubmissions, numTrackers, numBlacklistedTrackers, 0,
numDecommissionedNodes);
}
public ClusterMetrics(int runningMaps, int runningReduces,
int occupiedMapSlots, int occupiedReduceSlots, int reservedMapSlots,
int reservedReduceSlots, int mapSlots, int reduceSlots,
int totalJobSubmissions, int numTrackers, int numBlacklistedTrackers,
int numGraylistedTrackers, int numDecommissionedNodes) {
this.runningMaps = runningMaps;
this.runningReduces = runningReduces;
this.occupiedMapSlots = occupiedMapSlots;
this.occupiedReduceSlots = occupiedReduceSlots;
this.reservedMapSlots = reservedMapSlots;
this.reservedReduceSlots = reservedReduceSlots;
this.totalMapSlots = mapSlots;
this.totalReduceSlots = reduceSlots;
this.totalJobSubmissions = totalJobSubmissions;
this.numTrackers = numTrackers;
this.numBlacklistedTrackers = numBlacklistedTrackers;
this.numGraylistedTrackers = numGraylistedTrackers;
this.numDecommissionedTrackers = numDecommissionedNodes;
}
/**
* Get the number of running map tasks in the cluster.
*
* @return running maps
*/
public int getRunningMaps() {
return runningMaps;
}
/**
* Get the number of running reduce tasks in the cluster.
*
* @return running reduces
*/
public int getRunningReduces() {
return runningReduces;
}
/**
* Get number of occupied map slots in the cluster.
*
* @return occupied map slot count
*/
public int getOccupiedMapSlots() {
return occupiedMapSlots;
}
/**
* Get the number of occupied reduce slots in the cluster.
*
* @return occupied reduce slot count
*/
public int getOccupiedReduceSlots() {
return occupiedReduceSlots;
}
/**
* Get number of reserved map slots in the cluster.
*
* @return reserved map slot count
*/
public int getReservedMapSlots() {
return reservedMapSlots;
}
/**
* Get the number of reserved reduce slots in the cluster.
*
* @return reserved reduce slot count
*/
public int getReservedReduceSlots() {
return reservedReduceSlots;
}
/**
* Get the total number of map slots in the cluster.
*
* @return map slot capacity
*/
public int getMapSlotCapacity() {
return totalMapSlots;
}
/**
* Get the total number of reduce slots in the cluster.
*
* @return reduce slot capacity
*/
public int getReduceSlotCapacity() {
return totalReduceSlots;
}
/**
* Get the total number of job submissions in the cluster.
*
* @return total number of job submissions
*/
public int getTotalJobSubmissions() {
return totalJobSubmissions;
}
/**
* Get the number of active trackers in the cluster.
*
* @return active tracker count.
*/
public int getTaskTrackerCount() {
return numTrackers;
}
/**
* Get the number of blacklisted trackers in the cluster.
*
* @return blacklisted tracker count
*/
public int getBlackListedTaskTrackerCount() {
return numBlacklistedTrackers;
}
/**
* Get the number of graylisted trackers in the cluster.
*
* @return graylisted tracker count
*/
public int getGrayListedTaskTrackerCount() {
return numGraylistedTrackers;
}
/**
* Get the number of decommissioned trackers in the cluster.
*
* @return decommissioned tracker count
*/
public int getDecommissionedTaskTrackerCount() {
return numDecommissionedTrackers;
}
@Override
public void readFields(DataInput in) throws IOException {
runningMaps = in.readInt();
runningReduces = in.readInt();
occupiedMapSlots = in.readInt();
occupiedReduceSlots = in.readInt();
reservedMapSlots = in.readInt();
reservedReduceSlots = in.readInt();
totalMapSlots = in.readInt();
totalReduceSlots = in.readInt();
totalJobSubmissions = in.readInt();
numTrackers = in.readInt();
numBlacklistedTrackers = in.readInt();
numGraylistedTrackers = in.readInt();
numDecommissionedTrackers = in.readInt();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(runningMaps);
out.writeInt(runningReduces);
out.writeInt(occupiedMapSlots);
out.writeInt(occupiedReduceSlots);
out.writeInt(reservedMapSlots);
out.writeInt(reservedReduceSlots);
out.writeInt(totalMapSlots);
out.writeInt(totalReduceSlots);
out.writeInt(totalJobSubmissions);
out.writeInt(numTrackers);
out.writeInt(numBlacklistedTrackers);
out.writeInt(numGraylistedTrackers);
out.writeInt(numDecommissionedTrackers);
}
}
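// Query sketch (illustrative, 'cluster' hypothetical), per the class javadoc:
//
// ClusterMetrics metrics = cluster.getClusterStatus();
// int activeTrackers = metrics.getTaskTrackerCount();
// int mapSlots = metrics.getMapSlotCapacity();
// int occupied = metrics.getOccupiedMapSlots();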
| 7,491 | 27.704981 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.NumberFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
/**
* JobID represents the immutable and unique identifier for
 * the job. JobID consists of two parts. The first part
 * represents the jobtracker identifier, so that the jobID-to-jobtracker
 * mapping is defined. For a cluster setup this string is the jobtracker
 * start time; for the local setting, it is "local" plus a random number.
 * The second part of the JobID is the job number. <br>
* An example JobID is :
* <code>job_200707121733_0003</code> , which represents the third job
* running at the jobtracker started at <code>200707121733</code>.
* <p>
* Applications should never construct or parse JobID strings, but rather
 * use appropriate constructors or the {@link #forName(String)} method.
*
* @see TaskID
* @see TaskAttemptID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobID extends org.apache.hadoop.mapred.ID
implements Comparable<ID> {
public static final String JOB = "job";
// Jobid regex for various tools and framework components
public static final String JOBID_REGEX =
JOB + SEPARATOR + "[0-9]+" + SEPARATOR + "[0-9]+";
private final Text jtIdentifier;
protected static final NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setGroupingUsed(false);
idFormat.setMinimumIntegerDigits(4);
}
/**
* Constructs a JobID object
* @param jtIdentifier jobTracker identifier
* @param id job number
*/
public JobID(String jtIdentifier, int id) {
super(id);
this.jtIdentifier = new Text(jtIdentifier);
}
public JobID() {
jtIdentifier = new Text();
}
public String getJtIdentifier() {
return jtIdentifier.toString();
}
@Override
public boolean equals(Object o) {
if (!super.equals(o))
return false;
JobID that = (JobID)o;
return this.jtIdentifier.equals(that.jtIdentifier);
}
/**Compare JobIds by first jtIdentifiers, then by job numbers*/
@Override
public int compareTo(ID o) {
JobID that = (JobID)o;
int jtComp = this.jtIdentifier.compareTo(that.jtIdentifier);
if(jtComp == 0) {
return this.id - that.id;
}
else return jtComp;
}
/**
   * Append everything after the "job" prefix to the given builder. This is
   * useful because the sub-ids use this substring at the start of their
   * string representations.
* @param builder the builder to append to
* @return the builder that was passed in
*/
public StringBuilder appendTo(StringBuilder builder) {
builder.append(SEPARATOR);
builder.append(jtIdentifier);
builder.append(SEPARATOR);
builder.append(idFormat.format(id));
return builder;
}
@Override
public int hashCode() {
return jtIdentifier.hashCode() + id;
}
@Override
public String toString() {
return appendTo(new StringBuilder(JOB)).toString();
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
this.jtIdentifier.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
jtIdentifier.write(out);
}
  /** Construct a JobID object from a given string.
   * @return the constructed JobID object, or null if the given String is null
* @throws IllegalArgumentException if the given string is malformed
*/
public static JobID forName(String str) throws IllegalArgumentException {
if(str == null)
return null;
try {
String[] parts = str.split("_");
if(parts.length == 3) {
if(parts[0].equals(JOB)) {
return new org.apache.hadoop.mapred.JobID(parts[1],
Integer.parseInt(parts[2]));
}
}
    } catch (Exception ex) { // fall through to the error below
}
throw new IllegalArgumentException("JobId string : " + str
+ " is not properly formed");
}
}
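// Round-trip sketch (illustrative values), matching the format described in
// the class javadoc:
//
// JobID jobId = new JobID("200707121733", 3);
// // jobId.toString() -> "job_200707121733_0003"
// // JobID.forName(jobId.toString()).equals(jobId) -> true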
| 4,951 | 29.757764 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ReservationId;
/**
* The job submitter's view of the Job.
*
* <p>It allows the user to configure the
* job, submit it, control its execution, and query the state. The set methods
* only work until the job is submitted, afterwards they will throw an
* IllegalStateException. </p>
*
* <p>
* Normally the user creates the application, describes various facets of the
 * job via {@link Job} and then submits the job and monitors its progress.</p>
*
* <p>Here is an example on how to submit a job:</p>
* <p><blockquote><pre>
* // Create a new Job
* Job job = Job.getInstance();
* job.setJarByClass(MyJob.class);
*
* // Specify various job-specific parameters
* job.setJobName("myjob");
*
* job.setInputPath(new Path("in"));
* job.setOutputPath(new Path("out"));
*
* job.setMapperClass(MyJob.MyMapper.class);
* job.setReducerClass(MyJob.MyReducer.class);
*
* // Submit the job, then poll for progress until the job is complete
* job.waitForCompletion(true);
* </pre></blockquote>
*
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Job extends JobContextImpl implements JobContext {
private static final Log LOG = LogFactory.getLog(Job.class);
@InterfaceStability.Evolving
public static enum JobState {DEFINE, RUNNING};
private static final long MAX_JOBSTATUS_AGE = 1000 * 2;
public static final String OUTPUT_FILTER = "mapreduce.client.output.filter";
  /** Key in mapred-*.xml that sets completionPollIntervalMillis */
public static final String COMPLETION_POLL_INTERVAL_KEY =
"mapreduce.client.completion.pollinterval";
/** Default completionPollIntervalMillis is 5000 ms. */
static final int DEFAULT_COMPLETION_POLL_INTERVAL = 5000;
/** Key in mapred-*.xml that sets progMonitorPollIntervalMillis */
public static final String PROGRESS_MONITOR_POLL_INTERVAL_KEY =
"mapreduce.client.progressmonitor.pollinterval";
/** Default progMonitorPollIntervalMillis is 1000 ms. */
static final int DEFAULT_MONITOR_POLL_INTERVAL = 1000;
public static final String USED_GENERIC_PARSER =
"mapreduce.client.genericoptionsparser.used";
public static final String SUBMIT_REPLICATION =
"mapreduce.client.submit.file.replication";
public static final int DEFAULT_SUBMIT_REPLICATION = 10;
@InterfaceStability.Evolving
public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
static {
ConfigUtil.loadResources();
}
private JobState state = JobState.DEFINE;
private JobStatus status;
private long statustime;
private Cluster cluster;
private ReservationId reservationId;
/**
* @deprecated Use {@link #getInstance()}
*/
@Deprecated
public Job() throws IOException {
this(new JobConf(new Configuration()));
}
/**
* @deprecated Use {@link #getInstance(Configuration)}
*/
@Deprecated
public Job(Configuration conf) throws IOException {
this(new JobConf(conf));
}
/**
* @deprecated Use {@link #getInstance(Configuration, String)}
*/
@Deprecated
public Job(Configuration conf, String jobName) throws IOException {
this(new JobConf(conf));
setJobName(jobName);
}
Job(JobConf conf) throws IOException {
super(conf, null);
// propagate existing user credentials to job
this.credentials.mergeAll(this.ugi.getCredentials());
this.cluster = null;
}
Job(JobStatus status, JobConf conf) throws IOException {
this(conf);
setJobID(status.getJobID());
this.status = status;
state = JobState.RUNNING;
}
/**
   * Creates a new {@link Job} with no particular {@link Cluster}.
* A Cluster will be created with a generic {@link Configuration}.
*
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
*/
public static Job getInstance() throws IOException {
// create with a null Cluster
return getInstance(new Configuration());
}
/**
* Creates a new {@link Job} with no particular {@link Cluster} and a
* given {@link Configuration}.
*
* The <code>Job</code> makes a copy of the <code>Configuration</code> so
* that any necessary internal modifications do not reflect on the incoming
* parameter.
*
* A Cluster will be created from the conf parameter only when it's needed.
*
* @param conf the configuration
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
*/
public static Job getInstance(Configuration conf) throws IOException {
// create with a null Cluster
JobConf jobConf = new JobConf(conf);
return new Job(jobConf);
}
/**
* Creates a new {@link Job} with no particular {@link Cluster} and a given jobName.
* A Cluster will be created from the conf parameter only when it's needed.
*
* The <code>Job</code> makes a copy of the <code>Configuration</code> so
* that any necessary internal modifications do not reflect on the incoming
* parameter.
*
* @param conf the configuration
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
*/
public static Job getInstance(Configuration conf, String jobName)
throws IOException {
// create with a null Cluster
Job result = getInstance(conf);
result.setJobName(jobName);
return result;
}
/**
* Creates a new {@link Job} with no particular {@link Cluster} and given
* {@link Configuration} and {@link JobStatus}.
* A Cluster will be created from the conf parameter only when it's needed.
*
* The <code>Job</code> makes a copy of the <code>Configuration</code> so
* that any necessary internal modifications do not reflect on the incoming
* parameter.
*
* @param status job status
* @param conf job configuration
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
*/
public static Job getInstance(JobStatus status, Configuration conf)
throws IOException {
return new Job(status, new JobConf(conf));
}
/**
* Creates a new {@link Job} with no particular {@link Cluster}.
* A Cluster will be created from the conf parameter only when it's needed.
*
* The <code>Job</code> makes a copy of the <code>Configuration</code> so
* that any necessary internal modifications do not reflect on the incoming
* parameter.
*
* @param ignored
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
* @deprecated Use {@link #getInstance()}
*/
@Deprecated
public static Job getInstance(Cluster ignored) throws IOException {
return getInstance();
}
/**
* Creates a new {@link Job} with no particular {@link Cluster} and given
* {@link Configuration}.
* A Cluster will be created from the conf parameter only when it's needed.
*
* The <code>Job</code> makes a copy of the <code>Configuration</code> so
* that any necessary internal modifications do not reflect on the incoming
* parameter.
*
* @param ignored
* @param conf job configuration
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
* @deprecated Use {@link #getInstance(Configuration)}
*/
@Deprecated
public static Job getInstance(Cluster ignored, Configuration conf)
throws IOException {
return getInstance(conf);
}
/**
* Creates a new {@link Job} with no particular {@link Cluster} and given
* {@link Configuration} and {@link JobStatus}.
* A Cluster will be created from the conf parameter only when it's needed.
*
* The <code>Job</code> makes a copy of the <code>Configuration</code> so
* that any necessary internal modifications do not reflect on the incoming
* parameter.
*
* @param cluster cluster
* @param status job status
* @param conf job configuration
   * @return the {@link Job}, with no connection to a cluster yet.
* @throws IOException
*/
@Private
public static Job getInstance(Cluster cluster, JobStatus status,
Configuration conf) throws IOException {
Job job = getInstance(status, conf);
job.setCluster(cluster);
return job;
}
private void ensureState(JobState state) throws IllegalStateException {
if (state != this.state) {
throw new IllegalStateException("Job in state "+ this.state +
" instead of " + state);
}
if (state == JobState.RUNNING && cluster == null) {
throw new IllegalStateException
("Job in state " + this.state
+ ", but it isn't attached to any job tracker!");
}
}
/**
* Some methods rely on having a recent job status object. Refresh
   * it if necessary.
*/
synchronized void ensureFreshStatus()
throws IOException {
if (System.currentTimeMillis() - statustime > MAX_JOBSTATUS_AGE) {
updateStatus();
}
}
  /** Some methods need to update status immediately, so refresh
   * it right away.
* @throws IOException
*/
synchronized void updateStatus() throws IOException {
try {
this.status = ugi.doAs(new PrivilegedExceptionAction<JobStatus>() {
@Override
public JobStatus run() throws IOException, InterruptedException {
return cluster.getClient().getJobStatus(status.getJobID());
}
});
}
catch (InterruptedException ie) {
throw new IOException(ie);
}
if (this.status == null) {
throw new IOException("Job status not available ");
}
this.statustime = System.currentTimeMillis();
}
public JobStatus getStatus() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status;
}
/**
* Returns the current state of the Job.
*
* @return JobStatus#State
* @throws IOException
* @throws InterruptedException
*/
public JobStatus.State getJobState()
throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status.getState();
}
/**
* Get the URL where some job progress information will be displayed.
*
* @return the URL where some job progress information will be displayed.
*/
  public String getTrackingURL() {
ensureState(JobState.RUNNING);
return status.getTrackingUrl().toString();
}
/**
* Get the path of the submitted job configuration.
*
* @return the path of the submitted job configuration.
*/
public String getJobFile() {
ensureState(JobState.RUNNING);
return status.getJobFile();
}
/**
* Get start time of the job.
*
* @return the start time of the job
*/
public long getStartTime() {
ensureState(JobState.RUNNING);
return status.getStartTime();
}
/**
* Get finish time of the job.
*
* @return the finish time of the job
*/
public long getFinishTime() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status.getFinishTime();
}
/**
* Get scheduling info of the job.
*
* @return the scheduling info of the job
*/
public String getSchedulingInfo() {
ensureState(JobState.RUNNING);
return status.getSchedulingInfo();
}
/**
   * Get the priority of the job.
   *
   * @return the priority of the job
*/
public JobPriority getPriority() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status.getPriority();
}
/**
* The user-specified job name.
*/
public String getJobName() {
if (state == JobState.DEFINE) {
return super.getJobName();
}
ensureState(JobState.RUNNING);
return status.getJobName();
}
public String getHistoryUrl() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status.getHistoryFile();
}
public boolean isRetired() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status.isRetired();
}
@Private
public Cluster getCluster() {
return cluster;
}
/** Only for mocks in unit tests. */
@Private
private void setCluster(Cluster cluster) {
this.cluster = cluster;
}
/**
   * Dump job status and stats to a human-readable string.
*/
@Override
public String toString() {
ensureState(JobState.RUNNING);
String reasonforFailure = " ";
int numMaps = 0;
int numReduces = 0;
try {
updateStatus();
if (status.getState().equals(JobStatus.State.FAILED))
reasonforFailure = getTaskFailureEventString();
numMaps = getTaskReports(TaskType.MAP).length;
numReduces = getTaskReports(TaskType.REDUCE).length;
} catch (IOException e) {
} catch (InterruptedException ie) {
}
StringBuffer sb = new StringBuffer();
sb.append("Job: ").append(status.getJobID()).append("\n");
sb.append("Job File: ").append(status.getJobFile()).append("\n");
sb.append("Job Tracking URL : ").append(status.getTrackingUrl());
sb.append("\n");
sb.append("Uber job : ").append(status.isUber()).append("\n");
sb.append("Number of maps: ").append(numMaps).append("\n");
sb.append("Number of reduces: ").append(numReduces).append("\n");
sb.append("map() completion: ");
sb.append(status.getMapProgress()).append("\n");
sb.append("reduce() completion: ");
sb.append(status.getReduceProgress()).append("\n");
sb.append("Job state: ");
sb.append(status.getState()).append("\n");
sb.append("retired: ").append(status.isRetired()).append("\n");
sb.append("reason for failure: ").append(reasonforFailure);
return sb.toString();
}
/**
* @return taskid which caused job failure
* @throws IOException
* @throws InterruptedException
*/
String getTaskFailureEventString() throws IOException,
InterruptedException {
int failCount = 1;
TaskCompletionEvent lastEvent = null;
TaskCompletionEvent[] events = ugi.doAs(new
PrivilegedExceptionAction<TaskCompletionEvent[]>() {
@Override
public TaskCompletionEvent[] run() throws IOException,
InterruptedException {
return cluster.getClient().getTaskCompletionEvents(
status.getJobID(), 0, 10);
}
});
for (TaskCompletionEvent event : events) {
if (event.getStatus().equals(TaskCompletionEvent.Status.FAILED)) {
failCount++;
lastEvent = event;
}
}
if (lastEvent == null) {
return "There are no failed tasks for the job. "
+ "Job is failed due to some other reason and reason "
+ "can be found in the logs.";
}
String[] taskAttemptID = lastEvent.getTaskAttemptId().toString().split("_", 2);
String taskID = taskAttemptID[1].substring(0, taskAttemptID[1].length()-2);
return (" task " + taskID + " failed " +
failCount + " times " + "For details check tasktracker at: " +
lastEvent.getTaskTrackerHttp());
}
/**
   * Get information about the current state of the tasks of a job.
   *
   * @param type Type of the task
   * @return the task reports for the given task type
* @throws IOException
*/
public TaskReport[] getTaskReports(TaskType type)
throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
final TaskType tmpType = type;
return ugi.doAs(new PrivilegedExceptionAction<TaskReport[]>() {
public TaskReport[] run() throws IOException, InterruptedException {
return cluster.getClient().getTaskReports(getJobID(), tmpType);
}
});
}
/**
* Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
* and 1.0. When all map tasks have completed, the function returns 1.0.
*
* @return the progress of the job's map-tasks.
* @throws IOException
*/
public float mapProgress() throws IOException {
ensureState(JobState.RUNNING);
ensureFreshStatus();
return status.getMapProgress();
}
/**
* Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
* and 1.0. When all reduce tasks have completed, the function returns 1.0.
*
* @return the progress of the job's reduce-tasks.
* @throws IOException
*/
public float reduceProgress() throws IOException {
ensureState(JobState.RUNNING);
ensureFreshStatus();
return status.getReduceProgress();
}
/**
* Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0
* and 1.0. When all cleanup tasks have completed, the function returns 1.0.
*
* @return the progress of the job's cleanup-tasks.
* @throws IOException
*/
public float cleanupProgress() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
ensureFreshStatus();
return status.getCleanupProgress();
}
/**
* Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0
* and 1.0. When all setup tasks have completed, the function returns 1.0.
*
* @return the progress of the job's setup-tasks.
* @throws IOException
*/
public float setupProgress() throws IOException {
ensureState(JobState.RUNNING);
ensureFreshStatus();
return status.getSetupProgress();
}
/**
* Check if the job is finished or not.
* This is a non-blocking call.
*
* @return <code>true</code> if the job is complete, else <code>false</code>.
* @throws IOException
*/
public boolean isComplete() throws IOException {
ensureState(JobState.RUNNING);
updateStatus();
return status.isJobComplete();
}
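  // Polling sketch (illustrative, assumes the job has been submitted): the
  // progress getters above can drive a simple monitor loop without blocking
  // in waitForCompletion().
  //
  // while (!job.isComplete()) {
  //   System.out.printf("map %.0f%% reduce %.0f%%%n",
  //       job.mapProgress() * 100, job.reduceProgress() * 100);
  //   Thread.sleep(5000);
  // }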
/**
* Check if the job completed successfully.
*
* @return <code>true</code> if the job succeeded, else <code>false</code>.
* @throws IOException
*/
public boolean isSuccessful() throws IOException {
ensureState(JobState.RUNNING);
updateStatus();
return status.getState() == JobStatus.State.SUCCEEDED;
}
/**
* Kill the running job. Blocks until all job tasks have been
* killed as well. If the job is no longer running, it simply returns.
*
* @throws IOException
*/
public void killJob() throws IOException {
ensureState(JobState.RUNNING);
try {
cluster.getClient().killJob(getJobID());
}
catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Set the priority of a running job.
* @param priority the new priority for the job.
* @throws IOException
*/
public void setPriority(JobPriority priority)
throws IOException, InterruptedException {
if (state == JobState.DEFINE) {
conf.setJobPriority(
org.apache.hadoop.mapred.JobPriority.valueOf(priority.name()));
} else {
ensureState(JobState.RUNNING);
final JobPriority tmpPriority = priority;
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException, InterruptedException {
cluster.getClient().setJobPriority(getJobID(), tmpPriority.toString());
return null;
}
});
}
}
/**
* Get events indicating completion (success/failure) of component tasks.
*
* @param startFrom index to start fetching events from
* @param numEvents number of events to fetch
* @return an array of {@link TaskCompletionEvent}s
* @throws IOException
*/
public TaskCompletionEvent[] getTaskCompletionEvents(final int startFrom,
final int numEvents) throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
return ugi.doAs(new PrivilegedExceptionAction<TaskCompletionEvent[]>() {
@Override
public TaskCompletionEvent[] run() throws IOException, InterruptedException {
return cluster.getClient().getTaskCompletionEvents(getJobID(),
startFrom, numEvents);
}
});
}
/**
* Get events indicating completion (success/failure) of component tasks.
*
* @param startFrom index to start fetching events from
* @return an array of {@link org.apache.hadoop.mapred.TaskCompletionEvent}s
* @throws IOException
*/
public org.apache.hadoop.mapred.TaskCompletionEvent[]
getTaskCompletionEvents(final int startFrom) throws IOException {
try {
TaskCompletionEvent[] events = getTaskCompletionEvents(startFrom, 10);
org.apache.hadoop.mapred.TaskCompletionEvent[] retEvents =
new org.apache.hadoop.mapred.TaskCompletionEvent[events.length];
for (int i = 0; i < events.length; i++) {
retEvents[i] = org.apache.hadoop.mapred.TaskCompletionEvent.downgrade
(events[i]);
}
return retEvents;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Kill indicated task attempt.
* @param taskId the id of the task to kill.
* @param shouldFail if <code>true</code> the task is failed and added
* to failed tasks list, otherwise it is just killed,
* w/o affecting job failure status.
*/
@Private
public boolean killTask(final TaskAttemptID taskId,
final boolean shouldFail) throws IOException {
ensureState(JobState.RUNNING);
try {
return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
public Boolean run() throws IOException, InterruptedException {
return cluster.getClient().killTask(taskId, shouldFail);
}
});
}
catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Kill indicated task attempt.
*
* @param taskId the id of the task to be terminated.
* @throws IOException
*/
public void killTask(final TaskAttemptID taskId)
throws IOException {
killTask(taskId, false);
}
/**
* Fail indicated task attempt.
*
* @param taskId the id of the task to be terminated.
* @throws IOException
*/
public void failTask(final TaskAttemptID taskId)
throws IOException {
killTask(taskId, true);
}
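  // Illustrative sketch ('attemptId' hypothetical): killing an attempt
  // reschedules it without counting against the job's failure limits, while
  // failing it adds the attempt to the failed-tasks list.
  //
  // TaskAttemptID attemptId = TaskAttemptID.forName(
  //     "attempt_200707121733_0003_m_000005_0");
  // job.killTask(attemptId);    // kill: no effect on job failure status
  // // job.failTask(attemptId); // fail: counted against the job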
/**
* Gets the counters for this job. May return null if the job has been
* retired and the job is no longer in the completed job store.
*
* @return the counters for this job.
* @throws IOException
*/
public Counters getCounters()
throws IOException {
ensureState(JobState.RUNNING);
try {
return ugi.doAs(new PrivilegedExceptionAction<Counters>() {
@Override
public Counters run() throws IOException, InterruptedException {
return cluster.getClient().getJobCounters(getJobID());
}
});
}
catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Gets the diagnostic messages for a given task attempt.
* @param taskid
* @return the list of diagnostic messages for the task
* @throws IOException
*/
public String[] getTaskDiagnostics(final TaskAttemptID taskid)
throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
return ugi.doAs(new PrivilegedExceptionAction<String[]>() {
@Override
public String[] run() throws IOException, InterruptedException {
return cluster.getClient().getTaskDiagnostics(taskid);
}
});
}
/**
* Set the number of reduce tasks for the job.
* @param tasks the number of reduce tasks
* @throws IllegalStateException if the job is submitted
*/
public void setNumReduceTasks(int tasks) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setNumReduceTasks(tasks);
}
/**
* Set the current working directory for the default file system.
*
* @param dir the new current working directory.
* @throws IllegalStateException if the job is submitted
*/
public void setWorkingDirectory(Path dir) throws IOException {
ensureState(JobState.DEFINE);
conf.setWorkingDirectory(dir);
}
/**
* Set the {@link InputFormat} for the job.
* @param cls the <code>InputFormat</code> to use
* @throws IllegalStateException if the job is submitted
*/
public void setInputFormatClass(Class<? extends InputFormat> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setClass(INPUT_FORMAT_CLASS_ATTR, cls,
InputFormat.class);
}
/**
* Set the {@link OutputFormat} for the job.
* @param cls the <code>OutputFormat</code> to use
* @throws IllegalStateException if the job is submitted
*/
public void setOutputFormatClass(Class<? extends OutputFormat> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setClass(OUTPUT_FORMAT_CLASS_ATTR, cls,
OutputFormat.class);
}
/**
* Set the {@link Mapper} for the job.
* @param cls the <code>Mapper</code> to use
* @throws IllegalStateException if the job is submitted
*/
public void setMapperClass(Class<? extends Mapper> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setClass(MAP_CLASS_ATTR, cls, Mapper.class);
}
/**
* Set the Jar by finding where a given class came from.
* @param cls the example class
*/
public void setJarByClass(Class<?> cls) {
ensureState(JobState.DEFINE);
conf.setJarByClass(cls);
}
/**
* Set the job jar
*/
public void setJar(String jar) {
ensureState(JobState.DEFINE);
conf.setJar(jar);
}
/**
* Set the reported username for this job.
*
* @param user the username for this job.
*/
public void setUser(String user) {
ensureState(JobState.DEFINE);
conf.setUser(user);
}
/**
* Set the combiner class for the job.
* @param cls the combiner to use
* @throws IllegalStateException if the job is submitted
*/
public void setCombinerClass(Class<? extends Reducer> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setClass(COMBINE_CLASS_ATTR, cls, Reducer.class);
}
/**
* Set the {@link Reducer} for the job.
* @param cls the <code>Reducer</code> to use
* @throws IllegalStateException if the job is submitted
*/
public void setReducerClass(Class<? extends Reducer> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setClass(REDUCE_CLASS_ATTR, cls, Reducer.class);
}
/**
* Set the {@link Partitioner} for the job.
* @param cls the <code>Partitioner</code> to use
* @throws IllegalStateException if the job is submitted
*/
public void setPartitionerClass(Class<? extends Partitioner> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setClass(PARTITIONER_CLASS_ATTR, cls,
Partitioner.class);
}
/**
* Set the key class for the map output data. This allows the user to
* specify the map output key class to be different than the final output
* key class.
*
* @param theClass the map output key class.
* @throws IllegalStateException if the job is submitted
*/
public void setMapOutputKeyClass(Class<?> theClass
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setMapOutputKeyClass(theClass);
}
/**
* Set the value class for the map output data. This allows the user to
* specify the map output value class to be different than the final output
* value class.
*
* @param theClass the map output value class.
* @throws IllegalStateException if the job is submitted
*/
public void setMapOutputValueClass(Class<?> theClass
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setMapOutputValueClass(theClass);
}
/**
* Set the key class for the job output data.
*
* @param theClass the key class for the job output data.
* @throws IllegalStateException if the job is submitted
*/
public void setOutputKeyClass(Class<?> theClass
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setOutputKeyClass(theClass);
}
/**
* Set the value class for job outputs.
*
* @param theClass the value class for job outputs.
* @throws IllegalStateException if the job is submitted
*/
public void setOutputValueClass(Class<?> theClass
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setOutputValueClass(theClass);
}
/**
* Define the comparator that controls which keys are grouped together
* for a single call to the combiner,
* {@link Reducer#reduce(Object, Iterable,
* org.apache.hadoop.mapreduce.Reducer.Context)}
*
* @param cls the raw comparator to use
* @throws IllegalStateException if the job is submitted
*/
public void setCombinerKeyGroupingComparatorClass(
Class<? extends RawComparator> cls) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setCombinerKeyGroupingComparator(cls);
}
/**
* Define the comparator that controls how the keys are sorted before they
* are passed to the {@link Reducer}.
* @param cls the raw comparator
* @throws IllegalStateException if the job is submitted
* @see #setCombinerKeyGroupingComparatorClass(Class)
*/
public void setSortComparatorClass(Class<? extends RawComparator> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setOutputKeyComparatorClass(cls);
}
/**
* Define the comparator that controls which keys are grouped together
* for a single call to
* {@link Reducer#reduce(Object, Iterable,
* org.apache.hadoop.mapreduce.Reducer.Context)}
* @param cls the raw comparator to use
* @throws IllegalStateException if the job is submitted
* @see #setCombinerKeyGroupingComparatorClass(Class)
*/
public void setGroupingComparatorClass(Class<? extends RawComparator> cls
) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setOutputValueGroupingComparator(cls);
}
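// Illustrative usage (a hedged sketch for secondary sort): the sort
// comparator orders by the full composite key while the grouping comparator
// groups reduce input by the natural key only. FirstPartitioner,
// FullKeyComparator and NaturalKeyGroupingComparator are hypothetical names.
//
//   job.setPartitionerClass(FirstPartitioner.class);
//   job.setSortComparatorClass(FullKeyComparator.class);
//   job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);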
/**
* Set the user-specified job name.
*
* @param name the job's new name.
* @throws IllegalStateException if the job is submitted
*/
public void setJobName(String name) throws IllegalStateException {
ensureState(JobState.DEFINE);
conf.setJobName(name);
}
/**
* Turn speculative execution on or off for this job.
*
* @param speculativeExecution <code>true</code> if speculative execution
* should be turned on, else <code>false</code>.
*/
public void setSpeculativeExecution(boolean speculativeExecution) {
ensureState(JobState.DEFINE);
conf.setSpeculativeExecution(speculativeExecution);
}
/**
* Turn speculative execution on or off for this job for map tasks.
*
* @param speculativeExecution <code>true</code> if speculative execution
* should be turned on for map tasks,
* else <code>false</code>.
*/
public void setMapSpeculativeExecution(boolean speculativeExecution) {
ensureState(JobState.DEFINE);
conf.setMapSpeculativeExecution(speculativeExecution);
}
/**
* Turn speculative execution on or off for this job for reduce tasks.
*
* @param speculativeExecution <code>true</code> if speculative execution
* should be turned on for reduce tasks,
* else <code>false</code>.
*/
public void setReduceSpeculativeExecution(boolean speculativeExecution) {
ensureState(JobState.DEFINE);
conf.setReduceSpeculativeExecution(speculativeExecution);
}
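// Illustrative usage (a hedged sketch): speculation can be tuned per phase,
// for example speculating maps while leaving reduces alone.
//
//   job.setMapSpeculativeExecution(true);
//   job.setReduceSpeculativeExecution(false);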
/**
* Specify whether job-setup and job-cleanup are needed for the job
*
* @param needed If <code>true</code>, job-setup and job-cleanup will be
* considered from {@link OutputCommitter}
* else ignored.
*/
public void setJobSetupCleanupNeeded(boolean needed) {
ensureState(JobState.DEFINE);
conf.setBoolean(SETUP_CLEANUP_NEEDED, needed);
}
/**
* Set the given set of archives
* @param archives The list of archives that need to be localized
*/
public void setCacheArchives(URI[] archives) {
ensureState(JobState.DEFINE);
DistributedCache.setCacheArchives(archives, conf);
}
/**
* Set the given set of files
* @param files The list of files that need to be localized
*/
public void setCacheFiles(URI[] files) {
ensureState(JobState.DEFINE);
DistributedCache.setCacheFiles(files, conf);
}
/**
* Add an archive to be localized
* @param uri The uri of the cache to be localized
*/
public void addCacheArchive(URI uri) {
ensureState(JobState.DEFINE);
DistributedCache.addCacheArchive(uri, conf);
}
/**
* Add a file to be localized
* @param uri The uri of the cache to be localized
*/
public void addCacheFile(URI uri) {
ensureState(JobState.DEFINE);
DistributedCache.addCacheFile(uri, conf);
}
/**
* Add a file path to the current set of classpath entries. It adds the file
* to the cache as well.
*
* Files added with this method will not be unpacked while being added to the
* classpath.
* To add archives to classpath, use the {@link #addArchiveToClassPath(Path)}
* method instead.
*
* @param file Path of the file to be added
*/
public void addFileToClassPath(Path file)
throws IOException {
ensureState(JobState.DEFINE);
DistributedCache.addFileToClassPath(file, conf, file.getFileSystem(conf));
}
/**
* Add an archive path to the current set of classpath entries. It adds the
* archive to cache as well.
*
* Archive files will be unpacked and added to the classpath
* when being distributed.
*
* @param archive Path of the archive to be added
*/
public void addArchiveToClassPath(Path archive)
throws IOException {
ensureState(JobState.DEFINE);
DistributedCache.addArchiveToClassPath(archive, conf, archive.getFileSystem(conf));
}
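// Illustrative usage (a hedged sketch; the URIs and paths are hypothetical):
//
//   job.addCacheFile(new URI("hdfs:///apps/lookup.txt#lookup")); // symlink "lookup"
//   job.addFileToClassPath(new Path("/apps/libs/helper.jar"));   // stays packed
//   job.addArchiveToClassPath(new Path("/apps/libs/deps.zip"));  // unpacked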
/**
* Originally intended to enable symlinks, but currently symlinks cannot be
* disabled.
*/
@Deprecated
public void createSymlink() {
ensureState(JobState.DEFINE);
DistributedCache.createSymlink(conf);
}
/**
* Expert: Set the maximum number of attempts that will be made to run a
* map task.
*
* @param n the number of attempts per map task.
*/
public void setMaxMapAttempts(int n) {
ensureState(JobState.DEFINE);
conf.setMaxMapAttempts(n);
}
/**
* Expert: Set the maximum number of attempts that will be made to run a
* reduce task.
*
* @param n the number of attempts per reduce task.
*/
public void setMaxReduceAttempts(int n) {
ensureState(JobState.DEFINE);
conf.setMaxReduceAttempts(n);
}
/**
* Set whether the system should collect profiler information for some of
* the tasks in this job. The information is stored in the user log
* directory.
* @param newValue true means it should be gathered
*/
public void setProfileEnabled(boolean newValue) {
ensureState(JobState.DEFINE);
conf.setProfileEnabled(newValue);
}
/**
* Set the profiler configuration arguments. If the string contains a '%s' it
* will be replaced with the name of the profiling output file when the task
* runs.
*
* This value is passed to the task child JVM on the command line.
*
* @param value the configuration string
*/
public void setProfileParams(String value) {
ensureState(JobState.DEFINE);
conf.setProfileParams(value);
}
/**
* Set the ranges of maps or reduces to profile. setProfileEnabled(true)
* must also be called.
* @param isMap true to set ranges for map tasks, false for reduce tasks
* @param newValue a set of integer ranges of the task ids
*/
public void setProfileTaskRange(boolean isMap, String newValue) {
ensureState(JobState.DEFINE);
conf.setProfileTaskRange(isMap, newValue);
}
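// Illustrative usage (a hedged sketch): profile the first three map tasks
// and write hprof output into the task log directory; the %s is replaced
// with the profiling output file name.
//
//   job.setProfileEnabled(true);
//   job.setProfileTaskRange(true, "0-2");
//   job.setProfileParams("-agentlib:hprof=cpu=samples,heap=sites," +
//       "force=n,thread=y,verbose=n,file=%s");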
private void ensureNotSet(String attr, String msg) throws IOException {
if (conf.get(attr) != null) {
throw new IOException(attr + " is incompatible with " + msg + " mode.");
}
}
/**
* Sets the flag that will allow the JobTracker to cancel the HDFS delegation
* tokens upon job completion. Defaults to true.
*/
public void setCancelDelegationTokenUponJobCompletion(boolean value) {
ensureState(JobState.DEFINE);
conf.setBoolean(JOB_CANCEL_DELEGATION_TOKEN, value);
}
/**
* Default to the new APIs unless they are explicitly set or the old mapper or
* reducer attributes are used.
* @throws IOException if the configuration is inconsistent
*/
private void setUseNewAPI() throws IOException {
int numReduces = conf.getNumReduceTasks();
String oldMapperClass = "mapred.mapper.class";
String oldReduceClass = "mapred.reducer.class";
conf.setBooleanIfUnset("mapred.mapper.new-api",
conf.get(oldMapperClass) == null);
if (conf.getUseNewMapper()) {
String mode = "new map API";
ensureNotSet("mapred.input.format.class", mode);
ensureNotSet(oldMapperClass, mode);
if (numReduces != 0) {
ensureNotSet("mapred.partitioner.class", mode);
} else {
ensureNotSet("mapred.output.format.class", mode);
}
} else {
String mode = "map compatability";
ensureNotSet(INPUT_FORMAT_CLASS_ATTR, mode);
ensureNotSet(MAP_CLASS_ATTR, mode);
if (numReduces != 0) {
ensureNotSet(PARTITIONER_CLASS_ATTR, mode);
} else {
ensureNotSet(OUTPUT_FORMAT_CLASS_ATTR, mode);
}
}
if (numReduces != 0) {
conf.setBooleanIfUnset("mapred.reducer.new-api",
conf.get(oldReduceClass) == null);
if (conf.getUseNewReducer()) {
String mode = "new reduce API";
ensureNotSet("mapred.output.format.class", mode);
ensureNotSet(oldReduceClass, mode);
} else {
String mode = "reduce compatability";
ensureNotSet(OUTPUT_FORMAT_CLASS_ATTR, mode);
ensureNotSet(REDUCE_CLASS_ATTR, mode);
}
}
}
private synchronized void connect()
throws IOException, InterruptedException, ClassNotFoundException {
if (cluster == null) {
cluster =
ugi.doAs(new PrivilegedExceptionAction<Cluster>() {
public Cluster run()
throws IOException, InterruptedException,
ClassNotFoundException {
return new Cluster(getConfiguration());
}
});
}
}
boolean isConnected() {
return cluster != null;
}
/** Only for mocking via unit tests. */
@Private
public JobSubmitter getJobSubmitter(FileSystem fs,
ClientProtocol submitClient) throws IOException {
return new JobSubmitter(fs, submitClient);
}
/**
* Submit the job to the cluster and return immediately.
* @throws IOException
*/
public void submit()
throws IOException, InterruptedException, ClassNotFoundException {
ensureState(JobState.DEFINE);
setUseNewAPI();
connect();
final JobSubmitter submitter =
getJobSubmitter(cluster.getFileSystem(), cluster.getClient());
status = ugi.doAs(new PrivilegedExceptionAction<JobStatus>() {
public JobStatus run() throws IOException, InterruptedException,
ClassNotFoundException {
return submitter.submitJobInternal(Job.this, cluster);
}
});
state = JobState.RUNNING;
LOG.info("The url to track the job: " + getTrackingURL());
}
/**
* Submit the job to the cluster and wait for it to finish.
* @param verbose print the progress to the user
* @return true if the job succeeded
* @throws IOException thrown if the communication with the
* <code>JobTracker</code> is lost
*/
public boolean waitForCompletion(boolean verbose
) throws IOException, InterruptedException,
ClassNotFoundException {
if (state == JobState.DEFINE) {
submit();
}
if (verbose) {
monitorAndPrintJob();
} else {
// get the completion poll interval from the client.
int completionPollIntervalMillis =
Job.getCompletionPollInterval(cluster.getConf());
while (!isComplete()) {
try {
Thread.sleep(completionPollIntervalMillis);
} catch (InterruptedException ie) {
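// swallow the interrupt and keep polling until the job completes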
}
}
}
return isSuccessful();
}
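// Illustrative usage (a hedged sketch, not part of this class): a typical
// driver configures the job and blocks on waitForCompletion(). The classes
// WordCountDriver, MyMapper and MyReducer are hypothetical placeholders.
//
//   Job job = Job.getInstance(new Configuration(), "word count");
//   job.setJarByClass(WordCountDriver.class);
//   job.setMapperClass(MyMapper.class);
//   job.setReducerClass(MyReducer.class);
//   job.setOutputKeyClass(Text.class);
//   job.setOutputValueClass(IntWritable.class);
//   System.exit(job.waitForCompletion(true) ? 0 : 1);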
/**
* Monitor a job and print status in real-time as progress is made and tasks
* fail.
* @return true if the job succeeded
* @throws IOException if communication to the JobTracker fails
*/
public boolean monitorAndPrintJob()
throws IOException, InterruptedException {
String lastReport = null;
Job.TaskStatusFilter filter;
Configuration clientConf = getConfiguration();
filter = Job.getTaskOutputFilter(clientConf);
JobID jobId = getJobID();
LOG.info("Running job: " + jobId);
int eventCounter = 0;
boolean profiling = getProfileEnabled();
IntegerRanges mapRanges = getProfileTaskRange(true);
IntegerRanges reduceRanges = getProfileTaskRange(false);
int progMonitorPollIntervalMillis =
Job.getProgressPollInterval(clientConf);
/* make sure to report full progress after the job is done */
boolean reportedAfterCompletion = false;
boolean reportedUberMode = false;
while (!isComplete() || !reportedAfterCompletion) {
if (isComplete()) {
reportedAfterCompletion = true;
} else {
Thread.sleep(progMonitorPollIntervalMillis);
}
if (status.getState() == JobStatus.State.PREP) {
continue;
}
if (!reportedUberMode) {
reportedUberMode = true;
LOG.info("Job " + jobId + " running in uber mode : " + isUber());
}
String report =
(" map " + StringUtils.formatPercent(mapProgress(), 0)+
" reduce " +
StringUtils.formatPercent(reduceProgress(), 0));
if (!report.equals(lastReport)) {
LOG.info(report);
lastReport = report;
}
TaskCompletionEvent[] events =
getTaskCompletionEvents(eventCounter, 10);
eventCounter += events.length;
printTaskEvents(events, filter, profiling, mapRanges, reduceRanges);
}
boolean success = isSuccessful();
if (success) {
LOG.info("Job " + jobId + " completed successfully");
} else {
LOG.info("Job " + jobId + " failed with state " + status.getState() +
" due to: " + status.getFailureInfo());
}
Counters counters = getCounters();
if (counters != null) {
LOG.info(counters.toString());
}
return success;
}
private void printTaskEvents(TaskCompletionEvent[] events,
Job.TaskStatusFilter filter, boolean profiling, IntegerRanges mapRanges,
IntegerRanges reduceRanges) throws IOException, InterruptedException {
for (TaskCompletionEvent event : events) {
switch (filter) {
case NONE:
break;
case SUCCEEDED:
if (event.getStatus() ==
TaskCompletionEvent.Status.SUCCEEDED) {
LOG.info(event.toString());
}
break;
case FAILED:
if (event.getStatus() ==
TaskCompletionEvent.Status.FAILED) {
LOG.info(event.toString());
// Displaying the task diagnostic information
TaskAttemptID taskId = event.getTaskAttemptId();
String[] taskDiagnostics = getTaskDiagnostics(taskId);
if (taskDiagnostics != null) {
for (String diagnostics : taskDiagnostics) {
System.err.println(diagnostics);
}
}
}
break;
case KILLED:
if (event.getStatus() == TaskCompletionEvent.Status.KILLED){
LOG.info(event.toString());
}
break;
case ALL:
LOG.info(event.toString());
break;
}
}
}
/** The interval at which monitorAndPrintJob() prints status */
public static int getProgressPollInterval(Configuration conf) {
// Read progress monitor poll interval from config. Default is 1 second.
int progMonitorPollIntervalMillis = conf.getInt(
PROGRESS_MONITOR_POLL_INTERVAL_KEY, DEFAULT_MONITOR_POLL_INTERVAL);
if (progMonitorPollIntervalMillis < 1) {
LOG.warn(PROGRESS_MONITOR_POLL_INTERVAL_KEY +
" has been set to an invalid value; "
+ " replacing with " + DEFAULT_MONITOR_POLL_INTERVAL);
progMonitorPollIntervalMillis = DEFAULT_MONITOR_POLL_INTERVAL;
}
return progMonitorPollIntervalMillis;
}
/** The interval at which waitForCompletion() should check. */
public static int getCompletionPollInterval(Configuration conf) {
int completionPollIntervalMillis = conf.getInt(
COMPLETION_POLL_INTERVAL_KEY, DEFAULT_COMPLETION_POLL_INTERVAL);
if (completionPollIntervalMillis < 1) {
LOG.warn(COMPLETION_POLL_INTERVAL_KEY +
" has been set to an invalid value; "
+ "replacing with " + DEFAULT_COMPLETION_POLL_INTERVAL);
completionPollIntervalMillis = DEFAULT_COMPLETION_POLL_INTERVAL;
}
return completionPollIntervalMillis;
}
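// Illustrative usage (a hedged sketch): clients can tune both poll
// intervals through the configuration keys read above.
//
//   conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 10000);       // 10s
//   conf.setInt(Job.PROGRESS_MONITOR_POLL_INTERVAL_KEY, 5000);  // 5s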
/**
* Get the task output filter.
*
* @param conf the configuration.
* @return the filter level.
*/
public static TaskStatusFilter getTaskOutputFilter(Configuration conf) {
return TaskStatusFilter.valueOf(conf.get(Job.OUTPUT_FILTER, "FAILED"));
}
/**
* Modify the Configuration to set the task output filter.
*
* @param conf the Configuration to modify.
* @param newValue the value to set.
*/
public static void setTaskOutputFilter(Configuration conf,
TaskStatusFilter newValue) {
conf.set(Job.OUTPUT_FILTER, newValue.toString());
}
public boolean isUber() throws IOException, InterruptedException {
ensureState(JobState.RUNNING);
updateStatus();
return status.isUber();
}
/**
* Get the reservation to which the job is submitted, if any.
*
* @return the identifier of the job's reservation, or null if the job
* does not have any reservation associated with it
*/
public ReservationId getReservationId() {
return reservationId;
}
/**
* Set the reservation to which the job is submitted
*
* @param reservationId the reservationId to set
*/
public void setReservationId(ReservationId reservationId) {
this.reservationId = reservationId;
}
}
| 48,743 | 31.431138 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueAclsInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.StringInterner;
/**
* Class to encapsulate Queue ACLs for a particular
* user.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class QueueAclsInfo implements Writable {
private String queueName;
private String[] operations;
/**
* Default constructor for QueueAclsInfo.
*
*/
public QueueAclsInfo() {
}
/**
* Construct a new QueueAclsInfo object using the queue name and the
* queue operations array
*
* @param queueName Name of the job queue
* @param operations the queue operations allowed for the user
*/
public QueueAclsInfo(String queueName, String[] operations) {
this.queueName = queueName;
this.operations = operations;
}
/**
* Get queue name.
*
* @return name
*/
public String getQueueName() {
return queueName;
}
protected void setQueueName(String queueName) {
this.queueName = queueName;
}
/**
* Get operations allowed on the queue.
*
* @return array of String
*/
public String[] getOperations() {
return operations;
}
@Override
public void readFields(DataInput in) throws IOException {
queueName = StringInterner.weakIntern(Text.readString(in));
operations = WritableUtils.readStringArray(in);
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, queueName);
WritableUtils.writeStringArray(out, operations);
}
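// Illustrative usage (a hedged sketch of the Writable round trip; the
// operation names and the "out"/"in" streams are hypothetical):
//
//   QueueAclsInfo acls = new QueueAclsInfo("default",
//       new String[] {"submit-job", "administer-jobs"});
//   acls.write(out);        // serialize to a DataOutput
//   QueueAclsInfo copy = new QueueAclsInfo();
//   copy.readFields(in);    // rebuild from a DataInput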
}
| 2,582 | 25.90625 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.NumberFormat;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableUtils;
/**
* TaskID represents the immutable and unique identifier for
* a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
* execute the Map or Reduce Task, each of which is uniquely identified by
* their TaskAttemptID.
*
* TaskID consists of 3 parts. First part is the {@link JobID}, that this
* TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r'
* representing whether the task is a map task or a reduce task.
* And the third part is the task number. <br>
* An example TaskID is
* <code>task_200707121733_0003_m_000005</code>, which represents the
* fifth map task in the third job running at the jobtracker
* started at <code>200707121733</code>.
* <p>
* Applications should never construct or parse TaskID strings,
* but rather use appropriate constructors or the {@link #forName(String)}
* method.
*
* @see JobID
* @see TaskAttemptID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TaskID extends org.apache.hadoop.mapred.ID {
protected static final String TASK = "task";
protected static final NumberFormat idFormat = NumberFormat.getInstance();
public static final String TASK_ID_REGEX = TASK + "_(\\d+)_(\\d+)_" +
CharTaskTypeMaps.allTaskTypes + "_(\\d+)";
public static final Pattern taskIdPattern = Pattern.compile(TASK_ID_REGEX);
static {
idFormat.setGroupingUsed(false);
idFormat.setMinimumIntegerDigits(6);
}
private JobID jobId;
private TaskType type;
/**
* Constructs a TaskID object from given {@link JobID}.
* @param jobId JobID that this tip belongs to
* @param type the {@link TaskType} of the task
* @param id the tip number
*/
public TaskID(JobID jobId, TaskType type, int id) {
super(id);
if(jobId == null) {
throw new IllegalArgumentException("jobId cannot be null");
}
this.jobId = jobId;
this.type = type;
}
/**
* Constructs a TaskInProgressId object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param type the TaskType
* @param id the tip number
*/
public TaskID(String jtIdentifier, int jobId, TaskType type, int id) {
this(new JobID(jtIdentifier, jobId), type, id);
}
/**
* Constructs a TaskID object from given {@link JobID}.
* @param jobId JobID that this tip belongs to
* @param isMap whether the tip is a map
* @param id the tip number
*/
@Deprecated
public TaskID(JobID jobId, boolean isMap, int id) {
this(jobId, isMap ? TaskType.MAP : TaskType.REDUCE, id);
}
/**
* Constructs a TaskInProgressId object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param isMap whether the tip is a map
* @param id the tip number
*/
@Deprecated
public TaskID(String jtIdentifier, int jobId, boolean isMap, int id) {
this(new JobID(jtIdentifier, jobId), isMap, id);
}
public TaskID() {
jobId = new JobID();
}
/** Returns the {@link JobID} object that this tip belongs to */
public JobID getJobID() {
return jobId;
}
/**Returns whether this TaskID is a map ID */
@Deprecated
public boolean isMap() {
return type == TaskType.MAP;
}
/**
* Get the type of the task
*/
public TaskType getTaskType() {
return type;
}
@Override
public boolean equals(Object o) {
if (!super.equals(o))
return false;
TaskID that = (TaskID)o;
return this.type == that.type && this.jobId.equals(that.jobId);
}
/** Compare TaskInProgressIds first by jobIds, then by tip numbers. Reduces
* are defined as greater than maps. */
@Override
public int compareTo(ID o) {
TaskID that = (TaskID)o;
int jobComp = this.jobId.compareTo(that.jobId);
if(jobComp == 0) {
if(this.type == that.type) {
return this.id - that.id;
}
else {
return this.type.compareTo(that.type);
}
}
else return jobComp;
}
@Override
public String toString() {
return appendTo(new StringBuilder(TASK)).toString();
}
/**
* Add the unique string to the given builder.
* @param builder the builder to append to
* @return the builder that was passed in
*/
protected StringBuilder appendTo(StringBuilder builder) {
return jobId.appendTo(builder).
append(SEPARATOR).
append(CharTaskTypeMaps.getRepresentingCharacter(type)).
append(SEPARATOR).
append(idFormat.format(id));
}
@Override
public int hashCode() {
return jobId.hashCode() * 524287 + id;
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
jobId.readFields(in);
type = WritableUtils.readEnum(in, TaskType.class);
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
jobId.write(out);
WritableUtils.writeEnum(out, type);
}
/** Construct a TaskID object from given string
* @param str the string to parse
* @return constructed TaskID object or null if the given String is null
* @throws IllegalArgumentException if the given string is malformed
*/
public static TaskID forName(String str)
throws IllegalArgumentException {
if(str == null)
return null;
Matcher m = taskIdPattern.matcher(str);
if (m.matches()) {
return new org.apache.hadoop.mapred.TaskID(m.group(1),
Integer.parseInt(m.group(2)),
CharTaskTypeMaps.getTaskType(m.group(3).charAt(0)),
Integer.parseInt(m.group(4)));
}
String exceptionMsg = "TaskId string : " + str + " is not properly formed" +
"\nReason: " + m.toString();
throw new IllegalArgumentException(exceptionMsg);
}
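// Illustrative usage (a hedged sketch): forName() and toString() round-trip
// the canonical string form described in the class javadoc.
//
//   TaskID tid = TaskID.forName("task_200707121733_0003_m_000005");
//   // tid.getTaskType() == TaskType.MAP
//   // tid.toString().equals("task_200707121733_0003_m_000005")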
/**
* Gets the character representing the {@link TaskType}
* @param type the TaskType
* @return the character
*/
public static char getRepresentingCharacter(TaskType type) {
return CharTaskTypeMaps.getRepresentingCharacter(type);
}
/**
* Gets the {@link TaskType} corresponding to the character
* @param c the character
* @return the TaskType
*/
public static TaskType getTaskType(char c) {
return CharTaskTypeMaps.getTaskType(c);
}
public static String getAllTaskTypes() {
return CharTaskTypeMaps.allTaskTypes;
}
/**
* Maintains the mapping from the character representation of a task type to
* the enum class TaskType constants
*/
static class CharTaskTypeMaps {
private static EnumMap<TaskType, Character> typeToCharMap =
new EnumMap<TaskType,Character>(TaskType.class);
private static Map<Character, TaskType> charToTypeMap =
new HashMap<Character, TaskType>();
static String allTaskTypes = "(m|r|s|c|t)";
static {
setupTaskTypeToCharMapping();
setupCharToTaskTypeMapping();
}
private static void setupTaskTypeToCharMapping() {
typeToCharMap.put(TaskType.MAP, 'm');
typeToCharMap.put(TaskType.REDUCE, 'r');
typeToCharMap.put(TaskType.JOB_SETUP, 's');
typeToCharMap.put(TaskType.JOB_CLEANUP, 'c');
typeToCharMap.put(TaskType.TASK_CLEANUP, 't');
}
private static void setupCharToTaskTypeMapping() {
charToTypeMap.put('m', TaskType.MAP);
charToTypeMap.put('r', TaskType.REDUCE);
charToTypeMap.put('s', TaskType.JOB_SETUP);
charToTypeMap.put('c', TaskType.JOB_CLEANUP);
charToTypeMap.put('t', TaskType.TASK_CLEANUP);
}
static char getRepresentingCharacter(TaskType type) {
return typeToCharMap.get(type);
}
static TaskType getTaskType(char c) {
return charToTypeMap.get(c);
}
}
}
| 8,927 | 30.108014 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.security.Credentials;
/**
* A read-only view of the job that is provided to the tasks while they
* are running.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface JobContext extends MRJobConfig {
/**
* Return the configuration for the job.
* @return the shared configuration object
*/
public Configuration getConfiguration();
/**
* Get credentials for the job.
* @return credentials for the job
*/
public Credentials getCredentials();
/**
* Get the unique ID for the job.
* @return the object with the job id
*/
public JobID getJobID();
/**
* Get the configured number of reduce tasks for this job. Defaults to
* <code>1</code>.
* @return the number of reduce tasks for this job.
*/
public int getNumReduceTasks();
/**
* Get the current working directory for the default file system.
*
* @return the directory name.
*/
public Path getWorkingDirectory() throws IOException;
/**
* Get the key class for the job output data.
* @return the key class for the job output data.
*/
public Class<?> getOutputKeyClass();
/**
* Get the value class for job outputs.
* @return the value class for job outputs.
*/
public Class<?> getOutputValueClass();
/**
* Get the key class for the map output data. If it is not set, use the
* (final) output key class. This allows the map output key class to be
* different than the final output key class.
* @return the map output key class.
*/
public Class<?> getMapOutputKeyClass();
/**
* Get the value class for the map output data. If it is not set, use the
* (final) output value class. This allows the map output value class to be
* different than the final output value class.
*
* @return the map output value class.
*/
public Class<?> getMapOutputValueClass();
/**
* Get the user-specified job name. This is only used to identify the
* job to the user.
*
* @return the job's name, defaulting to "".
*/
public String getJobName();
/**
* Get the {@link InputFormat} class for the job.
*
* @return the {@link InputFormat} class for the job.
*/
public Class<? extends InputFormat<?,?>> getInputFormatClass()
throws ClassNotFoundException;
/**
* Get the {@link Mapper} class for the job.
*
* @return the {@link Mapper} class for the job.
*/
public Class<? extends Mapper<?,?,?,?>> getMapperClass()
throws ClassNotFoundException;
/**
* Get the combiner class for the job.
*
* @return the combiner class for the job.
*/
public Class<? extends Reducer<?,?,?,?>> getCombinerClass()
throws ClassNotFoundException;
/**
* Get the {@link Reducer} class for the job.
*
* @return the {@link Reducer} class for the job.
*/
public Class<? extends Reducer<?,?,?,?>> getReducerClass()
throws ClassNotFoundException;
/**
* Get the {@link OutputFormat} class for the job.
*
* @return the {@link OutputFormat} class for the job.
*/
public Class<? extends OutputFormat<?,?>> getOutputFormatClass()
throws ClassNotFoundException;
/**
* Get the {@link Partitioner} class for the job.
*
* @return the {@link Partitioner} class for the job.
*/
public Class<? extends Partitioner<?,?>> getPartitionerClass()
throws ClassNotFoundException;
/**
* Get the {@link RawComparator} comparator used to compare keys.
*
* @return the {@link RawComparator} comparator used to compare keys.
*/
public RawComparator<?> getSortComparator();
/**
* Get the pathname of the job's jar.
* @return the pathname
*/
public String getJar();
/**
* Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the combiner.
*
* @return comparator set by the user for grouping values.
* @see Job#setCombinerKeyGroupingComparatorClass(Class)
*/
public RawComparator<?> getCombinerKeyGroupingComparator();
/**
* Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the reduce.
*
* @return comparator set by the user for grouping values.
* @see Job#setGroupingComparatorClass(Class)
* @see #getCombinerKeyGroupingComparator()
*/
public RawComparator<?> getGroupingComparator();
/**
* Get whether job-setup and job-cleanup is needed for the job
*
* @return boolean
*/
public boolean getJobSetupCleanupNeeded();
/**
* Get whether task-cleanup is needed for the job
*
* @return boolean
*/
public boolean getTaskCleanupNeeded();
/**
* Get whether the task profiling is enabled.
* @return true if some tasks will be profiled
*/
public boolean getProfileEnabled();
/**
* Get the profiler configuration arguments.
*
* The default value for this property is
* "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
*
* @return the parameters to pass to the task child to configure profiling
*/
public String getProfileParams();
/**
* Get the range of maps or reduces to profile.
* @param isMap is the task a map?
* @return the task ranges
*/
public IntegerRanges getProfileTaskRange(boolean isMap);
/**
* Get the reported username for this job.
*
* @return the username
*/
public String getUser();
/**
* Originally intended to check if symlinks should be used, but currently
* symlinks cannot be disabled.
* @return true
*/
@Deprecated
public boolean getSymlink();
/**
* Get the archive entries in classpath as an array of Path
*/
public Path[] getArchiveClassPaths();
/**
* Get cache archives set in the Configuration
* @return A URI array of the caches set in the Configuration
* @throws IOException
*/
public URI[] getCacheArchives() throws IOException;
/**
* Get cache files set in the Configuration
* @return A URI array of the files set in the Configuration
* @throws IOException
*/
public URI[] getCacheFiles() throws IOException;
/**
* Return the path array of the localized caches
* @return A path array of localized caches
* @throws IOException
* @deprecated the array returned only includes the items that were
* downloaded. There is no way to map this to what is returned by
* {@link #getCacheArchives()}.
*/
@Deprecated
public Path[] getLocalCacheArchives() throws IOException;
/**
* Return the path array of the localized files
* @return A path array of localized files
* @throws IOException
* @deprecated the array returned only includes the items that were
* downloaded. There is no way to map this to what is returned by
* {@link #getCacheFiles()}.
*/
@Deprecated
public Path[] getLocalCacheFiles() throws IOException;
/**
* Get the file entries in classpath as an array of Path
*/
public Path[] getFileClassPaths();
/**
* Get the timestamps of the archives. Used by internal
* DistributedCache and MapReduce code.
* @return a string array of timestamps
*/
public String[] getArchiveTimestamps();
/**
* Get the timestamps of the files. Used by internal
* DistributedCache and MapReduce code.
* @return a string array of timestamps
*/
public String[] getFileTimestamps();
/**
* Get the configured maximum number of attempts that will be made to run a
* map task, as specified by the <code>mapred.map.max.attempts</code>
* property. If this property is not already set, the default is 4 attempts.
*
* @return the max number of attempts per map task.
*/
public int getMaxMapAttempts();
/**
* Get the configured maximum number of attempts that will be made to run a
* reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
* property. If this property is not already set, the default is 4 attempts.
*
* @return the max number of attempts per reduce task.
*/
public int getMaxReduceAttempts();
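// Illustrative usage (a hedged sketch; MyMapper is hypothetical): task code
// usually receives this view through its task context, e.g. in
// Mapper.setup():
//
//   protected void setup(Context context) {
//     Configuration conf = context.getConfiguration();
//     int maxAttempts = context.getMaxMapAttempts();
//   }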
}
| 9,333 | 28.077882 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.util.Apps;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface MRJobConfig {
// Put all of the attribute names in here so that Job and JobContext are
// consistent.
public static final String INPUT_FORMAT_CLASS_ATTR = "mapreduce.job.inputformat.class";
public static final String MAP_CLASS_ATTR = "mapreduce.job.map.class";
public static final String MAP_OUTPUT_COLLECTOR_CLASS_ATTR
= "mapreduce.job.map.output.collector.class";
public static final String COMBINE_CLASS_ATTR = "mapreduce.job.combine.class";
public static final String REDUCE_CLASS_ATTR = "mapreduce.job.reduce.class";
public static final String OUTPUT_FORMAT_CLASS_ATTR = "mapreduce.job.outputformat.class";
public static final String PARTITIONER_CLASS_ATTR = "mapreduce.job.partitioner.class";
public static final String SETUP_CLEANUP_NEEDED = "mapreduce.job.committer.setup.cleanup.needed";
public static final String TASK_CLEANUP_NEEDED = "mapreduce.job.committer.task.cleanup.needed";
public static final String TASK_PROGRESS_REPORT_INTERVAL =
"mapreduce.task.progress-report.interval";
/** The number of milliseconds between progress reports. */
public static final int DEFAULT_TASK_PROGRESS_REPORT_INTERVAL = 3000;
public static final String JAR = "mapreduce.job.jar";
public static final String ID = "mapreduce.job.id";
public static final String JOB_NAME = "mapreduce.job.name";
public static final String JAR_UNPACK_PATTERN = "mapreduce.job.jar.unpack.pattern";
public static final String USER_NAME = "mapreduce.job.user.name";
public static final String PRIORITY = "mapreduce.job.priority";
public static final String QUEUE_NAME = "mapreduce.job.queuename";
/**
* Node Label expression applicable for all Job containers.
*/
public static final String JOB_NODE_LABEL_EXP = "mapreduce.job.node-label-expression";
/**
* Node Label expression applicable for AM containers.
*/
public static final String AM_NODE_LABEL_EXP = "mapreduce.job.am.node-label-expression";
/**
* Node Label expression applicable for map containers.
*/
public static final String MAP_NODE_LABEL_EXP = "mapreduce.map.node-label-expression";
/**
* Node Label expression applicable for reduce containers.
*/
public static final String REDUCE_NODE_LABEL_EXP = "mapreduce.reduce.node-label-expression";
public static final String RESERVATION_ID = "mapreduce.job.reservation.id";
public static final String JOB_TAGS = "mapreduce.job.tags";
public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks";
public static final String SPLIT_FILE = "mapreduce.job.splitfile";
public static final String SPLIT_METAINFO_MAXSIZE = "mapreduce.job.split.metainfo.maxsize";
public static final long DEFAULT_SPLIT_METAINFO_MAXSIZE = 10000000L;
public static final String NUM_MAPS = "mapreduce.job.maps";
public static final String MAX_TASK_FAILURES_PER_TRACKER = "mapreduce.job.maxtaskfailures.per.tracker";
public static final String COMPLETED_MAPS_FOR_REDUCE_SLOWSTART = "mapreduce.job.reduce.slowstart.completedmaps";
public static final String NUM_REDUCES = "mapreduce.job.reduces";
public static final String SKIP_RECORDS = "mapreduce.job.skiprecords";
public static final String SKIP_OUTDIR = "mapreduce.job.skip.outdir";
// SPECULATIVE_SLOWNODE_THRESHOLD is obsolete and will be deleted in the future
@Deprecated
public static final String SPECULATIVE_SLOWNODE_THRESHOLD = "mapreduce.job.speculative.slownodethreshold";
public static final String SPECULATIVE_SLOWTASK_THRESHOLD = "mapreduce.job.speculative.slowtaskthreshold";
// SPECULATIVECAP is obsolete and will be deleted in the future
@Deprecated
public static final String SPECULATIVECAP = "mapreduce.job.speculative.speculativecap";
public static final String SPECULATIVECAP_RUNNING_TASKS =
"mapreduce.job.speculative.speculative-cap-running-tasks";
public static final double DEFAULT_SPECULATIVECAP_RUNNING_TASKS =
0.1;
public static final String SPECULATIVECAP_TOTAL_TASKS =
"mapreduce.job.speculative.speculative-cap-total-tasks";
public static final double DEFAULT_SPECULATIVECAP_TOTAL_TASKS =
0.01;
public static final String SPECULATIVE_MINIMUM_ALLOWED_TASKS =
"mapreduce.job.speculative.minimum-allowed-tasks";
public static final int DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS =
10;
public static final String SPECULATIVE_RETRY_AFTER_NO_SPECULATE =
"mapreduce.job.speculative.retry-after-no-speculate";
public static final long DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE =
1000L;
public static final String SPECULATIVE_RETRY_AFTER_SPECULATE =
"mapreduce.job.speculative.retry-after-speculate";
public static final long DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE =
15000L;
public static final String JOB_LOCAL_DIR = "mapreduce.job.local.dir";
public static final String OUTPUT_KEY_CLASS = "mapreduce.job.output.key.class";
public static final String OUTPUT_VALUE_CLASS = "mapreduce.job.output.value.class";
public static final String KEY_COMPARATOR = "mapreduce.job.output.key.comparator.class";
public static final String COMBINER_GROUP_COMPARATOR_CLASS = "mapreduce.job.combiner.group.comparator.class";
public static final String GROUP_COMPARATOR_CLASS = "mapreduce.job.output.group.comparator.class";
public static final String WORKING_DIR = "mapreduce.job.working.dir";
public static final String CLASSPATH_ARCHIVES = "mapreduce.job.classpath.archives";
public static final String CLASSPATH_FILES = "mapreduce.job.classpath.files";
public static final String CACHE_FILES = "mapreduce.job.cache.files";
public static final String CACHE_ARCHIVES = "mapreduce.job.cache.archives";
public static final String CACHE_FILES_SIZES = "mapreduce.job.cache.files.filesizes"; // internal use only
public static final String CACHE_ARCHIVES_SIZES = "mapreduce.job.cache.archives.filesizes"; // ditto
public static final String CACHE_LOCALFILES = "mapreduce.job.cache.local.files";
public static final String CACHE_LOCALARCHIVES = "mapreduce.job.cache.local.archives";
public static final String CACHE_FILE_TIMESTAMPS = "mapreduce.job.cache.files.timestamps";
public static final String CACHE_ARCHIVES_TIMESTAMPS = "mapreduce.job.cache.archives.timestamps";
public static final String CACHE_FILE_VISIBILITIES = "mapreduce.job.cache.files.visibilities";
public static final String CACHE_ARCHIVES_VISIBILITIES = "mapreduce.job.cache.archives.visibilities";
/**
* @deprecated Symlinks are always on and cannot be disabled.
*/
@Deprecated
public static final String CACHE_SYMLINK = "mapreduce.job.cache.symlink.create";
public static final String USER_LOG_RETAIN_HOURS = "mapreduce.job.userlog.retain.hours";
public static final String MAPREDUCE_JOB_USER_CLASSPATH_FIRST = "mapreduce.job.user.classpath.first";
public static final String MAPREDUCE_JOB_CLASSLOADER = "mapreduce.job.classloader";
/**
* A comma-separated list of services that function as ShuffleProvider aux-services
* (in addition to the built-in ShuffleHandler).
* These services can serve shuffle requests from reduce tasks.
*/
public static final String MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES = "mapreduce.job.shuffle.provider.services";
public static final String MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES = "mapreduce.job.classloader.system.classes";
public static final String MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG = "mapreduce.jvm.system-properties-to-log";
public static final String DEFAULT_MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG =
"os.name,os.version,java.home,java.runtime.version,java.vendor," +
"java.version,java.vm.name,java.class.path,java.io.tmpdir,user.dir,user.name";
public static final String IO_SORT_FACTOR = "mapreduce.task.io.sort.factor";
public static final String IO_SORT_MB = "mapreduce.task.io.sort.mb";
public static final String INDEX_CACHE_MEMORY_LIMIT = "mapreduce.task.index.cache.limit.bytes";
public static final String PRESERVE_FAILED_TASK_FILES = "mapreduce.task.files.preserve.failedtasks";
public static final String PRESERVE_FILES_PATTERN = "mapreduce.task.files.preserve.filepattern";
public static final String TASK_DEBUGOUT_LINES = "mapreduce.task.debugout.lines";
public static final String RECORDS_BEFORE_PROGRESS = "mapreduce.task.merge.progress.records";
public static final String SKIP_START_ATTEMPTS = "mapreduce.task.skip.start.attempts";
public static final String TASK_ATTEMPT_ID = "mapreduce.task.attempt.id";
public static final String TASK_ISMAP = "mapreduce.task.ismap";
public static final boolean DEFAULT_TASK_ISMAP = true;
public static final String TASK_PARTITION = "mapreduce.task.partition";
public static final String TASK_PROFILE = "mapreduce.task.profile";
public static final String TASK_PROFILE_PARAMS = "mapreduce.task.profile.params";
public static final String DEFAULT_TASK_PROFILE_PARAMS =
"-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,"
+ "verbose=n,file=%s";
public static final String NUM_MAP_PROFILES = "mapreduce.task.profile.maps";
public static final String NUM_REDUCE_PROFILES = "mapreduce.task.profile.reduces";
public static final String TASK_MAP_PROFILE_PARAMS = "mapreduce.task.profile.map.params";
public static final String TASK_REDUCE_PROFILE_PARAMS = "mapreduce.task.profile.reduce.params";
public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
public static final String TASK_TIMEOUT_CHECK_INTERVAL_MS = "mapreduce.task.timeout.check-interval-ms";
public static final String TASK_EXIT_TIMEOUT = "mapreduce.task.exit.timeout";
public static final int TASK_EXIT_TIMEOUT_DEFAULT = 60 * 1000;
public static final String TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS = "mapreduce.task.exit.timeout.check-interval-ms";
public static final int TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS_DEFAULT = 20 * 1000;
public static final String TASK_ID = "mapreduce.task.id";
public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
public static final String TASK_USERLOG_LIMIT = "mapreduce.task.userlog.limit.kb";
public static final String MAP_SORT_SPILL_PERCENT = "mapreduce.map.sort.spill.percent";
public static final String MAP_INPUT_FILE = "mapreduce.map.input.file";
public static final String MAP_INPUT_PATH = "mapreduce.map.input.length";
public static final String MAP_INPUT_START = "mapreduce.map.input.start";
public static final String MAP_MEMORY_MB = "mapreduce.map.memory.mb";
public static final int DEFAULT_MAP_MEMORY_MB = 1024;
public static final String MAP_CPU_VCORES = "mapreduce.map.cpu.vcores";
public static final int DEFAULT_MAP_CPU_VCORES = 1;
public static final String MAP_ENV = "mapreduce.map.env";
public static final String MAP_JAVA_OPTS = "mapreduce.map.java.opts";
public static final String MAP_MAX_ATTEMPTS = "mapreduce.map.maxattempts";
public static final String MAP_DEBUG_SCRIPT = "mapreduce.map.debug.script";
public static final String MAP_SPECULATIVE = "mapreduce.map.speculative";
public static final String MAP_FAILURES_MAX_PERCENT = "mapreduce.map.failures.maxpercent";
public static final String MAP_SKIP_INCR_PROC_COUNT = "mapreduce.map.skip.proc-count.auto-incr";
public static final String MAP_SKIP_MAX_RECORDS = "mapreduce.map.skip.maxrecords";
public static final String MAP_COMBINE_MIN_SPILLS = "mapreduce.map.combine.minspills";
public static final String MAP_OUTPUT_COMPRESS = "mapreduce.map.output.compress";
public static final String MAP_OUTPUT_COMPRESS_CODEC = "mapreduce.map.output.compress.codec";
public static final String MAP_OUTPUT_KEY_CLASS = "mapreduce.map.output.key.class";
public static final String MAP_OUTPUT_VALUE_CLASS = "mapreduce.map.output.value.class";
public static final String MAP_OUTPUT_KEY_FIELD_SEPERATOR = "mapreduce.map.output.key.field.separator";
public static final String MAP_LOG_LEVEL = "mapreduce.map.log.level";
public static final String REDUCE_LOG_LEVEL = "mapreduce.reduce.log.level";
public static final String DEFAULT_LOG_LEVEL = "INFO";
public static final String REDUCE_MERGE_INMEM_THRESHOLD = "mapreduce.reduce.merge.inmem.threshold";
public static final String REDUCE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.input.buffer.percent";
public static final String REDUCE_MARKRESET_BUFFER_PERCENT = "mapreduce.reduce.markreset.buffer.percent";
public static final String REDUCE_MARKRESET_BUFFER_SIZE = "mapreduce.reduce.markreset.buffer.size";
public static final String REDUCE_MEMORY_MB = "mapreduce.reduce.memory.mb";
public static final int DEFAULT_REDUCE_MEMORY_MB = 1024;
public static final String REDUCE_CPU_VCORES = "mapreduce.reduce.cpu.vcores";
public static final int DEFAULT_REDUCE_CPU_VCORES = 1;
public static final String REDUCE_MEMORY_TOTAL_BYTES = "mapreduce.reduce.memory.totalbytes";
public static final String SHUFFLE_INPUT_BUFFER_PERCENT = "mapreduce.reduce.shuffle.input.buffer.percent";
public static final float DEFAULT_SHUFFLE_INPUT_BUFFER_PERCENT = 0.70f;
public static final String SHUFFLE_MEMORY_LIMIT_PERCENT
= "mapreduce.reduce.shuffle.memory.limit.percent";
public static final String SHUFFLE_MERGE_PERCENT = "mapreduce.reduce.shuffle.merge.percent";
public static final float DEFAULT_SHUFFLE_MERGE_PERCENT = 0.66f;
public static final String REDUCE_FAILURES_MAXPERCENT = "mapreduce.reduce.failures.maxpercent";
public static final String REDUCE_ENV = "mapreduce.reduce.env";
public static final String REDUCE_JAVA_OPTS = "mapreduce.reduce.java.opts";
public static final String MAPREDUCE_JOB_DIR = "mapreduce.job.dir";
public static final String REDUCE_MAX_ATTEMPTS = "mapreduce.reduce.maxattempts";
public static final String SHUFFLE_PARALLEL_COPIES = "mapreduce.reduce.shuffle.parallelcopies";
public static final String REDUCE_DEBUG_SCRIPT = "mapreduce.reduce.debug.script";
public static final String REDUCE_SPECULATIVE = "mapreduce.reduce.speculative";
public static final String SHUFFLE_CONNECT_TIMEOUT = "mapreduce.reduce.shuffle.connect.timeout";
public static final String SHUFFLE_READ_TIMEOUT = "mapreduce.reduce.shuffle.read.timeout";
public static final String SHUFFLE_FETCH_FAILURES = "mapreduce.reduce.shuffle.maxfetchfailures";
public static final String MAX_ALLOWED_FETCH_FAILURES_FRACTION = "mapreduce.reduce.shuffle.max-fetch-failures-fraction";
public static final float DEFAULT_MAX_ALLOWED_FETCH_FAILURES_FRACTION = 0.5f;
public static final String MAX_FETCH_FAILURES_NOTIFICATIONS = "mapreduce.reduce.shuffle.max-fetch-failures-notifications";
public static final int DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS = 3;
public static final String SHUFFLE_FETCH_RETRY_INTERVAL_MS = "mapreduce.reduce.shuffle.fetch.retry.interval-ms";
/** Default interval that fetcher retry to fetch during NM restart.*/
public final static int DEFAULT_SHUFFLE_FETCH_RETRY_INTERVAL_MS = 1000;
public static final String SHUFFLE_FETCH_RETRY_TIMEOUT_MS = "mapreduce.reduce.shuffle.fetch.retry.timeout-ms";
public static final String SHUFFLE_FETCH_RETRY_ENABLED = "mapreduce.reduce.shuffle.fetch.retry.enabled";
public static final String SHUFFLE_NOTIFY_READERROR = "mapreduce.reduce.shuffle.notify.readerror";
public static final String MAX_SHUFFLE_FETCH_RETRY_DELAY = "mapreduce.reduce.shuffle.retry-delay.max.ms";
public static final long DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY = 60000;
public static final String MAX_SHUFFLE_FETCH_HOST_FAILURES = "mapreduce.reduce.shuffle.max-host-failures";
public static final int DEFAULT_MAX_SHUFFLE_FETCH_HOST_FAILURES = 5;
public static final String REDUCE_SKIP_INCR_PROC_COUNT = "mapreduce.reduce.skip.proc-count.auto-incr";
public static final String REDUCE_SKIP_MAXGROUPS = "mapreduce.reduce.skip.maxgroups";
public static final String REDUCE_MEMTOMEM_THRESHOLD = "mapreduce.reduce.merge.memtomem.threshold";
public static final String REDUCE_MEMTOMEM_ENABLED = "mapreduce.reduce.merge.memtomem.enabled";
public static final String COMBINE_RECORDS_BEFORE_PROGRESS = "mapreduce.task.combine.progress.records";
public static final String JOB_NAMENODES = "mapreduce.job.hdfs-servers";
public static final String JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE = "mapreduce.job.hdfs-servers.token-renewal.exclude";
public static final String JOB_JOBTRACKER_ID = "mapreduce.job.kerberos.jtprinicipal";
public static final String JOB_CANCEL_DELEGATION_TOKEN = "mapreduce.job.complete.cancel.delegation.tokens";
public static final String JOB_ACL_VIEW_JOB = "mapreduce.job.acl-view-job";
public static final String DEFAULT_JOB_ACL_VIEW_JOB = " ";
public static final String JOB_ACL_MODIFY_JOB = "mapreduce.job.acl-modify-job";
public static final String DEFAULT_JOB_ACL_MODIFY_JOB = " ";
public static final String JOB_RUNNING_MAP_LIMIT =
"mapreduce.job.running.map.limit";
public static final int DEFAULT_JOB_RUNNING_MAP_LIMIT = 0;
public static final String JOB_RUNNING_REDUCE_LIMIT =
"mapreduce.job.running.reduce.limit";
public static final int DEFAULT_JOB_RUNNING_REDUCE_LIMIT = 0;
/* config for tracking the local file where all the credentials for the
* job are stored.
*/
public static final String MAPREDUCE_JOB_CREDENTIALS_BINARY =
"mapreduce.job.credentials.binary";
/* Configs for tracking ids of tokens used by a job */
public static final String JOB_TOKEN_TRACKING_IDS_ENABLED =
"mapreduce.job.token.tracking.ids.enabled";
public static final boolean DEFAULT_JOB_TOKEN_TRACKING_IDS_ENABLED = false;
public static final String JOB_TOKEN_TRACKING_IDS =
"mapreduce.job.token.tracking.ids";
public static final String JOB_SUBMITHOST =
"mapreduce.job.submithostname";
public static final String JOB_SUBMITHOSTADDR =
"mapreduce.job.submithostaddress";
public static final String COUNTERS_MAX_KEY = "mapreduce.job.counters.max";
public static final int COUNTERS_MAX_DEFAULT = 120;
public static final String COUNTER_GROUP_NAME_MAX_KEY = "mapreduce.job.counters.group.name.max";
public static final int COUNTER_GROUP_NAME_MAX_DEFAULT = 128;
public static final String COUNTER_NAME_MAX_KEY = "mapreduce.job.counters.counter.name.max";
public static final int COUNTER_NAME_MAX_DEFAULT = 64;
public static final String COUNTER_GROUPS_MAX_KEY = "mapreduce.job.counters.groups.max";
public static final int COUNTER_GROUPS_MAX_DEFAULT = 50;
public static final String JOB_UBERTASK_ENABLE =
"mapreduce.job.ubertask.enable";
public static final String JOB_UBERTASK_MAXMAPS =
"mapreduce.job.ubertask.maxmaps";
public static final String JOB_UBERTASK_MAXREDUCES =
"mapreduce.job.ubertask.maxreduces";
public static final String JOB_UBERTASK_MAXBYTES =
"mapreduce.job.ubertask.maxbytes";
public static final String MAPREDUCE_JOB_EMIT_TIMELINE_DATA =
"mapreduce.job.emit-timeline-data";
public static final boolean DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA =
false;
public static final String MR_PREFIX = "yarn.app.mapreduce.";
public static final String MR_AM_PREFIX = MR_PREFIX + "am.";
/** The number of client retries to the AM - before reconnecting to the RM
* to fetch Application State.
*/
public static final String MR_CLIENT_TO_AM_IPC_MAX_RETRIES =
MR_PREFIX + "client-am.ipc.max-retries";
public static final int DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES = 3;
/** The number of client retries on socket timeouts to the AM - before
* reconnecting to the RM to fetch Application Status.
*/
public static final String MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS =
MR_PREFIX + "client-am.ipc.max-retries-on-timeouts";
public static final int
DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS = 3;
/**
* The number of client retries to the RM/HS before throwing exception.
*/
public static final String MR_CLIENT_MAX_RETRIES =
MR_PREFIX + "client.max-retries";
public static final int DEFAULT_MR_CLIENT_MAX_RETRIES = 3;
/**
* How many times to retry jobclient calls (via getjob)
*/
public static final String MR_CLIENT_JOB_MAX_RETRIES =
MR_PREFIX + "client.job.max-retries";
public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 0;
/**
* How long to wait between jobclient retries on failure
*/
public static final String MR_CLIENT_JOB_RETRY_INTERVAL =
MR_PREFIX + "client.job.retry-interval";
public static final long DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL =
2000;
/** The staging directory for map reduce.*/
public static final String MR_AM_STAGING_DIR =
MR_AM_PREFIX+"staging-dir";
public static final String DEFAULT_MR_AM_STAGING_DIR =
"/tmp/hadoop-yarn/staging";
/** The amount of memory the MR app master needs.*/
public static final String MR_AM_VMEM_MB =
MR_AM_PREFIX+"resource.mb";
public static final int DEFAULT_MR_AM_VMEM_MB = 1536;
/** The number of virtual cores the MR app master needs.*/
public static final String MR_AM_CPU_VCORES =
MR_AM_PREFIX+"resource.cpu-vcores";
public static final int DEFAULT_MR_AM_CPU_VCORES = 1;
/** Command line arguments passed to the MR app master.*/
public static final String MR_AM_COMMAND_OPTS =
MR_AM_PREFIX+"command-opts";
public static final String DEFAULT_MR_AM_COMMAND_OPTS = "-Xmx1024m";
/** Admin command opts passed to the MR app master.*/
public static final String MR_AM_ADMIN_COMMAND_OPTS =
MR_AM_PREFIX+"admin-command-opts";
public static final String DEFAULT_MR_AM_ADMIN_COMMAND_OPTS = "";
/** Root Logging level passed to the MR app master.*/
public static final String MR_AM_LOG_LEVEL =
MR_AM_PREFIX+"log.level";
public static final String DEFAULT_MR_AM_LOG_LEVEL = "INFO";
public static final String MR_AM_LOG_KB =
MR_AM_PREFIX + "container.log.limit.kb";
public static final int DEFAULT_MR_AM_LOG_KB = 0; // don't roll
public static final String MR_AM_LOG_BACKUPS =
MR_AM_PREFIX + "container.log.backups";
public static final int DEFAULT_MR_AM_LOG_BACKUPS = 0;
  /** The number of splits when reporting progress in MR. */
public static final String MR_AM_NUM_PROGRESS_SPLITS =
MR_AM_PREFIX+"num-progress-splits";
public static final int DEFAULT_MR_AM_NUM_PROGRESS_SPLITS = 12;
/**
   * Upper limit on the number of threads used to launch containers in the app
   * master. Expert-level config; you shouldn't need it in most cases.
*/
public static final String MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT =
MR_AM_PREFIX+"containerlauncher.thread-count-limit";
public static final int DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT =
500;
/**
* The initial size of thread pool to launch containers in the app master
*/
public static final String MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE =
MR_AM_PREFIX+"containerlauncher.threadpool-initial-size";
public static final int DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE =
10;
/** Number of threads to handle job client RPC requests.*/
public static final String MR_AM_JOB_CLIENT_THREAD_COUNT =
MR_AM_PREFIX + "job.client.thread-count";
public static final int DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT = 1;
/**
* Range of ports that the MapReduce AM can use when binding. Leave blank
* if you want all possible ports.
*/
public static final String MR_AM_JOB_CLIENT_PORT_RANGE =
MR_AM_PREFIX + "job.client.port-range";
/** Enable blacklisting of nodes in the job.*/
public static final String MR_AM_JOB_NODE_BLACKLISTING_ENABLE =
MR_AM_PREFIX + "job.node-blacklisting.enable";
/** Ignore blacklisting if a certain percentage of nodes have been blacklisted */
public static final String MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT =
MR_AM_PREFIX + "job.node-blacklisting.ignore-threshold-node-percent";
public static final int DEFAULT_MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERCENT =
33;
/** Enable job recovery.*/
public static final String MR_AM_JOB_RECOVERY_ENABLE =
MR_AM_PREFIX + "job.recovery.enable";
public static final boolean MR_AM_JOB_RECOVERY_ENABLE_DEFAULT = true;
/**
* Limit on the number of reducers that can be preempted to ensure that at
* least one map task can run if it needs to. Percentage between 0.0 and 1.0
*/
public static final String MR_AM_JOB_REDUCE_PREEMPTION_LIMIT =
MR_AM_PREFIX + "job.reduce.preemption.limit";
public static final float DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT = 0.5f;
/** AM ACL disabled. **/
public static final String JOB_AM_ACCESS_DISABLED =
"mapreduce.job.am-access-disabled";
public static final boolean DEFAULT_JOB_AM_ACCESS_DISABLED = false;
/**
* Limit reduces starting until a certain percentage of maps have finished.
* Percentage between 0.0 and 1.0
*/
public static final String MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT =
MR_AM_PREFIX + "job.reduce.rampup.limit";
public static final float DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT = 0.5f;
/** The class that should be used for speculative execution calculations.*/
public static final String MR_AM_JOB_SPECULATOR =
MR_AM_PREFIX + "job.speculator.class";
/** Class used to estimate task resource needs.*/
public static final String MR_AM_TASK_ESTIMATOR =
MR_AM_PREFIX + "job.task.estimator.class";
/** The lambda value in the smoothing function of the task estimator.*/
public static final String MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS =
MR_AM_PREFIX
+ "job.task.estimator.exponential.smooth.lambda-ms";
public static final long DEFAULT_MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS =
1000L * 60;
/** true if the smoothing rate should be exponential.*/
public static final String MR_AM_TASK_ESTIMATOR_EXPONENTIAL_RATE_ENABLE =
MR_AM_PREFIX + "job.task.estimator.exponential.smooth.rate";
/** The number of threads used to handle task RPC calls.*/
public static final String MR_AM_TASK_LISTENER_THREAD_COUNT =
MR_AM_PREFIX + "job.task.listener.thread-count";
public static final int DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT = 30;
/** How often the AM should send heartbeats to the RM.*/
public static final String MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS =
MR_AM_PREFIX + "scheduler.heartbeat.interval-ms";
public static final int DEFAULT_MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS = 1000;
/**
* If contact with RM is lost, the AM will wait MR_AM_TO_RM_WAIT_INTERVAL_MS
   * milliseconds before aborting. During this interval, the AM will still try
   * to contact the RM.
*/
public static final String MR_AM_TO_RM_WAIT_INTERVAL_MS =
MR_AM_PREFIX + "scheduler.connection.wait.interval-ms";
public static final int DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS = 360000;
/**
* How long to wait in milliseconds for the output committer to cancel
* an operation when the job is being killed
*/
public static final String MR_AM_COMMITTER_CANCEL_TIMEOUT_MS =
MR_AM_PREFIX + "job.committer.cancel-timeout";
public static final int DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS =
60 * 1000;
/**
* Defines a time window in milliseconds for output committer operations.
* If contact with the RM has occurred within this window then commit
* operations are allowed, otherwise the AM will not allow output committer
* operations until contact with the RM has been re-established.
*/
public static final String MR_AM_COMMIT_WINDOW_MS =
MR_AM_PREFIX + "job.committer.commit-window";
public static final int DEFAULT_MR_AM_COMMIT_WINDOW_MS = 10 * 1000;
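  // Illustrative sketch (not the actual AM implementation; conf and
  // lastRMContactTime are hypothetical): gating a commit on the window
  // defined above might look like
  //
  //   long window = conf.getLong(MR_AM_COMMIT_WINDOW_MS,
  //       DEFAULT_MR_AM_COMMIT_WINDOW_MS);
  //   boolean commitAllowed =
  //       System.currentTimeMillis() - lastRMContactTime <= window;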
/**
   * Boolean. Create the base dirs in the JobHistoryEventHandler.
   * Set to false for multi-user clusters. This is an internal config that
* is set by the MR framework and read by it too.
*/
public static final String MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR =
MR_AM_PREFIX + "create-intermediate-jh-base-dir";
public static final String MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS =
MR_AM_PREFIX + "history.max-unflushed-events";
public static final int DEFAULT_MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS =
200;
public static final String MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER =
MR_AM_PREFIX + "history.job-complete-unflushed-multiplier";
public static final int DEFAULT_MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER =
30;
public static final String MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS =
MR_AM_PREFIX + "history.complete-event-flush-timeout";
public static final long DEFAULT_MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS =
30 * 1000l;
public static final String MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD =
MR_AM_PREFIX + "history.use-batched-flush.queue-size.threshold";
public static final int DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD =
50;
public static final String MR_AM_HARD_KILL_TIMEOUT_MS =
MR_AM_PREFIX + "hard-kill-timeout-ms";
public static final long DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS =
10 * 1000l;
/**
   * The threshold, in seconds, after which an unsatisfied mapper request
   * triggers reducer preemption to free space. The default of 0 implies that
   * reducers should be preempted immediately after allocation if there is
   * currently no room for newly allocated mappers.
*/
public static final String MR_JOB_REDUCER_PREEMPT_DELAY_SEC =
"mapreduce.job.reducer.preempt.delay.sec";
public static final int DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC = 0;
public static final String MR_AM_ENV =
MR_AM_PREFIX + "env";
public static final String MR_AM_ADMIN_USER_ENV =
MR_AM_PREFIX + "admin.user.env";
public static final String MR_AM_PROFILE = MR_AM_PREFIX + "profile";
public static final boolean DEFAULT_MR_AM_PROFILE = false;
public static final String MR_AM_PROFILE_PARAMS = MR_AM_PREFIX
+ "profile.params";
public static final String MAPRED_MAP_ADMIN_JAVA_OPTS =
"mapreduce.admin.map.child.java.opts";
public static final String MAPRED_REDUCE_ADMIN_JAVA_OPTS =
"mapreduce.admin.reduce.child.java.opts";
public static final String DEFAULT_MAPRED_ADMIN_JAVA_OPTS =
"-Djava.net.preferIPv4Stack=true " +
"-Dhadoop.metrics.log.level=WARN ";
public static final String MAPRED_ADMIN_USER_SHELL =
"mapreduce.admin.user.shell";
public static final String DEFAULT_SHELL = "/bin/bash";
public static final String MAPRED_ADMIN_USER_ENV =
"mapreduce.admin.user.env";
public final String DEFAULT_MAPRED_ADMIN_USER_ENV =
Shell.WINDOWS ?
"PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin":
"LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native";
public static final String WORKDIR = "work";
public static final String OUTPUT = "output";
public static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
// Environment variables used by Pipes. (TODO: these
// do not appear to be used by current pipes source code!)
public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
// This should be the directory where splits file gets localized on the node
// running ApplicationMaster.
public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
// This should be the name of the localized job-configuration file on the node
// running ApplicationMaster and Task
public static final String JOB_CONF_FILE = "job.xml";
// This should be the name of the localized job-jar file on the node running
// individual containers/tasks.
public static final String JOB_JAR = "job.jar";
public static final String JOB_SPLIT = "job.split";
public static final String JOB_SPLIT_METAINFO = "job.splitmetainfo";
public static final String APPLICATION_MASTER_CLASS =
"org.apache.hadoop.mapreduce.v2.app.MRAppMaster";
public static final String MAPREDUCE_V2_CHILD_CLASS =
"org.apache.hadoop.mapred.YarnChild";
public static final String APPLICATION_ATTEMPT_ID =
"mapreduce.job.application.attempt.id";
/**
* Job end notification.
*/
public static final String MR_JOB_END_NOTIFICATION_URL =
"mapreduce.job.end-notification.url";
public static final String MR_JOB_END_NOTIFICATION_PROXY =
"mapreduce.job.end-notification.proxy";
public static final String MR_JOB_END_NOTIFICATION_TIMEOUT =
"mapreduce.job.end-notification.timeout";
public static final String MR_JOB_END_RETRY_ATTEMPTS =
"mapreduce.job.end-notification.retry.attempts";
public static final String MR_JOB_END_RETRY_INTERVAL =
"mapreduce.job.end-notification.retry.interval";
public static final String MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS =
"mapreduce.job.end-notification.max.attempts";
public static final String MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL =
"mapreduce.job.end-notification.max.retry.interval";
public static final int DEFAULT_MR_JOB_END_NOTIFICATION_TIMEOUT =
5000;
/*
* MR AM Service Authorization
*/
public static final String
MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL =
"security.job.task.protocol.acl";
public static final String
MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT =
"security.job.client.protocol.acl";
/**
* CLASSPATH for all YARN MapReduce applications.
*/
public static final String MAPREDUCE_APPLICATION_CLASSPATH =
"mapreduce.application.classpath";
public static final String MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE =
"mapreduce.job.log4j-properties-file";
/**
* Path to MapReduce framework archive
*/
public static final String MAPREDUCE_APPLICATION_FRAMEWORK_PATH =
"mapreduce.application.framework.path";
/**
* Default CLASSPATH for all YARN MapReduce applications constructed with
* platform-agnostic syntax.
*/
@Public
@Unstable
public final String DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH = Apps
.crossPlatformify("HADOOP_MAPRED_HOME")
+ "/share/hadoop/mapreduce/*,"
+ Apps.crossPlatformify("HADOOP_MAPRED_HOME")
+ "/share/hadoop/mapreduce/lib/*";
/**
* Default platform-specific CLASSPATH for all YARN MapReduce applications
* constructed based on client OS syntax.
* <p>
   * Note: Use {@link #DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH}
   * for cross-platform use, i.e. submitting an application from a Windows
   * client to a Linux/Unix server or vice versa.
* </p>
*/
public final String DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH =
Shell.WINDOWS ? "%HADOOP_MAPRED_HOME%\\share\\hadoop\\mapreduce\\*,"
+ "%HADOOP_MAPRED_HOME%\\share\\hadoop\\mapreduce\\lib\\*"
: "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,"
+ "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*";
public static final String WORKFLOW_ID = "mapreduce.workflow.id";
public static final String TASK_LOG_BACKUPS =
MR_PREFIX + "task.container.log.backups";
public static final int DEFAULT_TASK_LOG_BACKUPS = 0; // don't roll
public static final String REDUCE_SEPARATE_SHUFFLE_LOG =
MR_PREFIX + "shuffle.log.separate";
public static final boolean DEFAULT_REDUCE_SEPARATE_SHUFFLE_LOG = true;
public static final String SHUFFLE_LOG_BACKUPS =
MR_PREFIX + "shuffle.log.backups";
public static final int DEFAULT_SHUFFLE_LOG_BACKUPS = 0; // don't roll
public static final String SHUFFLE_LOG_KB =
MR_PREFIX + "shuffle.log.limit.kb";
public static final long DEFAULT_SHUFFLE_LOG_KB = 0L;
public static final String WORKFLOW_NAME = "mapreduce.workflow.name";
public static final String WORKFLOW_NODE_NAME =
"mapreduce.workflow.node.name";
public static final String WORKFLOW_ADJACENCY_PREFIX_STRING =
"mapreduce.workflow.adjacency.";
public static final String WORKFLOW_ADJACENCY_PREFIX_PATTERN =
"^mapreduce\\.workflow\\.adjacency\\..+";
public static final String WORKFLOW_TAGS = "mapreduce.workflow.tags";
/**
* The maximum number of application attempts.
   * It is an application-specific setting.
*/
public static final String MR_AM_MAX_ATTEMPTS = "mapreduce.am.max-attempts";
public static final int DEFAULT_MR_AM_MAX_ATTEMPTS = 2;
public static final String MR_APPLICATION_TYPE = "MAPREDUCE";
public static final String MR_ENCRYPTED_INTERMEDIATE_DATA =
"mapreduce.job.encrypted-intermediate-data";
public static final boolean DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA = false;
public static final String MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS =
"mapreduce.job.encrypted-intermediate-data-key-size-bits";
public static final int DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS =
128;
public static final String MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB =
"mapreduce.job.encrypted-intermediate-data.buffer.kb";
public static final int DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB =
128;
}
| 38,480 | 40.827174 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparable;
/**
* A general identifier, which internally stores the id
* as an integer. This is the super class of {@link JobID},
* {@link TaskID} and {@link TaskAttemptID}.
*
* @see JobID
* @see TaskID
* @see TaskAttemptID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ID implements WritableComparable<ID> {
protected static final char SEPARATOR = '_';
protected int id;
  /** Constructs an ID object from the given int. */
public ID(int id) {
this.id = id;
}
protected ID() {
}
  /** Returns the int which represents the identifier. */
public int getId() {
return id;
}
@Override
public String toString() {
return String.valueOf(id);
}
@Override
public int hashCode() {
return id;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if(o == null)
return false;
if (o.getClass() == this.getClass()) {
ID that = (ID) o;
return this.id == that.id;
}
else
return false;
}
/** Compare IDs by associated numbers */
  public int compareTo(ID that) {
    // subtraction can overflow for ids of opposite sign; compare explicitly
    return Integer.compare(this.id, that.id);
  }
public void readFields(DataInput in) throws IOException {
this.id = in.readInt();
}
public void write(DataOutput out) throws IOException {
out.writeInt(id);
}
}
| 2,416 | 24.442105 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ContextFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import org.apache.hadoop.conf.Configuration;
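// Illustrative usage sketch (original and newConf are hypothetical): clone a
// context so that a different Configuration is visible to downstream code.
//
//   JobContext cloned = ContextFactory.cloneContext(original, newConf);
//
// The clone keeps the original job/task identity but carries newConf,
// regardless of whether the hadoop-0.20 or the post-0.21 Context Objects
// API is on the classpath.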
/**
 * A factory that allows applications to deal with inconsistencies in the
 * MapReduce Context Objects API between hadoop-0.20 and later versions.
*/
public class ContextFactory {
private static final Constructor<?> JOB_CONTEXT_CONSTRUCTOR;
private static final Constructor<?> TASK_CONTEXT_CONSTRUCTOR;
private static final Constructor<?> MAP_CONTEXT_CONSTRUCTOR;
private static final Constructor<?> MAP_CONTEXT_IMPL_CONSTRUCTOR;
private static final boolean useV21;
private static final Field REPORTER_FIELD;
private static final Field READER_FIELD;
private static final Field WRITER_FIELD;
private static final Field OUTER_MAP_FIELD;
private static final Field WRAPPED_CONTEXT_FIELD;
static {
boolean v21 = true;
final String PACKAGE = "org.apache.hadoop.mapreduce";
try {
Class.forName(PACKAGE + ".task.JobContextImpl");
} catch (ClassNotFoundException cnfe) {
v21 = false;
}
useV21 = v21;
Class<?> jobContextCls;
Class<?> taskContextCls;
Class<?> taskIOContextCls;
Class<?> mapCls;
Class<?> mapContextCls;
Class<?> innerMapContextCls;
try {
if (v21) {
jobContextCls =
Class.forName(PACKAGE+".task.JobContextImpl");
taskContextCls =
Class.forName(PACKAGE+".task.TaskAttemptContextImpl");
taskIOContextCls =
Class.forName(PACKAGE+".task.TaskInputOutputContextImpl");
mapContextCls = Class.forName(PACKAGE + ".task.MapContextImpl");
mapCls = Class.forName(PACKAGE + ".lib.map.WrappedMapper");
innerMapContextCls =
Class.forName(PACKAGE+".lib.map.WrappedMapper$Context");
} else {
jobContextCls =
Class.forName(PACKAGE+".JobContext");
taskContextCls =
Class.forName(PACKAGE+".TaskAttemptContext");
taskIOContextCls =
Class.forName(PACKAGE+".TaskInputOutputContext");
mapContextCls = Class.forName(PACKAGE + ".MapContext");
mapCls = Class.forName(PACKAGE + ".Mapper");
innerMapContextCls =
Class.forName(PACKAGE+".Mapper$Context");
}
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Can't find class", e);
}
try {
JOB_CONTEXT_CONSTRUCTOR =
jobContextCls.getConstructor(Configuration.class, JobID.class);
JOB_CONTEXT_CONSTRUCTOR.setAccessible(true);
TASK_CONTEXT_CONSTRUCTOR =
taskContextCls.getConstructor(Configuration.class,
TaskAttemptID.class);
TASK_CONTEXT_CONSTRUCTOR.setAccessible(true);
if (useV21) {
MAP_CONTEXT_CONSTRUCTOR =
innerMapContextCls.getConstructor(mapCls,
MapContext.class);
MAP_CONTEXT_IMPL_CONSTRUCTOR =
mapContextCls.getDeclaredConstructor(Configuration.class,
TaskAttemptID.class,
RecordReader.class,
RecordWriter.class,
OutputCommitter.class,
StatusReporter.class,
InputSplit.class);
MAP_CONTEXT_IMPL_CONSTRUCTOR.setAccessible(true);
WRAPPED_CONTEXT_FIELD =
innerMapContextCls.getDeclaredField("mapContext");
WRAPPED_CONTEXT_FIELD.setAccessible(true);
} else {
MAP_CONTEXT_CONSTRUCTOR =
innerMapContextCls.getConstructor(mapCls,
Configuration.class,
TaskAttemptID.class,
RecordReader.class,
RecordWriter.class,
OutputCommitter.class,
StatusReporter.class,
InputSplit.class);
MAP_CONTEXT_IMPL_CONSTRUCTOR = null;
WRAPPED_CONTEXT_FIELD = null;
}
MAP_CONTEXT_CONSTRUCTOR.setAccessible(true);
REPORTER_FIELD = taskContextCls.getDeclaredField("reporter");
REPORTER_FIELD.setAccessible(true);
READER_FIELD = mapContextCls.getDeclaredField("reader");
READER_FIELD.setAccessible(true);
WRITER_FIELD = taskIOContextCls.getDeclaredField("output");
WRITER_FIELD.setAccessible(true);
OUTER_MAP_FIELD = innerMapContextCls.getDeclaredField("this$0");
OUTER_MAP_FIELD.setAccessible(true);
} catch (SecurityException e) {
throw new IllegalArgumentException("Can't run constructor ", e);
} catch (NoSuchMethodException e) {
throw new IllegalArgumentException("Can't find constructor ", e);
} catch (NoSuchFieldException e) {
throw new IllegalArgumentException("Can't find field ", e);
}
}
/**
* Clone a {@link JobContext} or {@link TaskAttemptContext} with a
* new configuration.
* @param original the original context
* @param conf the new configuration
* @return a new context object
* @throws InterruptedException
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static JobContext cloneContext(JobContext original,
Configuration conf
) throws IOException,
InterruptedException {
try {
if (original instanceof MapContext<?,?,?,?>) {
return cloneMapContext((Mapper.Context) original, conf, null, null);
} else if (original instanceof ReduceContext<?,?,?,?>) {
throw new IllegalArgumentException("can't clone ReduceContext");
} else if (original instanceof TaskAttemptContext) {
TaskAttemptContext spec = (TaskAttemptContext) original;
return (JobContext)
TASK_CONTEXT_CONSTRUCTOR.newInstance(conf, spec.getTaskAttemptID());
} else {
return (JobContext)
JOB_CONTEXT_CONSTRUCTOR.newInstance(conf, original.getJobID());
}
} catch (InstantiationException e) {
throw new IllegalArgumentException("Can't clone object", e);
} catch (IllegalAccessException e) {
throw new IllegalArgumentException("Can't clone object", e);
} catch (InvocationTargetException e) {
throw new IllegalArgumentException("Can't clone object", e);
}
}
/**
* Copy a custom WrappedMapper.Context, optionally replacing
* the input and output.
* @param <K1> input key type
* @param <V1> input value type
* @param <K2> output key type
* @param <V2> output value type
* @param context the context to clone
* @param conf a new configuration
* @param reader Reader to read from. Null means to clone from context.
* @param writer Writer to write to. Null means to clone from context.
   * @return a new context; it will not be the same class as the original
* @throws IOException
* @throws InterruptedException
*/
@SuppressWarnings("unchecked")
public static <K1,V1,K2,V2> Mapper<K1,V1,K2,V2>.Context
cloneMapContext(MapContext<K1,V1,K2,V2> context,
Configuration conf,
RecordReader<K1,V1> reader,
RecordWriter<K2,V2> writer
) throws IOException, InterruptedException {
try {
// get the outer object pointer
Object outer = OUTER_MAP_FIELD.get(context);
// if it is a wrapped 21 context, unwrap it
if ("org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context".equals
(context.getClass().getName())) {
context = (MapContext<K1,V1,K2,V2>) WRAPPED_CONTEXT_FIELD.get(context);
}
// if the reader or writer aren't given, use the same ones
if (reader == null) {
reader = (RecordReader<K1,V1>) READER_FIELD.get(context);
}
if (writer == null) {
writer = (RecordWriter<K2,V2>) WRITER_FIELD.get(context);
}
if (useV21) {
Object basis =
MAP_CONTEXT_IMPL_CONSTRUCTOR.newInstance(conf,
context.getTaskAttemptID(),
reader, writer,
context.getOutputCommitter(),
REPORTER_FIELD.get(context),
context.getInputSplit());
return (Mapper.Context)
MAP_CONTEXT_CONSTRUCTOR.newInstance(outer, basis);
} else {
return (Mapper.Context)
MAP_CONTEXT_CONSTRUCTOR.newInstance(outer,
conf, context.getTaskAttemptID(),
reader, writer,
context.getOutputCommitter(),
REPORTER_FIELD.get(context),
context.getInputSplit());
}
} catch (IllegalAccessException e) {
throw new IllegalArgumentException("Can't access field", e);
} catch (InstantiationException e) {
throw new IllegalArgumentException("Can't create object", e);
} catch (InvocationTargetException e) {
throw new IllegalArgumentException("Can't invoke constructor", e);
}
}
}
| 10,565 | 42.303279 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
// Counters used by Task classes
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum TaskCounter {
MAP_INPUT_RECORDS,
MAP_OUTPUT_RECORDS,
MAP_SKIPPED_RECORDS,
MAP_OUTPUT_BYTES,
MAP_OUTPUT_MATERIALIZED_BYTES,
SPLIT_RAW_BYTES,
COMBINE_INPUT_RECORDS,
COMBINE_OUTPUT_RECORDS,
REDUCE_INPUT_GROUPS,
REDUCE_SHUFFLE_BYTES,
REDUCE_INPUT_RECORDS,
REDUCE_OUTPUT_RECORDS,
REDUCE_SKIPPED_GROUPS,
REDUCE_SKIPPED_RECORDS,
SPILLED_RECORDS,
SHUFFLED_MAPS,
FAILED_SHUFFLE,
MERGED_MAP_OUTPUTS,
GC_TIME_MILLIS,
CPU_MILLISECONDS,
PHYSICAL_MEMORY_BYTES,
VIRTUAL_MEMORY_BYTES,
COMMITTED_HEAP_BYTES
}
| 1,606 | 29.903846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.filecache;
import org.apache.hadoop.classification.InterfaceAudience;
| 939 | 43.761905 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.filecache;
import java.io.*;
import java.util.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import java.net.URI;
/**
* Distribute application-specific large, read-only files efficiently.
*
* <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
* framework to cache files (text, archives, jars etc.) needed by applications.
* </p>
*
* <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
* via the {@link org.apache.hadoop.mapred.JobConf}. The
* <code>DistributedCache</code> assumes that the files specified via urls are
* already present on the {@link FileSystem} at the path specified by the url
* and are accessible by every machine in the cluster.</p>
*
* <p>The framework will copy the necessary files on to the slave node before
* any tasks for the job are executed on that node. Its efficiency stems from
* the fact that the files are only copied once per job and the ability to
* cache archives which are un-archived on the slaves.</p>
*
* <p><code>DistributedCache</code> can be used to distribute simple, read-only
* data/text files and/or more complex types such as archives, jars etc.
* Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
* Jars may be optionally added to the classpath of the tasks, a rudimentary
* software distribution mechanism. Files have execution permissions.
 * In older versions of Hadoop Map/Reduce users could optionally ask for symlinks
 * to be created in the working directory of the child task. In the current
 * version symlinks are always created. If the URL does not have a fragment
 * the name of the file or directory will be used. If multiple files or
 * directories map to the same link name, the last one added will be used. All
* others will not even be downloaded.</p>
*
* <p><code>DistributedCache</code> tracks modification timestamps of the cache
* files. Clearly the cache files should not be modified by the application
* or externally while the job is executing.</p>
*
* <p>Here is an illustrative example on how to use the
* <code>DistributedCache</code>:</p>
* <p><blockquote><pre>
* // Setting up the cache for the application
*
* 1. Copy the requisite files to the <code>FileSystem</code>:
*
* $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
* $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
* $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
* $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
* $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
* $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
*
* 2. Setup the application's <code>JobConf</code>:
*
* JobConf job = new JobConf();
* DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
* job);
* DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job);
* DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
* DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job);
* DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
* DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
*
* 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
* or {@link org.apache.hadoop.mapred.Reducer}:
*
* public static class MapClass extends MapReduceBase
* implements Mapper<K, V, K, V> {
*
* private Path[] localArchives;
* private Path[] localFiles;
*
* public void configure(JobConf job) {
* // Get the cached archives/files
* File f = new File("./map.zip/some/file/in/zip.txt");
* }
*
* public void map(K key, V value,
* OutputCollector<K, V> output, Reporter reporter)
* throws IOException {
* // Use data from the cached archives/files here
* // ...
* // ...
* output.collect(k, v);
* }
* }
*
* </pre></blockquote>
*
* It is also very common to use the DistributedCache by using
* {@link org.apache.hadoop.util.GenericOptionsParser}.
*
* This class includes methods that should be used by users
* (specifically those mentioned in the example above, as well
* as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
* as well as methods intended for use by the MapReduce framework
* (e.g., {@link org.apache.hadoop.mapred.JobClient}).
*
* @see org.apache.hadoop.mapred.JobConf
* @see org.apache.hadoop.mapred.JobClient
*/
@Deprecated
@InterfaceAudience.Private
public class DistributedCache {
/**
* Set the configuration with the given set of archives. Intended
* to be used by user code.
* @param archives The list of archives that need to be localized
* @param conf Configuration which will be changed
* @deprecated Use {@link Job#setCacheArchives(URI[])} instead
*/
@Deprecated
public static void setCacheArchives(URI[] archives, Configuration conf) {
String sarchives = StringUtils.uriToString(archives);
conf.set(MRJobConfig.CACHE_ARCHIVES, sarchives);
}
/**
* Set the configuration with the given set of files. Intended to be
* used by user code.
* @param files The list of files that need to be localized
* @param conf Configuration which will be changed
* @deprecated Use {@link Job#setCacheFiles(URI[])} instead
*/
@Deprecated
public static void setCacheFiles(URI[] files, Configuration conf) {
String sfiles = StringUtils.uriToString(files);
conf.set(MRJobConfig.CACHE_FILES, sfiles);
}
/**
* Get cache archives set in the Configuration. Used by
* internal DistributedCache and MapReduce code.
* @param conf The configuration which contains the archives
* @return A URI array of the caches set in the Configuration
* @throws IOException
* @deprecated Use {@link JobContext#getCacheArchives()} instead
*/
@Deprecated
public static URI[] getCacheArchives(Configuration conf) throws IOException {
return StringUtils.stringToURI(conf.getStrings(MRJobConfig.CACHE_ARCHIVES));
}
/**
* Get cache files set in the Configuration. Used by internal
* DistributedCache and MapReduce code.
* @param conf The configuration which contains the files
* @return A URI array of the files set in the Configuration
* @throws IOException
* @deprecated Use {@link JobContext#getCacheFiles()} instead
*/
@Deprecated
public static URI[] getCacheFiles(Configuration conf) throws IOException {
return StringUtils.stringToURI(conf.getStrings(MRJobConfig.CACHE_FILES));
}
/**
* Return the path array of the localized caches. Intended to be used
* by user code.
* @param conf Configuration that contains the localized archives
* @return A path array of localized caches
* @throws IOException
* @deprecated Use {@link JobContext#getLocalCacheArchives()} instead
*/
@Deprecated
public static Path[] getLocalCacheArchives(Configuration conf)
throws IOException {
return StringUtils.stringToPath(conf
.getStrings(MRJobConfig.CACHE_LOCALARCHIVES));
}
/**
* Return the path array of the localized files. Intended to be used
* by user code.
* @param conf Configuration that contains the localized files
* @return A path array of localized files
* @throws IOException
* @deprecated Use {@link JobContext#getLocalCacheFiles()} instead
*/
@Deprecated
public static Path[] getLocalCacheFiles(Configuration conf)
throws IOException {
return StringUtils.stringToPath(conf.getStrings(MRJobConfig.CACHE_LOCALFILES));
}
/**
* Parse a list of strings into longs.
* @param strs the list of strings to parse
   * @return an array of longs that were parsed, the same length as strs.
*/
private static long[] parseTimestamps(String[] strs) {
if (strs == null) {
return null;
}
long[] result = new long[strs.length];
for(int i=0; i < strs.length; ++i) {
result[i] = Long.parseLong(strs[i]);
}
return result;
}
/**
* Get the timestamps of the archives. Used by internal
* DistributedCache and MapReduce code.
* @param conf The configuration which stored the timestamps
* @return a long array of timestamps
* @deprecated Use {@link JobContext#getArchiveTimestamps()} instead
*/
@Deprecated
public static long[] getArchiveTimestamps(Configuration conf) {
return parseTimestamps(
conf.getStrings(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS));
}
/**
* Get the timestamps of the files. Used by internal
* DistributedCache and MapReduce code.
* @param conf The configuration which stored the timestamps
* @return a long array of timestamps
* @deprecated Use {@link JobContext#getFileTimestamps()} instead
*/
@Deprecated
public static long[] getFileTimestamps(Configuration conf) {
return parseTimestamps(
conf.getStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS));
}
/**
   * Add an archive to be localized to the conf. Intended to
* be used by user code.
* @param uri The uri of the cache to be localized
* @param conf Configuration to add the cache to
* @deprecated Use {@link Job#addCacheArchive(URI)} instead
*/
@Deprecated
public static void addCacheArchive(URI uri, Configuration conf) {
String archives = conf.get(MRJobConfig.CACHE_ARCHIVES);
conf.set(MRJobConfig.CACHE_ARCHIVES, archives == null ? uri.toString()
: archives + "," + uri.toString());
}
/**
* Add a file to be localized to the conf. Intended
* to be used by user code.
* @param uri The uri of the cache to be localized
* @param conf Configuration to add the cache to
* @deprecated Use {@link Job#addCacheFile(URI)} instead
*/
@Deprecated
public static void addCacheFile(URI uri, Configuration conf) {
String files = conf.get(MRJobConfig.CACHE_FILES);
conf.set(MRJobConfig.CACHE_FILES, files == null ? uri.toString() : files + ","
+ uri.toString());
}
/**
   * Add a file path to the current set of classpath entries. It adds the file
   * to the cache as well. Intended to be used by user code.
*
* @param file Path of the file to be added
* @param conf Configuration that contains the classpath setting
* @deprecated Use {@link Job#addFileToClassPath(Path)} instead
*/
@Deprecated
public static void addFileToClassPath(Path file, Configuration conf)
throws IOException {
addFileToClassPath(file, conf, file.getFileSystem(conf));
}
/**
* Add a file path to the current set of classpath entries. It adds the file
* to cache as well. Intended to be used by user code.
*
* @param file Path of the file to be added
* @param conf Configuration that contains the classpath setting
   * @param fs FileSystem with respect to which {@code file} should
* be interpreted.
*/
public static void addFileToClassPath
(Path file, Configuration conf, FileSystem fs)
throws IOException {
String classpath = conf.get(MRJobConfig.CLASSPATH_FILES);
conf.set(MRJobConfig.CLASSPATH_FILES, classpath == null ? file.toString()
: classpath + "," + file.toString());
URI uri = fs.makeQualified(file).toUri();
addCacheFile(uri, conf);
}
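  // Illustrative usage sketch (the path is hypothetical): one call adds the
  // jar to the task classpath and to the distributed cache.
  //
  //   Configuration conf = new Configuration();
  //   DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), conf);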
/**
* Get the file entries in classpath as an array of Path.
* Used by internal DistributedCache code.
*
* @param conf Configuration that contains the classpath setting
* @deprecated Use {@link JobContext#getFileClassPaths()} instead
*/
@Deprecated
public static Path[] getFileClassPaths(Configuration conf) {
ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
MRJobConfig.CLASSPATH_FILES);
if (list.size() == 0) {
return null;
}
Path[] paths = new Path[list.size()];
for (int i = 0; i < list.size(); i++) {
paths[i] = new Path(list.get(i));
}
return paths;
}
/**
* Add an archive path to the current set of classpath entries. It adds the
* archive to cache as well. Intended to be used by user code.
*
* @param archive Path of the archive to be added
* @param conf Configuration that contains the classpath setting
* @deprecated Use {@link Job#addArchiveToClassPath(Path)} instead
*/
@Deprecated
public static void addArchiveToClassPath(Path archive, Configuration conf)
throws IOException {
addArchiveToClassPath(archive, conf, archive.getFileSystem(conf));
}
/**
* Add an archive path to the current set of classpath entries. It adds the
* archive to cache as well. Intended to be used by user code.
*
* @param archive Path of the archive to be added
* @param conf Configuration that contains the classpath setting
* @param fs FileSystem with respect to which {@code archive} should be interpreted.
*/
public static void addArchiveToClassPath
(Path archive, Configuration conf, FileSystem fs)
throws IOException {
String classpath = conf.get(MRJobConfig.CLASSPATH_ARCHIVES);
conf.set(MRJobConfig.CLASSPATH_ARCHIVES, classpath == null ? archive
.toString() : classpath + "," + archive.toString());
URI uri = fs.makeQualified(archive).toUri();
addCacheArchive(uri, conf);
}
/**
* Get the archive entries in classpath as an array of Path.
* Used by internal DistributedCache code.
*
* @param conf Configuration that contains the classpath setting
* @deprecated Use {@link JobContext#getArchiveClassPaths()} instead
*/
@Deprecated
public static Path[] getArchiveClassPaths(Configuration conf) {
ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
MRJobConfig.CLASSPATH_ARCHIVES);
if (list.size() == 0) {
return null;
}
Path[] paths = new Path[list.size()];
for (int i = 0; i < list.size(); i++) {
paths[i] = new Path(list.get(i));
}
return paths;
}
/**
* Originally intended to enable symlinks, but currently symlinks cannot be
* disabled. This is a NO-OP.
* @param conf the jobconf
* @deprecated This is a NO-OP.
*/
@Deprecated
public static void createSymlink(Configuration conf){
//NOOP
}
/**
* Originally intended to check if symlinks should be used, but currently
* symlinks cannot be disabled.
* @param conf the jobconf
* @return true
* @deprecated symlinks are always created.
*/
@Deprecated
public static boolean getSymlink(Configuration conf){
return true;
}
private static boolean[] parseBooleans(String[] strs) {
if (null == strs) {
return null;
}
boolean[] result = new boolean[strs.length];
for(int i=0; i < strs.length; ++i) {
result[i] = Boolean.parseBoolean(strs[i]);
}
return result;
}
/**
* Get the booleans on whether the files are public or not. Used by
* internal DistributedCache and MapReduce code.
   * @param conf The configuration which stored the visibilities
   * @return a boolean array of visibilities
*/
public static boolean[] getFileVisibilities(Configuration conf) {
return parseBooleans(conf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES));
}
/**
* Get the booleans on whether the archives are public or not. Used by
* internal DistributedCache and MapReduce code.
   * @param conf The configuration which stored the visibilities
   * @return a boolean array of visibilities
*/
public static boolean[] getArchiveVisibilities(Configuration conf) {
return parseBooleans(conf.getStrings(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES));
}
/**
* This method checks if there is a conflict in the fragment names
* of the uris. Also makes sure that each uri has a fragment. It
* is only to be called if you want to create symlinks for
* the various archives and files. May be used by user code.
* @param uriFiles The uri array of urifiles
* @param uriArchives the uri array of uri archives
*/
public static boolean checkURIs(URI[] uriFiles, URI[] uriArchives) {
if ((uriFiles == null) && (uriArchives == null)) {
return true;
}
// check if fragment is null for any uri
// also check if there are any conflicts in fragment names
Set<String> fragments = new HashSet<String>();
// iterate over file uris
if (uriFiles != null) {
for (int i = 0; i < uriFiles.length; i++) {
String fragment = uriFiles[i].getFragment();
if (fragment == null) {
return false;
}
String lowerCaseFragment = StringUtils.toLowerCase(fragment);
if (fragments.contains(lowerCaseFragment)) {
return false;
}
fragments.add(lowerCaseFragment);
}
}
// iterate over archive uris
if (uriArchives != null) {
for (int i = 0; i < uriArchives.length; i++) {
String fragment = uriArchives[i].getFragment();
if (fragment == null) {
return false;
}
String lowerCaseFragment = StringUtils.toLowerCase(fragment);
if (fragments.contains(lowerCaseFragment)) {
return false;
}
fragments.add(lowerCaseFragment);
}
}
return true;
}
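  // Illustrative sketch of the fragment rule above (URIs are hypothetical):
  // fragment comparison is case-insensitive, so these two collide on the
  // link name "lookup.dat" and checkURIs returns false.
  //
  //   URI[] files = { new URI("/myapp/a.dat#lookup.dat") };
  //   URI[] archives = { new URI("/myapp/b.zip#LOOKUP.DAT") };
  //   boolean ok = DistributedCache.checkURIs(files, archives); // false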
}
| 18,610 | 36.296593 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.filecache;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;
/**
* Manages internal configuration of the cache by the client for job submission.
*/
@InterfaceAudience.Private
public class ClientDistributedCacheManager {
/**
* Determines timestamps of files to be cached, and stores those
* in the configuration. Determines the visibilities of the distributed cache
* files and archives. The visibility of a cache path is "public" if the leaf
* component has READ permissions for others, and the parent subdirs have
* EXECUTE permissions for others.
*
* This is an internal method!
*
* @param job
* @throws IOException
*/
public static void determineTimestampsAndCacheVisibilities(Configuration job)
throws IOException {
Map<URI, FileStatus> statCache = new HashMap<URI, FileStatus>();
determineTimestamps(job, statCache);
determineCacheVisibilities(job, statCache);
}
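  // Illustrative usage sketch (conf is a hypothetical, fully populated job
  // configuration): call this once after all cache entries have been added.
  //
  //   DistributedCache.addCacheFile(new URI("/myapp/lookup.dat"), conf);
  //   ClientDistributedCacheManager
  //       .determineTimestampsAndCacheVisibilities(conf);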
/**
* Determines timestamps of files to be cached, and stores those
* in the configuration. This is intended to be used internally by JobClient
* after all cache files have been added.
*
* This is an internal method!
*
* @param job Configuration of a job.
* @throws IOException
*/
public static void determineTimestamps(Configuration job,
Map<URI, FileStatus> statCache) throws IOException {
URI[] tarchives = DistributedCache.getCacheArchives(job);
if (tarchives != null) {
FileStatus status = getFileStatus(job, tarchives[0], statCache);
StringBuilder archiveFileSizes =
new StringBuilder(String.valueOf(status.getLen()));
StringBuilder archiveTimestamps =
new StringBuilder(String.valueOf(status.getModificationTime()));
for (int i = 1; i < tarchives.length; i++) {
status = getFileStatus(job, tarchives[i], statCache);
archiveFileSizes.append(",");
archiveFileSizes.append(String.valueOf(status.getLen()));
archiveTimestamps.append(",");
archiveTimestamps.append(String.valueOf(status.getModificationTime()));
}
job.set(MRJobConfig.CACHE_ARCHIVES_SIZES, archiveFileSizes.toString());
setArchiveTimestamps(job, archiveTimestamps.toString());
}
URI[] tfiles = DistributedCache.getCacheFiles(job);
if (tfiles != null) {
FileStatus status = getFileStatus(job, tfiles[0], statCache);
StringBuilder fileSizes =
new StringBuilder(String.valueOf(status.getLen()));
StringBuilder fileTimestamps = new StringBuilder(String.valueOf(
status.getModificationTime()));
for (int i = 1; i < tfiles.length; i++) {
status = getFileStatus(job, tfiles[i], statCache);
fileSizes.append(",");
fileSizes.append(String.valueOf(status.getLen()));
fileTimestamps.append(",");
fileTimestamps.append(String.valueOf(status.getModificationTime()));
}
job.set(MRJobConfig.CACHE_FILES_SIZES, fileSizes.toString());
setFileTimestamps(job, fileTimestamps.toString());
}
}
/**
* For each archive or cache file - get the corresponding delegation token
* @param job
* @param credentials
* @throws IOException
*/
public static void getDelegationTokens(Configuration job,
Credentials credentials) throws IOException {
URI[] tarchives = DistributedCache.getCacheArchives(job);
URI[] tfiles = DistributedCache.getCacheFiles(job);
    int size = (tarchives != null ? tarchives.length : 0) +
        (tfiles != null ? tfiles.length : 0);
Path[] ps = new Path[size];
int i = 0;
if (tarchives != null) {
for (i=0; i < tarchives.length; i++) {
ps[i] = new Path(tarchives[i].toString());
}
}
if (tfiles != null) {
for(int j=0; j< tfiles.length; j++) {
ps[i+j] = new Path(tfiles[j].toString());
}
}
TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
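  // Illustrative usage sketch (conf is hypothetical): collect delegation
  // tokens for every configured cache path before job submission.
  //
  //   Credentials creds = new Credentials();
  //   ClientDistributedCacheManager.getDelegationTokens(conf, creds);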
/**
* Determines the visibilities of the distributed cache files and
* archives. The visibility of a cache path is "public" if the leaf component
* has READ permissions for others, and the parent subdirs have
* EXECUTE permissions for others
* @param job
* @throws IOException
*/
public static void determineCacheVisibilities(Configuration job,
Map<URI, FileStatus> statCache) throws IOException {
URI[] tarchives = DistributedCache.getCacheArchives(job);
if (tarchives != null) {
StringBuilder archiveVisibilities =
new StringBuilder(String.valueOf(isPublic(job, tarchives[0], statCache)));
for (int i = 1; i < tarchives.length; i++) {
archiveVisibilities.append(",");
archiveVisibilities.append(String.valueOf(isPublic(job, tarchives[i], statCache)));
}
setArchiveVisibilities(job, archiveVisibilities.toString());
}
URI[] tfiles = DistributedCache.getCacheFiles(job);
if (tfiles != null) {
StringBuilder fileVisibilities =
new StringBuilder(String.valueOf(isPublic(job, tfiles[0], statCache)));
for (int i = 1; i < tfiles.length; i++) {
fileVisibilities.append(",");
fileVisibilities.append(String.valueOf(isPublic(job, tfiles[i], statCache)));
}
setFileVisibilities(job, fileVisibilities.toString());
}
}
/**
   * Records the public/private visibility of the archives to be
   * localized.
   *
   * @param conf Configuration which stores the visibilities
* @param booleans comma separated list of booleans (true - public)
* The order should be the same as the order in which the archives are added.
*/
static void setArchiveVisibilities(Configuration conf, String booleans) {
conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, booleans);
}
/**
   * Records the public/private visibility of the files to be localized.
   *
   * @param conf Configuration which stores the visibilities
* @param booleans comma separated list of booleans (true - public)
* The order should be the same as the order in which the files are added.
*/
static void setFileVisibilities(Configuration conf, String booleans) {
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, booleans);
}
/**
   * Records the timestamps of the archives to be localized.
   *
   * @param conf Configuration which stores the timestamps
* @param timestamps comma separated list of timestamps of archives.
* The order should be the same as the order in which the archives are added.
*/
static void setArchiveTimestamps(Configuration conf, String timestamps) {
conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, timestamps);
}
/**
   * Records the timestamps of the files to be localized.
   *
   * @param conf Configuration which stores the timestamps
* @param timestamps comma separated list of timestamps of files.
* The order should be the same as the order in which the files are added.
*/
static void setFileTimestamps(Configuration conf, String timestamps) {
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
}
/**
* Gets the file status for the given URI. If the URI is in the cache,
* returns it. Otherwise, fetches it and adds it to the cache.
*/
private static FileStatus getFileStatus(Configuration job, URI uri,
Map<URI, FileStatus> statCache) throws IOException {
FileSystem fileSystem = FileSystem.get(uri, job);
return getFileStatus(fileSystem, uri, statCache);
}
/**
   * Returns a boolean to denote whether a cache file is visible to all (public)
* or not
* @param conf
* @param uri
* @return true if the path in the uri is visible to all, false otherwise
* @throws IOException
*/
static boolean isPublic(Configuration conf, URI uri,
Map<URI, FileStatus> statCache) throws IOException {
FileSystem fs = FileSystem.get(uri, conf);
Path current = new Path(uri.getPath());
current = fs.makeQualified(current);
//the leaf level file should be readable by others
if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) {
return false;
}
return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
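  // Illustrative sketch (conf and the path are hypothetical): a file is
  // public only if it is other-readable and every ancestor directory is
  // other-executable.
  //
  //   Map<URI, FileStatus> statCache = new HashMap<URI, FileStatus>();
  //   boolean pub = isPublic(conf,
  //       new URI("hdfs:///user/alice/cache/lookup.dat"), statCache);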
/**
* Returns true if all ancestors of the specified path have the 'execute'
* permission set for all users (i.e. that other users can traverse
   * the directory hierarchy to the given path)
*/
static boolean ancestorsHaveExecutePermissions(FileSystem fs, Path path,
Map<URI, FileStatus> statCache) throws IOException {
Path current = path;
while (current != null) {
//the subdirs in the path should have execute permissions for others
if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE, statCache)) {
return false;
}
current = current.getParent();
}
return true;
}
  /**
   * Checks for a given path whether the "other" permissions on it
   * imply the permission in the passed FsAction.
   * @param fs
   * @param path
   * @param action
   * @param statCache cache of FileStatus objects keyed by URI
   * @return true if the path's "other" permissions imply the given action
   * @throws IOException
   */
private static boolean checkPermissionOfOther(FileSystem fs, Path path,
FsAction action, Map<URI, FileStatus> statCache) throws IOException {
FileStatus status = getFileStatus(fs, path.toUri(), statCache);
FsPermission perms = status.getPermission();
    FsAction otherAction = perms.getOtherAction();
    return otherAction.implies(action);
}
private static FileStatus getFileStatus(FileSystem fs, URI uri,
Map<URI, FileStatus> statCache) throws IOException {
FileStatus stat = statCache.get(uri);
if (stat == null) {
stat = fs.getFileStatus(new Path(uri));
statCache.put(uri, stat);
}
return stat;
}
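  /**
   * Editorial sketch (hypothetical helper, not part of the original class):
   * composes the checks above. A cache URI is treated as public only when the
   * leaf is world-readable and every ancestor directory is world-executable.
   */
  private static boolean sketchVisibility(Configuration conf, URI uri)
      throws IOException {
    Map<URI, FileStatus> statCache = new java.util.HashMap<URI, FileStatus>();
    // isPublic() checks READ-by-other on the leaf, then walks up the tree
    // checking EXECUTE-by-other on each ancestor.
    return isPublic(conf, uri, statCache);
  }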
}
| 11,181 | 36.905085 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.tools;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TIPStatus;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.HistoryViewer;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
import com.google.common.base.Charsets;
/**
 * Interprets the map reduce CLI options.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CLI extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(CLI.class);
protected Cluster cluster;
private static final Set<String> taskTypes = new HashSet<String>(
Arrays.asList("MAP", "REDUCE"));
private final Set<String> taskStates = new HashSet<String>(Arrays.asList(
"running", "completed", "pending", "failed", "killed"));
public CLI() {
}
public CLI(Configuration conf) {
setConf(conf);
}
public int run(String[] argv) throws Exception {
int exitCode = -1;
if (argv.length < 1) {
displayUsage("");
return exitCode;
}
// process arguments
String cmd = argv[0];
String submitJobFile = null;
String jobid = null;
String taskid = null;
String historyFile = null;
String counterGroupName = null;
String counterName = null;
JobPriority jp = null;
String taskType = null;
String taskState = null;
int fromEvent = 0;
int nEvents = 0;
boolean getStatus = false;
boolean getCounter = false;
boolean killJob = false;
boolean listEvents = false;
boolean viewHistory = false;
boolean viewAllHistory = false;
boolean listJobs = false;
boolean listAllJobs = false;
boolean listActiveTrackers = false;
boolean listBlacklistedTrackers = false;
boolean displayTasks = false;
boolean killTask = false;
boolean failTask = false;
boolean setJobPriority = false;
boolean logs = false;
if ("-submit".equals(cmd)) {
if (argv.length != 2) {
displayUsage(cmd);
return exitCode;
}
submitJobFile = argv[1];
} else if ("-status".equals(cmd)) {
if (argv.length != 2) {
displayUsage(cmd);
return exitCode;
}
jobid = argv[1];
getStatus = true;
} else if("-counter".equals(cmd)) {
if (argv.length != 4) {
displayUsage(cmd);
return exitCode;
}
getCounter = true;
jobid = argv[1];
counterGroupName = argv[2];
counterName = argv[3];
} else if ("-kill".equals(cmd)) {
if (argv.length != 2) {
displayUsage(cmd);
return exitCode;
}
jobid = argv[1];
killJob = true;
} else if ("-set-priority".equals(cmd)) {
if (argv.length != 3) {
displayUsage(cmd);
return exitCode;
}
jobid = argv[1];
try {
jp = JobPriority.valueOf(argv[2]);
} catch (IllegalArgumentException iae) {
LOG.info(iae);
displayUsage(cmd);
return exitCode;
}
setJobPriority = true;
} else if ("-events".equals(cmd)) {
if (argv.length != 4) {
displayUsage(cmd);
return exitCode;
}
jobid = argv[1];
fromEvent = Integer.parseInt(argv[2]);
nEvents = Integer.parseInt(argv[3]);
listEvents = true;
} else if ("-history".equals(cmd)) {
if (argv.length != 2 && !(argv.length == 3 && "all".equals(argv[1]))) {
displayUsage(cmd);
return exitCode;
}
viewHistory = true;
if (argv.length == 3 && "all".equals(argv[1])) {
viewAllHistory = true;
historyFile = argv[2];
} else {
historyFile = argv[1];
}
} else if ("-list".equals(cmd)) {
if (argv.length != 1 && !(argv.length == 2 && "all".equals(argv[1]))) {
displayUsage(cmd);
return exitCode;
}
if (argv.length == 2 && "all".equals(argv[1])) {
listAllJobs = true;
} else {
listJobs = true;
}
} else if("-kill-task".equals(cmd)) {
if (argv.length != 2) {
displayUsage(cmd);
return exitCode;
}
killTask = true;
taskid = argv[1];
} else if("-fail-task".equals(cmd)) {
if (argv.length != 2) {
displayUsage(cmd);
return exitCode;
}
failTask = true;
taskid = argv[1];
} else if ("-list-active-trackers".equals(cmd)) {
if (argv.length != 1) {
displayUsage(cmd);
return exitCode;
}
listActiveTrackers = true;
} else if ("-list-blacklisted-trackers".equals(cmd)) {
if (argv.length != 1) {
displayUsage(cmd);
return exitCode;
}
listBlacklistedTrackers = true;
} else if ("-list-attempt-ids".equals(cmd)) {
if (argv.length != 4) {
displayUsage(cmd);
return exitCode;
}
jobid = argv[1];
taskType = argv[2];
taskState = argv[3];
displayTasks = true;
if (!taskTypes.contains(
org.apache.hadoop.util.StringUtils.toUpperCase(taskType))) {
System.out.println("Error: Invalid task-type: " + taskType);
displayUsage(cmd);
return exitCode;
}
if (!taskStates.contains(
org.apache.hadoop.util.StringUtils.toLowerCase(taskState))) {
System.out.println("Error: Invalid task-state: " + taskState);
displayUsage(cmd);
return exitCode;
}
} else if ("-logs".equals(cmd)) {
if (argv.length == 2 || argv.length ==3) {
logs = true;
jobid = argv[1];
if (argv.length == 3) {
taskid = argv[2];
} else {
taskid = null;
}
} else {
displayUsage(cmd);
return exitCode;
}
} else {
displayUsage(cmd);
return exitCode;
}
// initialize cluster
cluster = createCluster();
// Submit the request
try {
if (submitJobFile != null) {
Job job = Job.getInstance(new JobConf(submitJobFile));
job.submit();
System.out.println("Created job " + job.getJobID());
exitCode = 0;
} else if (getStatus) {
Job job = cluster.getJob(JobID.forName(jobid));
if (job == null) {
System.out.println("Could not find job " + jobid);
} else {
Counters counters = job.getCounters();
System.out.println();
System.out.println(job);
if (counters != null) {
System.out.println(counters);
} else {
System.out.println("Counters not available. Job is retired.");
}
exitCode = 0;
}
} else if (getCounter) {
Job job = cluster.getJob(JobID.forName(jobid));
if (job == null) {
System.out.println("Could not find job " + jobid);
} else {
Counters counters = job.getCounters();
if (counters == null) {
System.out.println("Counters not available for retired job " +
jobid);
exitCode = -1;
} else {
System.out.println(getCounter(counters,
counterGroupName, counterName));
exitCode = 0;
}
}
} else if (killJob) {
Job job = cluster.getJob(JobID.forName(jobid));
if (job == null) {
System.out.println("Could not find job " + jobid);
} else {
JobStatus jobStatus = job.getStatus();
if (jobStatus.getState() == JobStatus.State.FAILED) {
System.out.println("Could not mark the job " + jobid
+ " as killed, as it has already failed.");
exitCode = -1;
} else if (jobStatus.getState() == JobStatus.State.KILLED) {
System.out
.println("The job " + jobid + " has already been killed.");
exitCode = -1;
} else if (jobStatus.getState() == JobStatus.State.SUCCEEDED) {
System.out.println("Could not kill the job " + jobid
+ ", as it has already succeeded.");
exitCode = -1;
} else {
job.killJob();
System.out.println("Killed job " + jobid);
exitCode = 0;
}
}
} else if (setJobPriority) {
Job job = cluster.getJob(JobID.forName(jobid));
if (job == null) {
System.out.println("Could not find job " + jobid);
} else {
job.setPriority(jp);
System.out.println("Changed job priority.");
exitCode = 0;
}
} else if (viewHistory) {
viewHistory(historyFile, viewAllHistory);
exitCode = 0;
} else if (listEvents) {
listEvents(cluster.getJob(JobID.forName(jobid)), fromEvent, nEvents);
exitCode = 0;
} else if (listJobs) {
listJobs(cluster);
exitCode = 0;
} else if (listAllJobs) {
listAllJobs(cluster);
exitCode = 0;
} else if (listActiveTrackers) {
listActiveTrackers(cluster);
exitCode = 0;
} else if (listBlacklistedTrackers) {
listBlacklistedTrackers(cluster);
exitCode = 0;
} else if (displayTasks) {
displayTasks(cluster.getJob(JobID.forName(jobid)), taskType, taskState);
exitCode = 0;
} else if(killTask) {
TaskAttemptID taskID = TaskAttemptID.forName(taskid);
Job job = cluster.getJob(taskID.getJobID());
if (job == null) {
System.out.println("Could not find job " + jobid);
} else if (job.killTask(taskID, false)) {
System.out.println("Killed task " + taskid);
exitCode = 0;
} else {
System.out.println("Could not kill task " + taskid);
exitCode = -1;
}
} else if(failTask) {
TaskAttemptID taskID = TaskAttemptID.forName(taskid);
Job job = cluster.getJob(taskID.getJobID());
if (job == null) {
System.out.println("Could not find job " + jobid);
} else if(job.killTask(taskID, true)) {
System.out.println("Killed task " + taskID + " by failing it");
exitCode = 0;
} else {
System.out.println("Could not fail task " + taskid);
exitCode = -1;
}
} else if (logs) {
try {
JobID jobID = JobID.forName(jobid);
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskid);
LogParams logParams = cluster.getLogParams(jobID, taskAttemptID);
LogCLIHelpers logDumper = new LogCLIHelpers();
logDumper.setConf(getConf());
exitCode = logDumper.dumpAContainersLogs(logParams.getApplicationId(),
logParams.getContainerId(), logParams.getNodeId(),
logParams.getOwner());
} catch (IOException e) {
if (e instanceof RemoteException) {
throw e;
}
System.out.println(e.getMessage());
}
}
} catch (RemoteException re) {
IOException unwrappedException = re.unwrapRemoteException();
if (unwrappedException instanceof AccessControlException) {
System.out.println(unwrappedException.getMessage());
} else {
throw re;
}
} finally {
cluster.close();
}
return exitCode;
}
Cluster createCluster() throws IOException {
return new Cluster(getConf());
}
  private String getJobPriorityNames() {
    StringBuilder sb = new StringBuilder();
    for (JobPriority p : JobPriority.values()) {
      sb.append(p.name()).append(" ");
    }
    return sb.substring(0, sb.length() - 1);
  }
private String getTaskTypes() {
return StringUtils.join(taskTypes, " ");
}
/**
* Display usage of the command-line tool and terminate execution.
*/
private void displayUsage(String cmd) {
String prefix = "Usage: job ";
String jobPriorityValues = getJobPriorityNames();
    String taskStates = "running, completed, pending, failed, killed";
if ("-submit".equals(cmd)) {
System.err.println(prefix + "[" + cmd + " <job-file>]");
} else if ("-status".equals(cmd) || "-kill".equals(cmd)) {
System.err.println(prefix + "[" + cmd + " <job-id>]");
} else if ("-counter".equals(cmd)) {
System.err.println(prefix + "[" + cmd +
" <job-id> <group-name> <counter-name>]");
} else if ("-events".equals(cmd)) {
System.err.println(prefix + "[" + cmd +
" <job-id> <from-event-#> <#-of-events>]. Event #s start from 1.");
} else if ("-history".equals(cmd)) {
      System.err.println(prefix + "[" + cmd + " [all] <jobHistoryFile>]");
} else if ("-list".equals(cmd)) {
System.err.println(prefix + "[" + cmd + " [all]]");
} else if ("-kill-task".equals(cmd) || "-fail-task".equals(cmd)) {
System.err.println(prefix + "[" + cmd + " <task-attempt-id>]");
} else if ("-set-priority".equals(cmd)) {
System.err.println(prefix + "[" + cmd + " <job-id> <priority>]. " +
"Valid values for priorities are: "
+ jobPriorityValues);
} else if ("-list-active-trackers".equals(cmd)) {
System.err.println(prefix + "[" + cmd + "]");
} else if ("-list-blacklisted-trackers".equals(cmd)) {
System.err.println(prefix + "[" + cmd + "]");
} else if ("-list-attempt-ids".equals(cmd)) {
System.err.println(prefix + "[" + cmd +
" <job-id> <task-type> <task-state>]. " +
"Valid values for <task-type> are " + getTaskTypes() + ". " +
"Valid values for <task-state> are " + taskStates);
} else if ("-logs".equals(cmd)) {
System.err.println(prefix + "[" + cmd +
" <job-id> <task-attempt-id>]. " +
" <task-attempt-id> is optional to get task attempt logs.");
} else {
System.err.printf(prefix + "<command> <args>%n");
System.err.printf("\t[-submit <job-file>]%n");
System.err.printf("\t[-status <job-id>]%n");
System.err.printf("\t[-counter <job-id> <group-name> <counter-name>]%n");
System.err.printf("\t[-kill <job-id>]%n");
System.err.printf("\t[-set-priority <job-id> <priority>]. " +
"Valid values for priorities are: " + jobPriorityValues + "%n");
System.err.printf("\t[-events <job-id> <from-event-#> <#-of-events>]%n");
      System.err.printf("\t[-history [all] <jobHistoryFile>]%n");
System.err.printf("\t[-list [all]]%n");
System.err.printf("\t[-list-active-trackers]%n");
System.err.printf("\t[-list-blacklisted-trackers]%n");
System.err.println("\t[-list-attempt-ids <job-id> <task-type> " +
"<task-state>]. " +
"Valid values for <task-type> are " + getTaskTypes() + ". " +
"Valid values for <task-state> are " + taskStates);
System.err.printf("\t[-kill-task <task-attempt-id>]%n");
System.err.printf("\t[-fail-task <task-attempt-id>]%n");
System.err.printf("\t[-logs <job-id> <task-attempt-id>]%n%n");
ToolRunner.printGenericCommandUsage(System.out);
}
}
private void viewHistory(String historyFile, boolean all)
throws IOException {
HistoryViewer historyViewer = new HistoryViewer(historyFile,
getConf(), all);
historyViewer.print();
}
protected long getCounter(Counters counters, String counterGroupName,
String counterName) throws IOException {
return counters.findCounter(counterGroupName, counterName).getValue();
}
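  /**
   * Editorial sketch (hypothetical helper, not part of the original class):
   * mirrors what "job -counter <job-id> <group-name> <counter-name>" does
   * programmatically against an initialized cluster.
   */
  private long sketchFetchCounter(String jobIdStr, String group, String name)
      throws IOException, InterruptedException {
    Job job = cluster.getJob(JobID.forName(jobIdStr));
    if (job == null) {
      throw new IOException("Could not find job " + jobIdStr);
    }
    return getCounter(job.getCounters(), group, name);
  }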
  /**
   * List the events for the given job
   * @param job the job whose events are to be listed
   * @param fromEventId the event id to start listing from
   * @param numEvents the number of events to list
   * @throws IOException
   */
private void listEvents(Job job, int fromEventId, int numEvents)
throws IOException, InterruptedException {
TaskCompletionEvent[] events = job.
getTaskCompletionEvents(fromEventId, numEvents);
System.out.println("Task completion events for " + job.getJobID());
System.out.println("Number of events (from " + fromEventId + ") are: "
+ events.length);
for(TaskCompletionEvent event: events) {
System.out.println(event.getStatus() + " " +
event.getTaskAttemptId() + " " +
getTaskLogURL(event.getTaskAttemptId(), event.getTaskTrackerHttp()));
}
}
protected static String getTaskLogURL(TaskAttemptID taskId, String baseUrl) {
return (baseUrl + "/tasklog?plaintext=true&attemptid=" + taskId);
}
/**
* Dump a list of currently running jobs
* @throws IOException
*/
private void listJobs(Cluster cluster)
throws IOException, InterruptedException {
List<JobStatus> runningJobs = new ArrayList<JobStatus>();
for (JobStatus job : cluster.getAllJobStatuses()) {
if (!job.isJobComplete()) {
runningJobs.add(job);
}
}
displayJobList(runningJobs.toArray(new JobStatus[0]));
}
/**
* Dump a list of all jobs submitted.
* @throws IOException
*/
private void listAllJobs(Cluster cluster)
throws IOException, InterruptedException {
displayJobList(cluster.getAllJobStatuses());
}
/**
* Display the list of active trackers
*/
private void listActiveTrackers(Cluster cluster)
throws IOException, InterruptedException {
TaskTrackerInfo[] trackers = cluster.getActiveTaskTrackers();
for (TaskTrackerInfo tracker : trackers) {
System.out.println(tracker.getTaskTrackerName());
}
}
/**
* Display the list of blacklisted trackers
*/
private void listBlacklistedTrackers(Cluster cluster)
throws IOException, InterruptedException {
TaskTrackerInfo[] trackers = cluster.getBlackListedTaskTrackers();
if (trackers.length > 0) {
System.out.println("BlackListedNode \t Reason");
}
for (TaskTrackerInfo tracker : trackers) {
System.out.println(tracker.getTaskTrackerName() + "\t" +
tracker.getReasonForBlacklist());
}
}
private void printTaskAttempts(TaskReport report) {
if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
System.out.println(report.getSuccessfulTaskAttemptId());
} else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
for (TaskAttemptID t :
report.getRunningTaskAttemptIds()) {
System.out.println(t);
}
}
}
/**
* Display the information about a job's tasks, of a particular type and
* in a particular state
*
* @param job the job
* @param type the type of the task (map/reduce/setup/cleanup)
* @param state the state of the task
* (pending/running/completed/failed/killed)
*/
protected void displayTasks(Job job, String type, String state)
throws IOException, InterruptedException {
TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
org.apache.hadoop.util.StringUtils.toUpperCase(type)));
for (TaskReport report : reports) {
TIPStatus status = report.getCurrentStatus();
      if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
          (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
          (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
          (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
          (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
printTaskAttempts(report);
}
}
}
public void displayJobList(JobStatus[] jobs)
throws IOException, InterruptedException {
displayJobList(jobs, new PrintWriter(new OutputStreamWriter(System.out,
Charsets.UTF_8)));
}
@Private
public static String headerPattern = "%23s\t%10s\t%14s\t%12s\t%12s\t%10s\t%15s\t%15s\t%8s\t%8s\t%10s\t%10s\n";
@Private
public static String dataPattern = "%23s\t%10s\t%14d\t%12s\t%12s\t%10s\t%15s\t%15s\t%8s\t%8s\t%10s\t%10s\n";
private static String memPattern = "%dM";
private static String UNAVAILABLE = "N/A";
@Private
public void displayJobList(JobStatus[] jobs, PrintWriter writer) {
writer.println("Total jobs:" + jobs.length);
writer.printf(headerPattern, "JobId", "State", "StartTime", "UserName",
"Queue", "Priority", "UsedContainers",
"RsvdContainers", "UsedMem", "RsvdMem", "NeededMem", "AM info");
for (JobStatus job : jobs) {
int numUsedSlots = job.getNumUsedSlots();
int numReservedSlots = job.getNumReservedSlots();
int usedMem = job.getUsedMem();
int rsvdMem = job.getReservedMem();
int neededMem = job.getNeededMem();
writer.printf(dataPattern,
job.getJobID().toString(), job.getState(), job.getStartTime(),
job.getUsername(), job.getQueue(),
job.getPriority().name(),
numUsedSlots < 0 ? UNAVAILABLE : numUsedSlots,
numReservedSlots < 0 ? UNAVAILABLE : numReservedSlots,
usedMem < 0 ? UNAVAILABLE : String.format(memPattern, usedMem),
rsvdMem < 0 ? UNAVAILABLE : String.format(memPattern, rsvdMem),
neededMem < 0 ? UNAVAILABLE : String.format(memPattern, neededMem),
job.getSchedulingInfo());
}
writer.flush();
}
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new CLI(), argv);
ExitUtil.terminate(res);
}
}
| 23,265 | 34.738863 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Master;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
/**
* This class provides user facing APIs for transferring secrets from
* the job client to the tasks.
* The secrets can be stored just before submission of jobs and read during
* the task execution.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TokenCache {
private static final Log LOG = LogFactory.getLog(TokenCache.class);
  /**
   * Auxiliary method to get the user's secret key.
   * @param credentials the credentials to look the key up in
   * @param alias the alias of the key
   * @return secret key from the storage, or null if credentials is null
   */
public static byte[] getSecretKey(Credentials credentials, Text alias) {
if(credentials == null)
return null;
return credentials.getSecretKey(alias);
}
/**
* Convenience method to obtain delegation tokens from namenodes
* corresponding to the paths passed.
* @param credentials
* @param ps array of paths
* @param conf configuration
* @throws IOException
*/
public static void obtainTokensForNamenodes(Credentials credentials,
Path[] ps, Configuration conf) throws IOException {
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
obtainTokensForNamenodesInternal(credentials, ps, conf);
}
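  /**
   * Editorial sketch (hypothetical, not part of the original class): the
   * typical submission-side call, so that tasks can later access the job's
   * input and output paths on a secure cluster.
   */
  private static void sketchObtainTokens(Configuration conf, Path input,
      Path output) throws IOException {
    Credentials creds = new Credentials();
    // No-op when security is off; otherwise one token is fetched per distinct
    // FileSystem backing the paths.
    obtainTokensForNamenodes(creds, new Path[] { input, output }, conf);
  }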
/**
* Remove jobtoken referrals which don't make sense in the context
* of the task execution.
*
* @param conf
*/
public static void cleanUpTokenReferral(Configuration conf) {
conf.unset(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
}
static void obtainTokensForNamenodesInternal(Credentials credentials,
Path[] ps, Configuration conf) throws IOException {
Set<FileSystem> fsSet = new HashSet<FileSystem>();
for(Path p: ps) {
fsSet.add(p.getFileSystem(conf));
}
for (FileSystem fs : fsSet) {
obtainTokensForNamenodesInternal(fs, credentials, conf);
}
}
static boolean isTokenRenewalExcluded(FileSystem fs, Configuration conf) {
String [] nns =
conf.getStrings(MRJobConfig.JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE);
if (nns != null) {
String host = fs.getUri().getHost();
      for (String nn : nns) {
        if (nn.equals(host)) {
          return true;
        }
      }
}
return false;
}
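  // Editorial note (illustrative, hypothetical host name): exclusion is keyed
  // on the FileSystem URI host, e.g.
  //   conf.setStrings(MRJobConfig.JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE,
  //       "nn1.example.com");
  // makes tokens for that namenode be requested with an empty renewer.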
/**
* get delegation token for a specific FS
* @param fs
* @param credentials
* @param conf
* @throws IOException
*/
static void obtainTokensForNamenodesInternal(FileSystem fs,
Credentials credentials, Configuration conf) throws IOException {
// RM skips renewing token with empty renewer
String delegTokenRenewer = "";
if (!isTokenRenewalExcluded(fs, conf)) {
delegTokenRenewer = Master.getMasterPrincipal(conf);
if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
throw new IOException(
"Can't get Master Kerberos principal for use as renewer");
}
}
mergeBinaryTokens(credentials, conf);
final Token<?> tokens[] = fs.addDelegationTokens(delegTokenRenewer,
credentials);
if (tokens != null) {
for (Token<?> token : tokens) {
LOG.info("Got dt for " + fs.getUri() + "; "+token);
}
}
}
private static void mergeBinaryTokens(Credentials creds, Configuration conf) {
String binaryTokenFilename =
conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
if (binaryTokenFilename != null) {
Credentials binary;
try {
binary = Credentials.readTokenStorageFile(
FileSystem.getLocal(conf).makeQualified(
new Path(binaryTokenFilename)),
conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
// supplement existing tokens with the tokens in the binary file
creds.mergeAll(binary);
}
}
/**
* file name used on HDFS for generated job token
*/
@InterfaceAudience.Private
public static final String JOB_TOKEN_HDFS_FILE = "jobToken";
/**
* conf setting for job tokens cache file name
*/
@InterfaceAudience.Private
public static final String JOB_TOKENS_FILENAME = "mapreduce.job.jobTokenFile";
private static final Text JOB_TOKEN = new Text("JobToken");
private static final Text SHUFFLE_TOKEN = new Text("MapReduceShuffleToken");
private static final Text ENC_SPILL_KEY = new Text("MapReduceEncryptedSpillKey");
/**
* load job token from a file
* @deprecated Use {@link Credentials#readTokenStorageFile} instead,
* this method is included for compatibility against Hadoop-1.
* @param conf
* @throws IOException
*/
@InterfaceAudience.Private
@Deprecated
public static Credentials loadTokens(String jobTokenFile, JobConf conf)
throws IOException {
    Path localJobTokenFile = new Path("file:///" + jobTokenFile);
Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);
if(LOG.isDebugEnabled()) {
LOG.debug("Task: Loaded jobTokenFile from: "+
localJobTokenFile.toUri().getPath()
+"; num of sec keys = " + ts.numberOfSecretKeys() +
" Number of tokens " + ts.numberOfTokens());
}
return ts;
}
/**
* load job token from a file
* @deprecated Use {@link Credentials#readTokenStorageFile} instead,
* this method is included for compatibility against Hadoop-1.
* @param conf
* @throws IOException
*/
@InterfaceAudience.Private
@Deprecated
public static Credentials loadTokens(String jobTokenFile, Configuration conf)
throws IOException {
return loadTokens(jobTokenFile, new JobConf(conf));
}
/**
* store job token
* @param t
*/
@InterfaceAudience.Private
public static void setJobToken(Token<? extends TokenIdentifier> t,
Credentials credentials) {
credentials.addToken(JOB_TOKEN, t);
}
/**
*
* @return job token
*/
@SuppressWarnings("unchecked")
@InterfaceAudience.Private
public static Token<JobTokenIdentifier> getJobToken(Credentials credentials) {
return (Token<JobTokenIdentifier>) credentials.getToken(JOB_TOKEN);
}
@InterfaceAudience.Private
public static void setShuffleSecretKey(byte[] key, Credentials credentials) {
credentials.addSecretKey(SHUFFLE_TOKEN, key);
}
@InterfaceAudience.Private
public static byte[] getShuffleSecretKey(Credentials credentials) {
return getSecretKey(credentials, SHUFFLE_TOKEN);
}
@InterfaceAudience.Private
public static void setEncryptedSpillKey(byte[] key, Credentials credentials) {
credentials.addSecretKey(ENC_SPILL_KEY, key);
}
@InterfaceAudience.Private
public static byte[] getEncryptedSpillKey(Credentials credentials) {
return getSecretKey(credentials, ENC_SPILL_KEY);
}
/**
* @deprecated Use {@link Credentials#getToken(org.apache.hadoop.io.Text)}
* instead, this method is included for compatibility against Hadoop-1
* @param namenode
* @return delegation token
*/
@InterfaceAudience.Private
@Deprecated
public static
Token<?> getDelegationToken(
Credentials credentials, String namenode) {
return (Token<?>) credentials.getToken(new Text(
namenode));
}
}
| 8,804 | 31.371324 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import javax.crypto.SecretKey;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.record.Utils;
import com.google.common.base.Charsets;
/**
 * Utilities for generating keys and hashes, and for verifying them,
 * for the shuffle.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SecureShuffleUtils {
private static final Log LOG = LogFactory.getLog(SecureShuffleUtils.class);
public static final String HTTP_HEADER_URL_HASH = "UrlHash";
public static final String HTTP_HEADER_REPLY_URL_HASH = "ReplyHash";
  /**
   * Base64 encoded hash of msg
   * @param msg the message to hash
   * @param key the secret key
   * @return the Base64 encoded hash
   */
public static String generateHash(byte[] msg, SecretKey key) {
return new String(Base64.encodeBase64(generateByteHash(msg, key)),
Charsets.UTF_8);
}
  /**
   * Calculate the hash of msg
   * @param msg the message to hash
   * @param key the secret key
   * @return the computed hash bytes
   */
private static byte[] generateByteHash(byte[] msg, SecretKey key) {
return JobTokenSecretManager.computeHash(msg, key);
}
  /**
   * Verify that hash equals HMacHash(msg)
   * @param hash the hash to verify
   * @param msg the original message
   * @param key the secret key
   * @return true if they are the same
   */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
byte[] msg_hash = generateByteHash(msg, key);
return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
/**
* Aux util to calculate hash of a String
* @param enc_str
* @param key
* @return Base64 encodedHash
* @throws IOException
*/
public static String hashFromString(String enc_str, SecretKey key)
throws IOException {
return generateHash(enc_str.getBytes(Charsets.UTF_8), key);
}
/**
* verify that base64Hash is same as HMacHash(msg)
* @param base64Hash (Base64 encoded hash)
* @param msg
* @throws IOException if not the same
*/
public static void verifyReply(String base64Hash, String msg, SecretKey key)
throws IOException {
byte[] hash = Base64.decodeBase64(base64Hash.getBytes(Charsets.UTF_8));
boolean res = verifyHash(hash, msg.getBytes(Charsets.UTF_8), key);
    if (!res) {
      throw new IOException("Verification of the hashReply failed");
    }
}
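  /**
   * Editorial sketch (hypothetical, not part of the original class):
   * round-trips the helpers above -- hash a message with the shuffle secret,
   * then verify the Base64 reply; verifyReply throws IOException on mismatch.
   */
  private static void sketchRoundTrip(SecretKey key) throws IOException {
    String msg = "8080/mapOutput?job=job_0001&reduce=0";
    String base64Hash = hashFromString(msg, key);
    verifyReply(base64Hash, msg, key);
  }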
/**
* Shuffle specific utils - build string for encoding from URL
* @param url
* @return string for encoding
*/
public static String buildMsgFrom(URL url) {
return buildMsgFrom(url.getPath(), url.getQuery(), url.getPort());
}
/**
* Shuffle specific utils - build string for encoding from URL
* @param request
* @return string for encoding
*/
public static String buildMsgFrom(HttpServletRequest request ) {
return buildMsgFrom(request.getRequestURI(), request.getQueryString(),
request.getLocalPort());
}
/**
* Shuffle specific utils - build string for encoding from URL
* @param uri_path
* @param uri_query
* @return string for encoding
*/
private static String buildMsgFrom(String uri_path, String uri_query, int port) {
return String.valueOf(port) + uri_path + "?" + uri_query;
}
/**
* byte array to Hex String
*
* @param ba
* @return string with HEX value of the key
*/
public static String toHex(byte[] ba) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
String strHex = "";
try {
PrintStream ps = new PrintStream(baos, false, "UTF-8");
for (byte b : ba) {
ps.printf("%x", b);
}
strHex = baos.toString("UTF-8");
    } catch (UnsupportedEncodingException e) {
      // UTF-8 is always supported, so this cannot happen
    }
return strHex;
}
}
| 4,852 | 30.108974 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.security.token;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,034 | 42.125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
/**
 * The token identifier for a job token.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobTokenIdentifier extends TokenIdentifier {
private Text jobid;
public final static Text KIND_NAME = new Text("mapreduce.job");
/**
* Default constructor
*/
public JobTokenIdentifier() {
this.jobid = new Text();
}
/**
* Create a job token identifier from a jobid
* @param jobid the jobid to use
*/
public JobTokenIdentifier(Text jobid) {
this.jobid = jobid;
}
/** {@inheritDoc} */
@Override
public Text getKind() {
return KIND_NAME;
}
/** {@inheritDoc} */
@Override
public UserGroupInformation getUser() {
if (jobid == null || "".equals(jobid.toString())) {
return null;
}
return UserGroupInformation.createRemoteUser(jobid.toString());
}
/**
* Get the jobid
* @return the jobid
*/
public Text getJobId() {
return jobid;
}
/** {@inheritDoc} */
@Override
public void readFields(DataInput in) throws IOException {
jobid.readFields(in);
}
/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
jobid.write(out);
}
@InterfaceAudience.Private
public static class Renewer extends Token.TrivialRenewer {
@Override
protected Text getKind() {
return KIND_NAME;
}
}
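  // Editorial sketch (hypothetical, not part of the original class): on the
  // AM side a job token is minted from the job id, e.g.
  //   JobTokenIdentifier ident = new JobTokenIdentifier(new Text(jobIdStr));
  //   Token<JobTokenIdentifier> token =
  //       new Token<JobTokenIdentifier>(ident, secretManager);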
}
| 2,620 | 25.474747 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
/**
* Look through tokens to find the first job token that matches the service
* and return it.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobTokenSelector implements TokenSelector<JobTokenIdentifier> {
@SuppressWarnings("unchecked")
@Override
public Token<JobTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (JobTokenIdentifier.KIND_NAME.equals(token.getKind())
&& service.equals(token.getService())) {
return (Token<JobTokenIdentifier>) token;
}
}
return null;
}
}
| 1,920 | 34.574074 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token;
import java.util.Map;
import java.util.TreeMap;
import javax.crypto.SecretKey;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
/**
* SecretManager for job token. It can be used to cache generated job tokens.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobTokenSecretManager extends SecretManager<JobTokenIdentifier> {
private final SecretKey masterKey;
private final Map<String, SecretKey> currentJobTokens;
/**
* Convert the byte[] to a secret key
* @param key the byte[] to create the secret key from
* @return the secret key
*/
public static SecretKey createSecretKey(byte[] key) {
return SecretManager.createSecretKey(key);
}
/**
* Compute the HMAC hash of the message using the key
* @param msg the message to hash
* @param key the key to use
* @return the computed hash
*/
public static byte[] computeHash(byte[] msg, SecretKey key) {
return createPassword(msg, key);
}
/**
* Default constructor
*/
public JobTokenSecretManager() {
this.masterKey = generateSecret();
this.currentJobTokens = new TreeMap<String, SecretKey>();
}
/**
* Create a new password/secret for the given job token identifier.
* @param identifier the job token identifier
* @return token password/secret
*/
@Override
public byte[] createPassword(JobTokenIdentifier identifier) {
byte[] result = createPassword(identifier.getBytes(), masterKey);
return result;
}
/**
* Add the job token of a job to cache
* @param jobId the job that owns the token
* @param token the job token
*/
public void addTokenForJob(String jobId, Token<JobTokenIdentifier> token) {
SecretKey tokenSecret = createSecretKey(token.getPassword());
synchronized (currentJobTokens) {
currentJobTokens.put(jobId, tokenSecret);
}
}
/**
* Remove the cached job token of a job from cache
* @param jobId the job whose token is to be removed
*/
public void removeTokenForJob(String jobId) {
synchronized (currentJobTokens) {
currentJobTokens.remove(jobId);
}
}
/**
* Look up the token password/secret for the given jobId.
* @param jobId the jobId to look up
* @return token password/secret as SecretKey
* @throws InvalidToken
*/
public SecretKey retrieveTokenSecret(String jobId) throws InvalidToken {
SecretKey tokenSecret = null;
synchronized (currentJobTokens) {
tokenSecret = currentJobTokens.get(jobId);
}
if (tokenSecret == null) {
throw new InvalidToken("Can't find job token for job " + jobId + " !!");
}
return tokenSecret;
}
/**
* Look up the token password/secret for the given job token identifier.
* @param identifier the job token identifier to look up
* @return token password/secret as byte[]
* @throws InvalidToken
*/
@Override
public byte[] retrievePassword(JobTokenIdentifier identifier)
throws InvalidToken {
return retrieveTokenSecret(identifier.getJobId().toString()).getEncoded();
}
/**
* Create an empty job token identifier
* @return a newly created empty job token identifier
*/
@Override
public JobTokenIdentifier createIdentifier() {
return new JobTokenIdentifier();
}
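  /**
   * Editorial sketch (hypothetical, not part of the original class): the
   * usual lifecycle -- cache the job token when a job starts, fetch its
   * secret to authenticate shuffle traffic, drop it when the job completes.
   */
  private void sketchLifecycle(String jobId, Token<JobTokenIdentifier> token)
      throws InvalidToken {
    addTokenForJob(jobId, token);
    SecretKey secret = retrieveTokenSecret(jobId);
    // the hash would travel in, and be checked against, the shuffle headers
    byte[] hash = computeHash("shuffle-header".getBytes(), secret);
    removeTokenForJob(jobId);
  }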
}
| 4,298 | 30.152174 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
/**
* A delegation token identifier that is specific to MapReduce.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
public static final Text MAPREDUCE_DELEGATION_KIND =
new Text("MAPREDUCE_DELEGATION_TOKEN");
/**
* Create an empty delegation token identifier for reading into.
*/
public DelegationTokenIdentifier() {
}
/**
* Create a new delegation token identifier
* @param owner the effective username of the token owner
* @param renewer the username of the renewer
* @param realUser the real username of the token owner
*/
public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
super(owner, renewer, realUser);
}
@Override
public Text getKind() {
return MAPREDUCE_DELEGATION_KIND;
}
}
| 1,979 | 33.137931 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,045 | 42.583333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
/**
* A delegation token that is specialized for MapReduce
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegationTokenSelector
extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
public DelegationTokenSelector() {
super(DelegationTokenIdentifier.MAPREDUCE_DELEGATION_KIND);
}
}
| 1,412 | 38.25 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
/**
* A MapReduce specific delegation token secret manager.
* The secret manager is responsible for generating and accepting the password
* for each token.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
/**
* Create a secret manager
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
* in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
* for expired tokens in milliseconds
*/
public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier();
}
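  /**
   * Editorial sketch (hypothetical values, not part of the original class):
   * a manager that rolls master keys daily, caps token lifetime at seven
   * days, requires daily renewal and scans for expired tokens hourly.
   */
  private static DelegationTokenSecretManager sketchCreate() {
    long dayMs = 24L * 60 * 60 * 1000;
    return new DelegationTokenSecretManager(
        dayMs, 7 * dayMs, dayMs, dayMs / 24);
  }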
}
| 2,482 | 40.383333 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.split;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,024 | 43.565217 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/SplitMetaInfoReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.split;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* A utility that reads the split meta info and creates
* split meta info objects
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SplitMetaInfoReader {
public static JobSplit.TaskSplitMetaInfo[] readSplitMetaInfo(
JobID jobId, FileSystem fs, Configuration conf, Path jobSubmitDir)
throws IOException {
long maxMetaInfoSize = conf.getLong(MRJobConfig.SPLIT_METAINFO_MAXSIZE,
MRJobConfig.DEFAULT_SPLIT_METAINFO_MAXSIZE);
Path metaSplitFile = JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir);
String jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDir).toString();
FileStatus fStatus = fs.getFileStatus(metaSplitFile);
if (maxMetaInfoSize > 0 && fStatus.getLen() > maxMetaInfoSize) {
      throw new IOException("Split metadata size exceeded " +
          maxMetaInfoSize + ". Aborting job " + jobId);
}
FSDataInputStream in = fs.open(metaSplitFile);
byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
in.readFully(header);
    if (!Arrays.equals(JobSplit.META_SPLIT_FILE_HEADER, header)) {
      in.close();
      throw new IOException("Invalid header on split file");
    }
int vers = WritableUtils.readVInt(in);
if (vers != JobSplit.META_SPLIT_VERSION) {
in.close();
throw new IOException("Unsupported split version " + vers);
}
int numSplits = WritableUtils.readVInt(in); //TODO: check for insane values
JobSplit.TaskSplitMetaInfo[] allSplitMetaInfo =
new JobSplit.TaskSplitMetaInfo[numSplits];
for (int i = 0; i < numSplits; i++) {
JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
splitMetaInfo.readFields(in);
JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(
jobSplitFile,
splitMetaInfo.getStartOffset());
allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex,
splitMetaInfo.getLocations(),
splitMetaInfo.getInputDataLength());
}
in.close();
return allSplitMetaInfo;
}
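  /**
   * Editorial sketch (hypothetical, not part of the original class): how a
   * job-initialization path might load the meta info written at submit time.
   */
  private static void sketchRead(Configuration conf, Path jobSubmitDir,
      JobID jobId) throws IOException {
    FileSystem fs = jobSubmitDir.getFileSystem(conf);
    JobSplit.TaskSplitMetaInfo[] metaInfo =
        readSplitMetaInfo(jobId, fs, conf, jobSubmitDir);
    // metaInfo[i].getLocations() drives task placement; the embedded split
    // index points each map task at its raw split bytes.
  }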
}
| 3,465 | 39.776471 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.split;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class groups the fundamental classes associated with
* reading/writing splits. The split information is divided into
* two parts based on the consumer of the information. The two
* parts are the split meta information, and the raw split
* information. The first part is consumed by the JobTracker to
* create the tasks' locality data structures. The second part is
* used by the maps at runtime to know what to do!
* These pieces of information are written to two separate files.
* The metainformation file is slurped by the JobTracker during
* job initialization. A map task gets the meta information during
* the launch and it reads the raw split bytes directly from the
* file.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobSplit {
static final int META_SPLIT_VERSION = 1;
static final byte[] META_SPLIT_FILE_HEADER;
static {
try {
META_SPLIT_FILE_HEADER = "META-SPL".getBytes("UTF-8");
} catch (UnsupportedEncodingException u) {
throw new RuntimeException(u);
}
}
public static final TaskSplitMetaInfo EMPTY_TASK_SPLIT =
new TaskSplitMetaInfo();
/**
* This represents the meta information about the task split.
* The main fields are
* - start offset in actual split
* - data length that will be processed in this split
* - hosts on which this split is local
*/
public static class SplitMetaInfo implements Writable {
private long startOffset;
private long inputDataLength;
private String[] locations;
public SplitMetaInfo() {}
public SplitMetaInfo(String[] locations, long startOffset,
long inputDataLength) {
this.locations = locations;
this.startOffset = startOffset;
this.inputDataLength = inputDataLength;
}
public SplitMetaInfo(InputSplit split, long startOffset) throws IOException {
try {
this.locations = split.getLocations();
this.inputDataLength = split.getLength();
this.startOffset = startOffset;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
public String[] getLocations() {
return locations;
}
public long getStartOffset() {
return startOffset;
}
public long getInputDataLength() {
return inputDataLength;
}
public void setInputDataLocations(String[] locations) {
this.locations = locations;
}
public void setInputDataLength(long length) {
this.inputDataLength = length;
}
public void readFields(DataInput in) throws IOException {
int len = WritableUtils.readVInt(in);
locations = new String[len];
for (int i = 0; i < locations.length; i++) {
locations[i] = Text.readString(in);
}
startOffset = WritableUtils.readVLong(in);
inputDataLength = WritableUtils.readVLong(in);
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, locations.length);
for (int i = 0; i < locations.length; i++) {
Text.writeString(out, locations[i]);
}
WritableUtils.writeVLong(out, startOffset);
WritableUtils.writeVLong(out, inputDataLength);
}
@Override
    public String toString() {
      StringBuilder buf = new StringBuilder();
      buf.append("data-size : " + inputDataLength + "\n");
      buf.append("start-offset : " + startOffset + "\n");
      buf.append("locations : " + "\n");
      for (String loc : locations) {
        buf.append("  " + loc + "\n");
      }
      return buf.toString();
    }
}
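  // Round-trip sketch (illustrative only): SplitMetaInfo follows the standard
  // Writable contract, so any DataOutput/DataInput pair works. The in-memory
  // buffer below is an assumption for the demo; the real producer and consumer
  // are JobSplitWriter and SplitMetaInfoReader.
  //
  //   SplitMetaInfo meta =
  //       new SplitMetaInfo(new String[] {"host1", "host2"}, 0L, 1024L);
  //   java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
  //   meta.write(new java.io.DataOutputStream(bytes));
  //   SplitMetaInfo copy = new SplitMetaInfo();
  //   copy.readFields(new java.io.DataInputStream(
  //       new java.io.ByteArrayInputStream(bytes.toByteArray())));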
/**
* This represents the meta information about the task split that the
* JobTracker creates
*/
public static class TaskSplitMetaInfo {
private TaskSplitIndex splitIndex;
private long inputDataLength;
private String[] locations;
public TaskSplitMetaInfo(){
this.splitIndex = new TaskSplitIndex();
this.locations = new String[0];
}
public TaskSplitMetaInfo(TaskSplitIndex splitIndex, String[] locations,
long inputDataLength) {
this.splitIndex = splitIndex;
this.locations = locations;
this.inputDataLength = inputDataLength;
}
public TaskSplitMetaInfo(InputSplit split, long startOffset)
throws InterruptedException, IOException {
this(new TaskSplitIndex("", startOffset), split.getLocations(),
split.getLength());
}
public TaskSplitMetaInfo(String[] locations, long startOffset,
long inputDataLength) {
this(new TaskSplitIndex("",startOffset), locations, inputDataLength);
}
public TaskSplitIndex getSplitIndex() {
return splitIndex;
}
public String getSplitLocation() {
return splitIndex.getSplitLocation();
}
public long getInputDataLength() {
return inputDataLength;
}
public String[] getLocations() {
return locations;
}
public long getStartOffset() {
return splitIndex.getStartOffset();
}
}
/**
* This represents the meta information about the task split that the
* task gets
*/
public static class TaskSplitIndex {
private String splitLocation;
private long startOffset;
public TaskSplitIndex(){
this("", 0);
}
public TaskSplitIndex(String splitLocation, long startOffset) {
this.splitLocation = splitLocation;
this.startOffset = startOffset;
}
public long getStartOffset() {
return startOffset;
}
public String getSplitLocation() {
return splitLocation;
}
public void readFields(DataInput in) throws IOException {
splitLocation = Text.readString(in);
startOffset = WritableUtils.readVLong(in);
}
public void write(DataOutput out) throws IOException {
Text.writeString(out, splitLocation);
WritableUtils.writeVLong(out, startOffset);
}
}
}
| 7,182 | 31.65 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.split;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* The class that is used by the Job clients to write splits (both the meta
* and the raw bytes parts)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobSplitWriter {
private static final Log LOG = LogFactory.getLog(JobSplitWriter.class);
private static final int splitVersion = JobSplit.META_SPLIT_VERSION;
private static final byte[] SPLIT_FILE_HEADER;
static {
try {
SPLIT_FILE_HEADER = "SPL".getBytes("UTF-8");
} catch (UnsupportedEncodingException u) {
throw new RuntimeException(u);
}
}
@SuppressWarnings("unchecked")
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir,
Configuration conf, FileSystem fs, List<InputSplit> splits)
throws IOException, InterruptedException {
T[] array = (T[]) splits.toArray(new InputSplit[splits.size()]);
createSplitFiles(jobSubmitDir, conf, fs, array);
}
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir,
Configuration conf, FileSystem fs, T[] splits)
throws IOException, InterruptedException {
FSDataOutputStream out = createFile(fs,
JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
out.close();
writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir),
new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
info);
}
public static void createSplitFiles(Path jobSubmitDir,
Configuration conf, FileSystem fs,
org.apache.hadoop.mapred.InputSplit[] splits)
throws IOException {
FSDataOutputStream out = createFile(fs,
JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
out.close();
writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir),
new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
info);
}
private static FSDataOutputStream createFile(FileSystem fs, Path splitFile,
Configuration job) throws IOException {
FSDataOutputStream out = FileSystem.create(fs, splitFile,
new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
int replication = job.getInt(Job.SUBMIT_REPLICATION, 10);
fs.setReplication(splitFile, (short)replication);
writeSplitHeader(out);
return out;
}
private static void writeSplitHeader(FSDataOutputStream out)
throws IOException {
out.write(SPLIT_FILE_HEADER);
out.writeInt(splitVersion);
}
@SuppressWarnings("unchecked")
private static <T extends InputSplit>
SplitMetaInfo[] writeNewSplits(Configuration conf,
T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {
SplitMetaInfo[] info = new SplitMetaInfo[array.length];
if (array.length != 0) {
SerializationFactory factory = new SerializationFactory(conf);
int i = 0;
int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
long offset = out.getPos();
for(T split: array) {
long prevCount = out.getPos();
Text.writeString(out, split.getClass().getName());
Serializer<T> serializer =
factory.getSerializer((Class<T>) split.getClass());
serializer.open(out);
serializer.serialize(split);
long currCount = out.getPos();
String[] locations = split.getLocations();
if (locations.length > maxBlockLocations) {
LOG.warn("Max block location exceeded for split: "
+ split + " splitsize: " + locations.length +
" maxsize: " + maxBlockLocations);
locations = Arrays.copyOf(locations, maxBlockLocations);
}
info[i++] =
new JobSplit.SplitMetaInfo(
locations, offset,
split.getLength());
offset += currCount - prevCount;
}
}
return info;
}
private static SplitMetaInfo[] writeOldSplits(
org.apache.hadoop.mapred.InputSplit[] splits,
FSDataOutputStream out, Configuration conf) throws IOException {
SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
if (splits.length != 0) {
int i = 0;
long offset = out.getPos();
int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
for(org.apache.hadoop.mapred.InputSplit split: splits) {
long prevLen = out.getPos();
Text.writeString(out, split.getClass().getName());
split.write(out);
long currLen = out.getPos();
String[] locations = split.getLocations();
if (locations.length > maxBlockLocations) {
LOG.warn("Max block location exceeded for split: "
+ split + " splitsize: " + locations.length +
" maxsize: " + maxBlockLocations);
locations = Arrays.copyOf(locations,maxBlockLocations);
}
info[i++] = new JobSplit.SplitMetaInfo(
locations, offset,
split.getLength());
offset += currLen - prevLen;
}
}
return info;
}
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename,
FsPermission p, int splitMetaInfoVersion,
JobSplit.SplitMetaInfo[] allSplitMetaInfo)
throws IOException {
// write the splits meta-info to a file for the job tracker
FSDataOutputStream out =
FileSystem.create(fs, filename, p);
out.write(JobSplit.META_SPLIT_FILE_HEADER);
WritableUtils.writeVInt(out, splitMetaInfoVersion);
WritableUtils.writeVInt(out, allSplitMetaInfo.length);
for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
splitMetaInfo.write(out);
}
out.close();
}
}
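// Usage sketch (illustrative, not part of the original source): during job
// submission the client computes the splits and hands them to this writer,
// which emits both the raw split file and the meta info file later consumed
// by SplitMetaInfoReader. The input format and the in-scope job, conf, fs and
// jobSubmitDir variables are assumptions for the demo.
//
//   List<InputSplit> splits = new TextInputFormat().getSplits(job);
//   JobSplitWriter.createSplitFiles(jobSubmitDir, conf, fs, splits);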
| 7,700 | 37.698492 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.avro.util.Utf8;
/**
* Event to record the initialization of a job
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobInitedEvent implements HistoryEvent {
private JobInited datum = new JobInited();
/**
* Create an event to record job initialization
   * @param id Job ID of the job being initialized
   * @param launchTime Launch time of the job
   * @param totalMaps The total number of map tasks
   * @param totalReduces The total number of reduce tasks
   * @param jobStatus The status of the job at launch
* @param uberized True if the job's map and reduce stages were combined
*/
public JobInitedEvent(JobID id, long launchTime, int totalMaps,
int totalReduces, String jobStatus, boolean uberized) {
datum.setJobid(new Utf8(id.toString()));
datum.setLaunchTime(launchTime);
datum.setTotalMaps(totalMaps);
datum.setTotalReduces(totalReduces);
datum.setJobStatus(new Utf8(jobStatus));
datum.setUberized(uberized);
}
JobInitedEvent() { }
public Object getDatum() { return datum; }
public void setDatum(Object datum) { this.datum = (JobInited)datum; }
/** Get the job ID */
public JobID getJobId() { return JobID.forName(datum.getJobid().toString()); }
/** Get the launch time */
public long getLaunchTime() { return datum.getLaunchTime(); }
/** Get the total number of maps */
public int getTotalMaps() { return datum.getTotalMaps(); }
/** Get the total number of reduces */
public int getTotalReduces() { return datum.getTotalReduces(); }
/** Get the status */
public String getStatus() { return datum.getJobStatus().toString(); }
/** Get the event type */
public EventType getEventType() {
return EventType.JOB_INITED;
}
/** Get whether the job's map and reduce stages were combined */
public boolean getUberized() { return datum.getUberized(); }
}
| 2,775 | 35.051948 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,030 | 41.958333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
/**
* Event to record successful task completion
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskAttemptFinishedEvent implements HistoryEvent {
private TaskAttemptFinished datum = null;
private TaskAttemptID attemptId;
private TaskType taskType;
private String taskStatus;
private long finishTime;
private String rackName;
private String hostname;
private String state;
private Counters counters;
/**
* Create an event to record successful finishes for setup and cleanup
* attempts
* @param id Attempt ID
* @param taskType Type of task
* @param taskStatus Status of task
   * @param finishTime Finish time of the attempt
   * @param rackName Name of the rack where the attempt executed
   * @param hostname Host where the attempt executed
* @param state State string
* @param counters Counters for the attempt
*/
public TaskAttemptFinishedEvent(TaskAttemptID id,
TaskType taskType, String taskStatus,
long finishTime, String rackName,
String hostname, String state, Counters counters) {
this.attemptId = id;
this.taskType = taskType;
this.taskStatus = taskStatus;
this.finishTime = finishTime;
this.rackName = rackName;
this.hostname = hostname;
this.state = state;
this.counters = counters;
}
TaskAttemptFinishedEvent() {}
public Object getDatum() {
if (datum == null) {
datum = new TaskAttemptFinished();
datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
datum.setAttemptId(new Utf8(attemptId.toString()));
datum.setTaskType(new Utf8(taskType.name()));
datum.setTaskStatus(new Utf8(taskStatus));
datum.setFinishTime(finishTime);
if (rackName != null) {
datum.setRackname(new Utf8(rackName));
}
datum.setHostname(new Utf8(hostname));
datum.setState(new Utf8(state));
datum.setCounters(EventWriter.toAvro(counters));
}
return datum;
}
public void setDatum(Object oDatum) {
this.datum = (TaskAttemptFinished)oDatum;
this.attemptId = TaskAttemptID.forName(datum.getAttemptId().toString());
this.taskType = TaskType.valueOf(datum.getTaskType().toString());
this.taskStatus = datum.getTaskStatus().toString();
this.finishTime = datum.getFinishTime();
    this.rackName = datum.getRackname() == null
        ? null : datum.getRackname().toString();
this.hostname = datum.getHostname().toString();
this.state = datum.getState().toString();
this.counters = EventReader.fromAvro(datum.getCounters());
}
/** Get the task ID */
public TaskID getTaskId() { return attemptId.getTaskID(); }
/** Get the task attempt id */
public TaskAttemptID getAttemptId() {
return attemptId;
}
/** Get the task type */
public TaskType getTaskType() {
return taskType;
}
  /** Get the task status */
  public String getTaskStatus() { return taskStatus; }
  /** Get the attempt finish time */
  public long getFinishTime() { return finishTime; }
  /** Get the host where the attempt executed */
  public String getHostname() { return hostname; }
  /** Get the rackname where the attempt executed */
  public String getRackName() {
    return rackName;
  }
  /** Get the state string */
  public String getState() { return state; }
/** Get the counters for the attempt */
Counters getCounters() { return counters; }
/** Get the event type */
public EventType getEventType() {
// Note that the task type can be setup/map/reduce/cleanup but the
// attempt-type can only be map/reduce.
return getTaskId().getTaskType() == TaskType.MAP
? EventType.MAP_ATTEMPT_FINISHED
: EventType.REDUCE_ATTEMPT_FINISHED;
}
}
| 4,883 | 33.885714 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobPriorityChangeEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.avro.util.Utf8;
/**
* Event to record the change of priority of a job
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobPriorityChangeEvent implements HistoryEvent {
private JobPriorityChange datum = new JobPriorityChange();
/** Generate an event to record changes in Job priority
* @param id Job Id
* @param priority The new priority of the job
*/
public JobPriorityChangeEvent(JobID id, JobPriority priority) {
datum.setJobid(new Utf8(id.toString()));
datum.setPriority(new Utf8(priority.name()));
}
JobPriorityChangeEvent() { }
public Object getDatum() { return datum; }
public void setDatum(Object datum) {
this.datum = (JobPriorityChange)datum;
}
/** Get the Job ID */
public JobID getJobId() {
return JobID.forName(datum.getJobid().toString());
}
/** Get the job priority */
public JobPriority getPriority() {
return JobPriority.valueOf(datum.getPriority().toString());
}
/** Get the event type */
public EventType getEventType() {
return EventType.JOB_PRIORITY_CHANGED;
}
}
| 2,195 | 30.826087 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobStatusChangedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.avro.util.Utf8;
/**
* Event to record the change of status for a job
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobStatusChangedEvent implements HistoryEvent {
private JobStatusChanged datum = new JobStatusChanged();
/**
* Create an event to record the change in the Job Status
* @param id Job ID
* @param jobStatus The new job status
*/
public JobStatusChangedEvent(JobID id, String jobStatus) {
datum.setJobid(new Utf8(id.toString()));
datum.setJobStatus(new Utf8(jobStatus));
}
JobStatusChangedEvent() {}
public Object getDatum() { return datum; }
public void setDatum(Object datum) {
this.datum = (JobStatusChanged)datum;
}
/** Get the Job Id */
public JobID getJobId() { return JobID.forName(datum.getJobid().toString()); }
/** Get the event status */
public String getStatus() { return datum.getJobStatus().toString(); }
/** Get the event type */
public EventType getEventType() {
return EventType.JOB_STATUS_CHANGED;
}
}
| 2,092 | 31.2 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
/**
* Event to record successful completion of a reduce attempt
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ReduceAttemptFinishedEvent implements HistoryEvent {
private ReduceAttemptFinished datum = null;
private TaskAttemptID attemptId;
private TaskType taskType;
private String taskStatus;
private long shuffleFinishTime;
private long sortFinishTime;
private long finishTime;
private String hostname;
private String rackName;
private int port;
private String state;
private Counters counters;
int[][] allSplits;
int[] clockSplits;
int[] cpuUsages;
int[] vMemKbytes;
int[] physMemKbytes;
/**
* Create an event to record completion of a reduce attempt
* @param id Attempt Id
* @param taskType Type of task
* @param taskStatus Status of the task
* @param shuffleFinishTime Finish time of the shuffle phase
* @param sortFinishTime Finish time of the sort phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the attempt executed
* @param port RPC port for the tracker host.
* @param rackName Name of the rack where the attempt executed
* @param state State of the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
* measurable worker node state variables against progress.
   *        Currently there are four: wallclock time, CPU time,
* virtual memory and physical memory.
*/
public ReduceAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long shuffleFinishTime, long sortFinishTime, long finishTime,
String hostname, int port, String rackName, String state,
Counters counters, int[][] allSplits) {
this.attemptId = id;
this.taskType = taskType;
this.taskStatus = taskStatus;
this.shuffleFinishTime = shuffleFinishTime;
this.sortFinishTime = sortFinishTime;
this.finishTime = finishTime;
this.hostname = hostname;
this.rackName = rackName;
this.port = port;
this.state = state;
this.counters = counters;
this.allSplits = allSplits;
this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
/**
   * @deprecated please use the constructor with an additional
   * argument, an array of split arrays, instead. See
* {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
* for an explanation of the meaning of that parameter.
*
* Create an event to record completion of a reduce attempt
* @param id Attempt Id
* @param taskType Type of task
* @param taskStatus Status of the task
* @param shuffleFinishTime Finish time of the shuffle phase
* @param sortFinishTime Finish time of the sort phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the attempt executed
* @param state State of the attempt
* @param counters Counters for the attempt
*/
public ReduceAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long shuffleFinishTime, long sortFinishTime, long finishTime,
String hostname, String state, Counters counters) {
this(id, taskType, taskStatus,
shuffleFinishTime, sortFinishTime, finishTime,
hostname, -1, "", state, counters, null);
}
ReduceAttemptFinishedEvent() {}
public Object getDatum() {
if (datum == null) {
datum = new ReduceAttemptFinished();
datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
datum.setAttemptId(new Utf8(attemptId.toString()));
datum.setTaskType(new Utf8(taskType.name()));
datum.setTaskStatus(new Utf8(taskStatus));
datum.setShuffleFinishTime(shuffleFinishTime);
datum.setSortFinishTime(sortFinishTime);
datum.setFinishTime(finishTime);
datum.setHostname(new Utf8(hostname));
datum.setPort(port);
if (rackName != null) {
datum.setRackname(new Utf8(rackName));
}
datum.setState(new Utf8(state));
datum.setCounters(EventWriter.toAvro(counters));
datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetWallclockTime(allSplits)));
datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetCPUTime(allSplits)));
datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetVMemKbytes(allSplits)));
datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetPhysMemKbytes(allSplits)));
}
return datum;
}
public void setDatum(Object oDatum) {
this.datum = (ReduceAttemptFinished)oDatum;
this.attemptId = TaskAttemptID.forName(datum.getAttemptId().toString());
this.taskType = TaskType.valueOf(datum.getTaskType().toString());
this.taskStatus = datum.getTaskStatus().toString();
this.shuffleFinishTime = datum.getShuffleFinishTime();
this.sortFinishTime = datum.getSortFinishTime();
this.finishTime = datum.getFinishTime();
this.hostname = datum.getHostname().toString();
    this.rackName = datum.getRackname() == null
        ? null : datum.getRackname().toString();
this.port = datum.getPort();
this.state = datum.getState().toString();
this.counters = EventReader.fromAvro(datum.getCounters());
this.clockSplits = AvroArrayUtils.fromAvro(datum.getClockSplits());
this.cpuUsages = AvroArrayUtils.fromAvro(datum.getCpuUsages());
this.vMemKbytes = AvroArrayUtils.fromAvro(datum.getVMemKbytes());
this.physMemKbytes = AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
}
/** Get the Task ID */
public TaskID getTaskId() { return attemptId.getTaskID(); }
/** Get the attempt id */
public TaskAttemptID getAttemptId() {
return attemptId;
}
/** Get the task type */
public TaskType getTaskType() {
return taskType;
}
  /** Get the task status */
  public String getTaskStatus() { return taskStatus; }
  /** Get the finish time of the sort phase */
  public long getSortFinishTime() { return sortFinishTime; }
  /** Get the finish time of the shuffle phase */
  public long getShuffleFinishTime() { return shuffleFinishTime; }
  /** Get the finish time of the attempt */
  public long getFinishTime() { return finishTime; }
  /** Get the name of the host where the attempt ran */
  public String getHostname() { return hostname; }
  /** Get the tracker rpc port */
  public int getPort() { return port; }
  /** Get the rack name of the node where the attempt ran */
  public String getRackName() {
    return rackName;
  }
  /** Get the state string */
  public String getState() { return state; }
/** Get the counters for the attempt */
Counters getCounters() { return counters; }
/** Get the event type */
public EventType getEventType() {
return EventType.REDUCE_ATTEMPT_FINISHED;
}
public int[] getClockSplits() {
return clockSplits;
}
public int[] getCpuUsages() {
return cpuUsages;
}
public int[] getVMemKbytes() {
return vMemKbytes;
}
public int[] getPhysMemKbytes() {
return physMemKbytes;
}
}
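// Shape of the "allSplits" argument (illustrative): ProgressSplitsBlock keeps
// one int[] per tracked quantity, all the same length, sampled against task
// progress. A hand-built example with four progress buckets might look like:
//
//   int[][] allSplits = new int[][] {
//       {100, 210, 330, 450},   // wallclock time
//       { 40,  95, 150, 200},   // CPU time
//       {512, 512, 768, 768},   // virtual memory (KB)
//       {256, 300, 420, 500},   // physical memory (KB)
//   };
//
// The row order above is an assumption for illustration; the authoritative
// accessors are the ProgressSplitsBlock.arrayGet*(allSplits) methods used in
// the constructor.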
| 8,662 | 37.162996 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.avro.util.Utf8;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import com.google.common.annotations.VisibleForTesting;
/**
 * Event Writer is a utility class used to write events to the underlying
* stream. Typically, one event writer (which translates to one stream)
* is created per job
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class EventWriter {
static final String VERSION = "Avro-Json";
static final String VERSION_BINARY = "Avro-Binary";
private FSDataOutputStream out;
private DatumWriter<Event> writer =
new SpecificDatumWriter<Event>(Event.class);
private Encoder encoder;
private static final Log LOG = LogFactory.getLog(EventWriter.class);
/**
* avro encoding format supported by EventWriter.
*/
public enum WriteMode { JSON, BINARY }
private final WriteMode writeMode;
private final boolean jsonOutput; // Cache value while we have 2 modes
@VisibleForTesting
public EventWriter(FSDataOutputStream out, WriteMode mode)
throws IOException {
this.out = out;
this.writeMode = mode;
    if (this.writeMode == WriteMode.JSON) {
      this.jsonOutput = true;
      out.writeBytes(VERSION);
    } else if (this.writeMode == WriteMode.BINARY) {
      this.jsonOutput = false;
      out.writeBytes(VERSION_BINARY);
    } else {
      throw new IOException("Unknown mode: " + mode);
    }
out.writeBytes("\n");
out.writeBytes(Event.SCHEMA$.toString());
out.writeBytes("\n");
if (!this.jsonOutput) {
this.encoder = EncoderFactory.get().binaryEncoder(out, null);
} else {
this.encoder = EncoderFactory.get().jsonEncoder(Event.SCHEMA$, out);
}
}
synchronized void write(HistoryEvent event) throws IOException {
Event wrapper = new Event();
wrapper.setType(event.getEventType());
wrapper.setEvent(event.getDatum());
writer.write(wrapper, encoder);
encoder.flush();
if (this.jsonOutput) {
out.writeBytes("\n");
}
}
void flush() throws IOException {
encoder.flush();
out.flush();
out.hflush();
}
@VisibleForTesting
public void close() throws IOException {
try {
encoder.flush();
out.close();
out = null;
} finally {
IOUtils.cleanup(LOG, out);
}
}
private static final Schema GROUPS =
Schema.createArray(JhCounterGroup.SCHEMA$);
private static final Schema COUNTERS =
Schema.createArray(JhCounter.SCHEMA$);
static JhCounters toAvro(Counters counters) {
return toAvro(counters, "COUNTERS");
}
static JhCounters toAvro(Counters counters, String name) {
JhCounters result = new JhCounters();
result.setName(new Utf8(name));
result.setGroups(new ArrayList<JhCounterGroup>(0));
if (counters == null) return result;
for (CounterGroup group : counters) {
JhCounterGroup g = new JhCounterGroup();
g.setName(new Utf8(group.getName()));
g.setDisplayName(new Utf8(group.getDisplayName()));
g.setCounts(new ArrayList<JhCounter>(group.size()));
for (Counter counter : group) {
JhCounter c = new JhCounter();
c.setName(new Utf8(counter.getName()));
c.setDisplayName(new Utf8(counter.getDisplayName()));
c.setValue(counter.getValue());
g.getCounts().add(c);
}
result.getGroups().add(g);
}
return result;
}
}
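// Usage sketch (illustrative, not part of the original source): a history
// event handler in this package opens one writer per job over an
// FSDataOutputStream and funnels every HistoryEvent through it. Note that
// write(..) is package-private, so callers live in this package; the stream
// path and event values below are assumptions.
//
//   FSDataOutputStream stream = fs.create(historyFilePath);
//   EventWriter writer = new EventWriter(stream, EventWriter.WriteMode.JSON);
//   writer.write(new JobInitedEvent(jobId, launchTime, totalMaps,
//       totalReduces, "LAUNCHED", false));
//   writer.close();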
| 4,865 | 31.225166 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.avro.util.Utf8;
/**
* Event to record start of a task attempt
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskAttemptStartedEvent implements HistoryEvent {
private TaskAttemptStarted datum = new TaskAttemptStarted();
/**
* Create an event to record the start of an attempt
* @param attemptId Id of the attempt
* @param taskType Type of task
* @param startTime Start time of the attempt
* @param trackerName Name of the Task Tracker where attempt is running
* @param httpPort The port number of the tracker
* @param shufflePort The shuffle port number of the container
* @param containerId The containerId for the task attempt.
* @param locality The locality of the task attempt
* @param avataar The avataar of the task attempt
*/
  public TaskAttemptStartedEvent(TaskAttemptID attemptId,
TaskType taskType, long startTime, String trackerName,
int httpPort, int shufflePort, ContainerId containerId,
String locality, String avataar) {
datum.setAttemptId(new Utf8(attemptId.toString()));
datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
datum.setStartTime(startTime);
datum.setTaskType(new Utf8(taskType.name()));
datum.setTrackerName(new Utf8(trackerName));
datum.setHttpPort(httpPort);
datum.setShufflePort(shufflePort);
datum.setContainerId(new Utf8(containerId.toString()));
if (locality != null) {
datum.setLocality(new Utf8(locality));
}
if (avataar != null) {
datum.setAvataar(new Utf8(avataar));
}
}
// TODO Remove after MrV1 is removed.
// Using a dummy containerId to prevent jobHistory parse failures.
public TaskAttemptStartedEvent(TaskAttemptID attemptId, TaskType taskType,
long startTime, String trackerName, int httpPort, int shufflePort,
String locality, String avataar) {
this(attemptId, taskType, startTime, trackerName, httpPort, shufflePort,
ConverterUtils.toContainerId("container_-1_-1_-1_-1"), locality,
avataar);
}
TaskAttemptStartedEvent() {}
public Object getDatum() { return datum; }
public void setDatum(Object datum) {
this.datum = (TaskAttemptStarted)datum;
}
/** Get the task id */
public TaskID getTaskId() {
return TaskID.forName(datum.getTaskid().toString());
}
/** Get the tracker name */
public String getTrackerName() { return datum.getTrackerName().toString(); }
/** Get the start time */
public long getStartTime() { return datum.getStartTime(); }
/** Get the task type */
public TaskType getTaskType() {
return TaskType.valueOf(datum.getTaskType().toString());
}
/** Get the HTTP port */
public int getHttpPort() { return datum.getHttpPort(); }
/** Get the shuffle port */
public int getShufflePort() { return datum.getShufflePort(); }
/** Get the attempt id */
public TaskAttemptID getTaskAttemptId() {
return TaskAttemptID.forName(datum.getAttemptId().toString());
}
/** Get the event type */
public EventType getEventType() {
// Note that the task type can be setup/map/reduce/cleanup but the
// attempt-type can only be map/reduce.
return getTaskId().getTaskType() == TaskType.MAP
? EventType.MAP_ATTEMPT_STARTED
: EventType.REDUCE_ATTEMPT_STARTED;
}
/** Get the ContainerId */
public ContainerId getContainerId() {
return ConverterUtils.toContainerId(datum.getContainerId().toString());
}
/** Get the locality */
public String getLocality() {
if (datum.getLocality() != null) {
return datum.getLocality().toString();
}
return null;
}
/** Get the avataar */
public String getAvataar() {
if (datum.getAvataar() != null) {
return datum.getAvataar().toString();
}
return null;
}
}
| 5,034 | 35.751825 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEventHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.IOException;
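/**
 * Callback interface implemented by consumers of {@link HistoryEvent}s.
 */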
public interface HistoryEventHandler {
void handleEvent(HistoryEvent event) throws IOException;
}
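// Implementation sketch (illustrative only): any consumer of history events
// can adapt itself to this interface; the logging target is an assumption.
//
//   class LoggingHandler implements HistoryEventHandler {
//     public void handleEvent(HistoryEvent event) throws IOException {
//       System.out.println(event.getEventType() + ": " + event.getDatum());
//     }
//   }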
| 988 | 34.321429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface for event wrapper classes. Implementations each wrap an
* Avro-generated class, adding constructors and accessor methods.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface HistoryEvent {
/** Return this event's type. */
EventType getEventType();
/** Return the Avro datum wrapped by this. */
Object getDatum();
/** Set the Avro datum wrapped by this. */
void setDatum(Object datum);
}
| 1,423 | 33.731707 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSubmittedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.avro.util.Utf8;
/**
* Event to record the submission of a job
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobSubmittedEvent implements HistoryEvent {
private JobSubmitted datum = new JobSubmitted();
/**
* Create an event to record job submission
* @param id The job Id of the job
* @param jobName Name of the job
* @param userName Name of the user who submitted the job
* @param submitTime Time of submission
* @param jobConfPath Path of the Job Configuration file
* @param jobACLs The configured acls for the job.
   * @param jobQueueName The job-queue to which this job was submitted
*/
public JobSubmittedEvent(JobID id, String jobName, String userName,
long submitTime, String jobConfPath,
Map<JobACL, AccessControlList> jobACLs, String jobQueueName) {
this(id, jobName, userName, submitTime, jobConfPath, jobACLs,
jobQueueName, "", "", "", "");
}
/**
* Create an event to record job submission
* @param id The job Id of the job
* @param jobName Name of the job
* @param userName Name of the user who submitted the job
* @param submitTime Time of submission
* @param jobConfPath Path of the Job Configuration file
* @param jobACLs The configured acls for the job.
   * @param jobQueueName The job-queue to which this job was submitted
* @param workflowId The Id of the workflow
* @param workflowName The name of the workflow
* @param workflowNodeName The node name of the workflow
* @param workflowAdjacencies The adjacencies of the workflow
*/
public JobSubmittedEvent(JobID id, String jobName, String userName,
long submitTime, String jobConfPath,
Map<JobACL, AccessControlList> jobACLs, String jobQueueName,
String workflowId, String workflowName, String workflowNodeName,
String workflowAdjacencies) {
this(id, jobName, userName, submitTime, jobConfPath, jobACLs,
jobQueueName, workflowId, workflowName, workflowNodeName,
workflowAdjacencies, "");
}
/**
* Create an event to record job submission
* @param id The job Id of the job
* @param jobName Name of the job
* @param userName Name of the user who submitted the job
* @param submitTime Time of submission
* @param jobConfPath Path of the Job Configuration file
* @param jobACLs The configured acls for the job.
   * @param jobQueueName The job-queue to which this job was submitted
* @param workflowId The Id of the workflow
* @param workflowName The name of the workflow
* @param workflowNodeName The node name of the workflow
* @param workflowAdjacencies The adjacencies of the workflow
* @param workflowTags Comma-separated tags for the workflow
*/
public JobSubmittedEvent(JobID id, String jobName, String userName,
long submitTime, String jobConfPath,
Map<JobACL, AccessControlList> jobACLs, String jobQueueName,
String workflowId, String workflowName, String workflowNodeName,
String workflowAdjacencies, String workflowTags) {
datum.setJobid(new Utf8(id.toString()));
datum.setJobName(new Utf8(jobName));
datum.setUserName(new Utf8(userName));
datum.setSubmitTime(submitTime);
datum.setJobConfPath(new Utf8(jobConfPath));
Map<CharSequence, CharSequence> jobAcls = new HashMap<CharSequence, CharSequence>();
for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
jobAcls.put(new Utf8(entry.getKey().getAclName()), new Utf8(
entry.getValue().getAclString()));
}
datum.setAcls(jobAcls);
if (jobQueueName != null) {
datum.setJobQueueName(new Utf8(jobQueueName));
}
if (workflowId != null) {
datum.setWorkflowId(new Utf8(workflowId));
}
if (workflowName != null) {
datum.setWorkflowName(new Utf8(workflowName));
}
if (workflowNodeName != null) {
datum.setWorkflowNodeName(new Utf8(workflowNodeName));
}
if (workflowAdjacencies != null) {
datum.setWorkflowAdjacencies(new Utf8(workflowAdjacencies));
}
if (workflowTags != null) {
datum.setWorkflowTags(new Utf8(workflowTags));
}
}
JobSubmittedEvent() {}
public Object getDatum() { return datum; }
public void setDatum(Object datum) {
this.datum = (JobSubmitted)datum;
}
/** Get the Job Id */
public JobID getJobId() { return JobID.forName(datum.getJobid().toString()); }
/** Get the Job name */
public String getJobName() { return datum.getJobName().toString(); }
/** Get the Job queue name */
public String getJobQueueName() {
if (datum.getJobQueueName() != null) {
return datum.getJobQueueName().toString();
}
return null;
}
/** Get the user name */
public String getUserName() { return datum.getUserName().toString(); }
/** Get the submit time */
public long getSubmitTime() { return datum.getSubmitTime(); }
/** Get the Path for the Job Configuration file */
public String getJobConfPath() { return datum.getJobConfPath().toString(); }
  /** Get the acls configured for the job */
public Map<JobACL, AccessControlList> getJobAcls() {
Map<JobACL, AccessControlList> jobAcls =
new HashMap<JobACL, AccessControlList>();
for (JobACL jobACL : JobACL.values()) {
Utf8 jobACLsUtf8 = new Utf8(jobACL.getAclName());
if (datum.getAcls().containsKey(jobACLsUtf8)) {
jobAcls.put(jobACL, new AccessControlList(datum.getAcls().get(
jobACLsUtf8).toString()));
}
}
return jobAcls;
}
/** Get the id of the workflow */
public String getWorkflowId() {
if (datum.getWorkflowId() != null) {
return datum.getWorkflowId().toString();
}
return null;
}
/** Get the name of the workflow */
public String getWorkflowName() {
if (datum.getWorkflowName() != null) {
return datum.getWorkflowName().toString();
}
return null;
}
/** Get the node name of the workflow */
public String getWorkflowNodeName() {
if (datum.getWorkflowNodeName() != null) {
return datum.getWorkflowNodeName().toString();
}
return null;
}
/** Get the adjacencies of the workflow */
public String getWorkflowAdjacencies() {
if (datum.getWorkflowAdjacencies() != null) {
return datum.getWorkflowAdjacencies().toString();
}
return null;
}
/** Get the workflow tags */
public String getWorkflowTags() {
if (datum.getWorkflowTags() != null) {
return datum.getWorkflowTags().toString();
}
return null;
}
/** Get the event type */
public EventType getEventType() { return EventType.JOB_SUBMITTED; }
}
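// Construction sketch (illustrative, not part of the original source): the
// submitter records the configured ACLs as a JobACL -> AccessControlList map.
// The user, queue and ACL strings below are assumptions for the demo.
//
//   Map<JobACL, AccessControlList> acls =
//       new HashMap<JobACL, AccessControlList>();
//   acls.put(JobACL.VIEW_JOB, new AccessControlList("alice,ops"));
//   acls.put(JobACL.MODIFY_JOB, new AccessControlList("alice"));
//   JobSubmittedEvent ev = new JobSubmittedEvent(jobId, "wordcount", "alice",
//       submitTime, jobConfPath, acls, "default");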
| 7,873 | 36.495238 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
/**
* Event to record successful completion of job
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobFinishedEvent implements HistoryEvent {
private JobFinished datum = null;
private JobID jobId;
private long finishTime;
private int finishedMaps;
private int finishedReduces;
private int failedMaps;
private int failedReduces;
private Counters mapCounters;
private Counters reduceCounters;
private Counters totalCounters;
/**
* Create an event to record successful job completion
* @param id Job ID
* @param finishTime Finish time of the job
* @param finishedMaps The number of finished maps
* @param finishedReduces The number of finished reduces
* @param failedMaps The number of failed maps
* @param failedReduces The number of failed reduces
* @param mapCounters Map Counters for the job
* @param reduceCounters Reduce Counters for the job
* @param totalCounters Total Counters for the job
*/
public JobFinishedEvent(JobID id, long finishTime,
int finishedMaps, int finishedReduces,
int failedMaps, int failedReduces,
Counters mapCounters, Counters reduceCounters,
Counters totalCounters) {
this.jobId = id;
this.finishTime = finishTime;
this.finishedMaps = finishedMaps;
this.finishedReduces = finishedReduces;
this.failedMaps = failedMaps;
this.failedReduces = failedReduces;
this.mapCounters = mapCounters;
this.reduceCounters = reduceCounters;
this.totalCounters = totalCounters;
}
JobFinishedEvent() {}
public Object getDatum() {
if (datum == null) {
datum = new JobFinished();
datum.setJobid(new Utf8(jobId.toString()));
datum.setFinishTime(finishTime);
datum.setFinishedMaps(finishedMaps);
datum.setFinishedReduces(finishedReduces);
datum.setFailedMaps(failedMaps);
datum.setFailedReduces(failedReduces);
datum.setMapCounters(EventWriter.toAvro(mapCounters, "MAP_COUNTERS"));
datum.setReduceCounters(EventWriter.toAvro(reduceCounters,
"REDUCE_COUNTERS"));
datum.setTotalCounters(EventWriter.toAvro(totalCounters,
"TOTAL_COUNTERS"));
}
return datum;
}
public void setDatum(Object oDatum) {
this.datum = (JobFinished) oDatum;
this.jobId = JobID.forName(datum.getJobid().toString());
this.finishTime = datum.getFinishTime();
this.finishedMaps = datum.getFinishedMaps();
this.finishedReduces = datum.getFinishedReduces();
this.failedMaps = datum.getFailedMaps();
this.failedReduces = datum.getFailedReduces();
this.mapCounters = EventReader.fromAvro(datum.getMapCounters());
this.reduceCounters = EventReader.fromAvro(datum.getReduceCounters());
this.totalCounters = EventReader.fromAvro(datum.getTotalCounters());
}
public EventType getEventType() {
return EventType.JOB_FINISHED;
}
/** Get the Job ID */
public JobID getJobid() { return jobId; }
/** Get the job finish time */
public long getFinishTime() { return finishTime; }
/** Get the number of finished maps for the job */
public int getFinishedMaps() { return finishedMaps; }
/** Get the number of finished reducers for the job */
public int getFinishedReduces() { return finishedReduces; }
/** Get the number of failed maps for the job */
public int getFailedMaps() { return failedMaps; }
/** Get the number of failed reducers for the job */
public int getFailedReduces() { return failedReduces; }
/** Get the counters for the job */
public Counters getTotalCounters() {
return totalCounters;
}
/** Get the Map counters for the job */
public Counters getMapCounters() {
return mapCounters;
}
/** Get the reduce counters for the job */
public Counters getReduceCounters() {
return reduceCounters;
}
}
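// Illustrative sketch (not part of the original source): the Avro datum is
// built lazily from the fields, so an event can be copied by round-tripping
// its datum. The package-private no-arg constructor makes this possible for
// same-package callers; this helper class is a hypothetical addition.
class JobFinishedEventRoundTripSketch {
  static JobFinishedEvent copyOf(JobFinishedEvent source) {
    JobFinishedEvent copy = new JobFinishedEvent();
    copy.setDatum(source.getDatum()); // rehydrates every field and counter
    return copy;
  }
}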
| 4,928 | 34.978102 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.text.DecimalFormat;
import java.text.Format;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.util.HostUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
/**
* HistoryViewer is used to parse and view the JobHistory files
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HistoryViewer {
private static final Log LOG = LogFactory.getLog(HistoryViewer.class);
private static final SimpleDateFormat dateFormat =
new SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
private FileSystem fs;
private JobInfo job;
private String jobId;
private boolean printAll;
/**
* Constructs the HistoryViewer object
   * @param historyFile The fully qualified Path of the History File
   * @param conf The Configuration of the job
   * @param printAll Whether to print all task statuses or only the
   *                 killed/failed ones
* @throws IOException
*/
public HistoryViewer(String historyFile,
Configuration conf,
boolean printAll) throws IOException {
this.printAll = printAll;
String errorMsg = "Unable to initialize History Viewer";
try {
Path jobFile = new Path(historyFile);
fs = jobFile.getFileSystem(conf);
String[] jobDetails =
jobFile.getName().split("_");
      if (jobDetails.length < 3) {
        // Not a valid history file name; three "_"-separated fields are
        // needed below to build the job conf path.
System.err.println("Ignore unrecognized file: " + jobFile.getName());
throw new IOException(errorMsg);
}
final Path jobConfPath = new Path(jobFile.getParent(), jobDetails[0]
+ "_" + jobDetails[1] + "_" + jobDetails[2] + "_conf.xml");
final Configuration jobConf = new Configuration(conf);
try {
jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
Limits.reset(jobConf);
} catch (FileNotFoundException fnf) {
if (LOG.isWarnEnabled()) {
LOG.warn("Missing job conf in history", fnf);
}
}
JobHistoryParser parser = new JobHistoryParser(fs, jobFile);
job = parser.parse();
jobId = job.getJobId().toString();
} catch(Exception e) {
throw new IOException(errorMsg, e);
}
}
/**
* Print the job/task/attempt summary information
* @throws IOException
*/
public void print() throws IOException{
printJobDetails();
printTaskSummary();
printJobAnalysis();
printTasks(TaskType.JOB_SETUP, TaskStatus.State.FAILED.toString());
printTasks(TaskType.JOB_SETUP, TaskStatus.State.KILLED.toString());
printTasks(TaskType.MAP, TaskStatus.State.FAILED.toString());
printTasks(TaskType.MAP, TaskStatus.State.KILLED.toString());
printTasks(TaskType.REDUCE, TaskStatus.State.FAILED.toString());
printTasks(TaskType.REDUCE, TaskStatus.State.KILLED.toString());
printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.FAILED.toString());
    printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.KILLED.toString());
if (printAll) {
printTasks(TaskType.JOB_SETUP, TaskStatus.State.SUCCEEDED.toString());
printTasks(TaskType.MAP, TaskStatus.State.SUCCEEDED.toString());
printTasks(TaskType.REDUCE, TaskStatus.State.SUCCEEDED.toString());
printTasks(TaskType.JOB_CLEANUP, TaskStatus.State.SUCCEEDED.toString());
printAllTaskAttempts(TaskType.JOB_SETUP);
printAllTaskAttempts(TaskType.MAP);
printAllTaskAttempts(TaskType.REDUCE);
printAllTaskAttempts(TaskType.JOB_CLEANUP);
}
FilteredJob filter = new FilteredJob(job,
TaskStatus.State.FAILED.toString());
printFailedAttempts(filter);
filter = new FilteredJob(job,
TaskStatus.State.KILLED.toString());
printFailedAttempts(filter);
}
private void printJobDetails() {
StringBuffer jobDetails = new StringBuffer();
jobDetails.append("\nHadoop job: " ).append(job.getJobId());
jobDetails.append("\n=====================================");
jobDetails.append("\nUser: ").append(job.getUsername());
jobDetails.append("\nJobName: ").append(job.getJobname());
jobDetails.append("\nJobConf: ").append(job.getJobConfPath());
jobDetails.append("\nSubmitted At: ").append(StringUtils.
getFormattedTimeWithDiff(dateFormat,
job.getSubmitTime(), 0));
jobDetails.append("\nLaunched At: ").append(StringUtils.
getFormattedTimeWithDiff(dateFormat,
job.getLaunchTime(),
job.getSubmitTime()));
jobDetails.append("\nFinished At: ").append(StringUtils.
getFormattedTimeWithDiff(dateFormat,
job.getFinishTime(),
job.getLaunchTime()));
jobDetails.append("\nStatus: ").append(((job.getJobStatus() == null) ?
"Incomplete" :job.getJobStatus()));
printCounters(jobDetails, job.getTotalCounters(), job.getMapCounters(),
job.getReduceCounters());
jobDetails.append("\n");
jobDetails.append("\n=====================================");
System.out.println(jobDetails.toString());
}
private void printCounters(StringBuffer buff, Counters totalCounters,
Counters mapCounters, Counters reduceCounters) {
// Killed jobs might not have counters
if (totalCounters == null) {
return;
}
buff.append("\nCounters: \n\n");
buff.append(String.format("|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s|",
"Group Name",
"Counter name",
"Map Value",
"Reduce Value",
"Total Value"));
buff.append("\n------------------------------------------"+
"---------------------------------------------");
for (String groupName : totalCounters.getGroupNames()) {
CounterGroup totalGroup = totalCounters.getGroup(groupName);
CounterGroup mapGroup = mapCounters.getGroup(groupName);
CounterGroup reduceGroup = reduceCounters.getGroup(groupName);
Format decimal = new DecimalFormat();
Iterator<org.apache.hadoop.mapreduce.Counter> ctrItr =
totalGroup.iterator();
while(ctrItr.hasNext()) {
org.apache.hadoop.mapreduce.Counter counter = ctrItr.next();
String name = counter.getName();
String mapValue =
decimal.format(mapGroup.findCounter(name).getValue());
String reduceValue =
decimal.format(reduceGroup.findCounter(name).getValue());
String totalValue =
decimal.format(counter.getValue());
buff.append(
String.format("%n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
totalGroup.getDisplayName(),
counter.getDisplayName(),
mapValue, reduceValue, totalValue));
}
}
}
private void printAllTaskAttempts(TaskType taskType) {
Map<TaskID, TaskInfo> tasks = job.getAllTasks();
StringBuffer taskList = new StringBuffer();
taskList.append("\n").append(taskType);
taskList.append(" task list for ").append(job.getJobId());
taskList.append("\nTaskId\t\tStartTime");
if (TaskType.REDUCE.equals(taskType)) {
taskList.append("\tShuffleFinished\tSortFinished");
}
taskList.append("\tFinishTime\tHostName\tError\tTaskLogs");
taskList.append("\n====================================================");
System.out.println(taskList.toString());
for (JobHistoryParser.TaskInfo task : tasks.values()) {
for (JobHistoryParser.TaskAttemptInfo attempt :
task.getAllTaskAttempts().values()) {
if (taskType.equals(task.getTaskType())){
taskList.setLength(0);
taskList.append(attempt.getAttemptId()).append("\t");
taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
attempt.getStartTime(), 0)).append("\t");
if (TaskType.REDUCE.equals(taskType)) {
taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
attempt.getShuffleFinishTime(),
attempt.getStartTime()));
taskList.append("\t");
taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
attempt.getSortFinishTime(),
attempt.getShuffleFinishTime()));
}
taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
attempt.getFinishTime(),
attempt.getStartTime()));
taskList.append("\t");
taskList.append(attempt.getHostname()).append("\t");
taskList.append(attempt.getError());
String taskLogsUrl = getTaskLogsUrl(
WebAppUtils.getHttpSchemePrefix(fs.getConf()), attempt);
taskList.append(taskLogsUrl != null ? taskLogsUrl : "n/a");
System.out.println(taskList.toString());
}
}
}
}
private void printTaskSummary() {
SummarizedJob ts = new SummarizedJob(job);
StringBuffer taskSummary = new StringBuffer();
taskSummary.append("\nTask Summary");
taskSummary.append("\n============================");
taskSummary.append("\nKind\tTotal\t");
taskSummary.append("Successful\tFailed\tKilled\tStartTime\tFinishTime");
taskSummary.append("\n");
taskSummary.append("\nSetup\t").append(ts.totalSetups);
taskSummary.append("\t").append(ts.numFinishedSetups);
taskSummary.append("\t\t").append(ts.numFailedSetups);
taskSummary.append("\t").append(ts.numKilledSetups);
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.setupStarted, 0));
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.setupFinished, ts.setupStarted));
taskSummary.append("\nMap\t").append(ts.totalMaps);
taskSummary.append("\t").append(job.getFinishedMaps());
taskSummary.append("\t\t").append(ts.numFailedMaps);
taskSummary.append("\t").append(ts.numKilledMaps);
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.mapStarted, 0));
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.mapFinished, ts.mapStarted));
taskSummary.append("\nReduce\t").append(ts.totalReduces);
taskSummary.append("\t").append(job.getFinishedReduces());
taskSummary.append("\t\t").append(ts.numFailedReduces);
taskSummary.append("\t").append(ts.numKilledReduces);
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.reduceStarted, 0));
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.reduceFinished, ts.reduceStarted));
taskSummary.append("\nCleanup\t").append(ts.totalCleanups);
taskSummary.append("\t").append(ts.numFinishedCleanups);
taskSummary.append("\t\t").append(ts.numFailedCleanups);
taskSummary.append("\t").append(ts.numKilledCleanups);
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.cleanupStarted, 0));
taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, ts.cleanupFinished,
ts.cleanupStarted));
taskSummary.append("\n============================\n");
System.out.println(taskSummary.toString());
}
private void printJobAnalysis() {
if (!job.getJobStatus().equals
(JobStatus.getJobRunState(JobStatus.SUCCEEDED))) {
System.out.println("No Analysis available as job did not finish");
return;
}
AnalyzedJob avg = new AnalyzedJob(job);
System.out.println("\nAnalysis");
System.out.println("=========");
printAnalysis(avg.getMapTasks(), cMap, "map", avg.getAvgMapTime(), 10);
printLast(avg.getMapTasks(), "map", cFinishMapRed);
if (avg.getReduceTasks().length > 0) {
printAnalysis(avg.getReduceTasks(), cShuffle, "shuffle",
avg.getAvgShuffleTime(), 10);
printLast(avg.getReduceTasks(), "shuffle", cFinishShuffle);
printAnalysis(avg.getReduceTasks(), cReduce, "reduce",
avg.getAvgReduceTime(), 10);
printLast(avg.getReduceTasks(), "reduce", cFinishMapRed);
}
System.out.println("=========");
}
private void printAnalysis(JobHistoryParser.TaskAttemptInfo [] tasks,
Comparator<JobHistoryParser.TaskAttemptInfo> cmp,
String taskType,
long avg,
int showTasks) {
Arrays.sort(tasks, cmp);
JobHistoryParser.TaskAttemptInfo min = tasks[tasks.length-1];
StringBuffer details = new StringBuffer();
details.append("\nTime taken by best performing ");
details.append(taskType).append(" task ");
details.append(min.getAttemptId().getTaskID().toString()).append(": ");
if ("map".equals(taskType)) {
details.append(StringUtils.formatTimeDiff(
min.getFinishTime(),
min.getStartTime()));
} else if ("shuffle".equals(taskType)) {
details.append(StringUtils.formatTimeDiff(
min.getShuffleFinishTime(),
min.getStartTime()));
} else {
details.append(StringUtils.formatTimeDiff(
min.getFinishTime(),
min.getShuffleFinishTime()));
}
details.append("\nAverage time taken by ");
details.append(taskType).append(" tasks: ");
details.append(StringUtils.formatTimeDiff(avg, 0));
details.append("\nWorse performing ");
details.append(taskType).append(" tasks: ");
details.append("\nTaskId\t\tTimetaken");
System.out.println(details.toString());
for (int i = 0; i < showTasks && i < tasks.length; i++) {
details.setLength(0);
details.append(tasks[i].getAttemptId().getTaskID()).append(" ");
if ("map".equals(taskType)) {
details.append(StringUtils.formatTimeDiff(
tasks[i].getFinishTime(),
tasks[i].getStartTime()));
} else if ("shuffle".equals(taskType)) {
details.append(StringUtils.formatTimeDiff(
tasks[i].getShuffleFinishTime(),
tasks[i].getStartTime()));
} else {
details.append(StringUtils.formatTimeDiff(
tasks[i].getFinishTime(),
tasks[i].getShuffleFinishTime()));
}
System.out.println(details.toString());
}
}
private void printLast(JobHistoryParser.TaskAttemptInfo [] tasks,
String taskType,
Comparator<JobHistoryParser.TaskAttemptInfo> cmp
) {
Arrays.sort(tasks, cFinishMapRed);
JobHistoryParser.TaskAttemptInfo last = tasks[0];
StringBuffer lastBuf = new StringBuffer();
lastBuf.append("The last ").append(taskType);
lastBuf.append(" task ").append(last.getAttemptId().getTaskID());
    long finishTime;
if ("shuffle".equals(taskType)) {
finishTime = last.getShuffleFinishTime();
} else {
finishTime = last.getFinishTime();
}
lastBuf.append(" finished at (relative to the Job launch time): ");
lastBuf.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
finishTime, job.getLaunchTime()));
System.out.println(lastBuf.toString());
}
private void printTasks(TaskType taskType, String status) {
Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
StringBuffer header = new StringBuffer();
header.append("\n").append(status).append(" ");
header.append(taskType).append(" task list for ").append(jobId);
header.append("\nTaskId\t\tStartTime\tFinishTime\tError");
if (TaskType.MAP.equals(taskType)) {
header.append("\tInputSplits");
}
header.append("\n====================================================");
StringBuffer taskList = new StringBuffer();
for (JobHistoryParser.TaskInfo task : tasks.values()) {
if (taskType.equals(task.getTaskType()) &&
(status.equals(task.getTaskStatus())
|| status.equalsIgnoreCase("ALL"))) {
taskList.setLength(0);
taskList.append(task.getTaskId());
taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, task.getStartTime(), 0));
taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
dateFormat, task.getFinishTime(),
task.getStartTime()));
taskList.append("\t").append(task.getError());
if (TaskType.MAP.equals(taskType)) {
taskList.append("\t").append(task.getSplitLocations());
}
        // taskList can never be null here; the redundant guard is dropped.
        System.out.println(header.toString());
        System.out.println(taskList.toString());
}
}
}
private void printFailedAttempts(FilteredJob filteredJob) {
Map<String, Set<TaskID>> badNodes = filteredJob.getFilteredMap();
StringBuffer attempts = new StringBuffer();
if (badNodes.size() > 0) {
attempts.append("\n").append(filteredJob.getFilter());
attempts.append(" task attempts by nodes");
attempts.append("\nHostname\tFailedTasks");
attempts.append("\n===============================");
System.out.println(attempts.toString());
for (Map.Entry<String,
Set<TaskID>> entry : badNodes.entrySet()) {
String node = entry.getKey();
Set<TaskID> failedTasks = entry.getValue();
attempts.setLength(0);
attempts.append(node).append("\t");
for (TaskID t : failedTasks) {
attempts.append(t).append(", ");
}
System.out.println(attempts.toString());
}
}
}
/**
* Return the TaskLogsUrl of a particular TaskAttempt
*
   * @param scheme the http scheme prefix for the cluster
   * @param attempt the task attempt to build the log URL for
* @return the taskLogsUrl. null if http-port or tracker-name or
* task-attempt-id are unavailable.
*/
public static String getTaskLogsUrl(String scheme,
JobHistoryParser.TaskAttemptInfo attempt) {
if (attempt.getHttpPort() == -1
|| attempt.getTrackerName().equals("")
|| attempt.getAttemptId() == null) {
return null;
}
String taskTrackerName =
HostUtil.convertTrackerNameToHostName(
attempt.getTrackerName());
return HostUtil.getTaskLogUrl(scheme, taskTrackerName,
Integer.toString(attempt.getHttpPort()),
attempt.getAttemptId().toString());
}
private Comparator<JobHistoryParser.TaskAttemptInfo> cMap =
new Comparator<JobHistoryParser.TaskAttemptInfo>() {
public int compare(JobHistoryParser.TaskAttemptInfo t1,
JobHistoryParser.TaskAttemptInfo t2) {
long l1 = t1.getFinishTime() - t1.getStartTime();
long l2 = t2.getFinishTime() - t2.getStartTime();
return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
}
};
private Comparator<JobHistoryParser.TaskAttemptInfo> cShuffle =
new Comparator<JobHistoryParser.TaskAttemptInfo>() {
public int compare(JobHistoryParser.TaskAttemptInfo t1,
JobHistoryParser.TaskAttemptInfo t2) {
long l1 = t1.getShuffleFinishTime() - t1.getStartTime();
long l2 = t2.getShuffleFinishTime() - t2.getStartTime();
return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
}
};
private Comparator<JobHistoryParser.TaskAttemptInfo> cFinishShuffle =
new Comparator<JobHistoryParser.TaskAttemptInfo>() {
public int compare(JobHistoryParser.TaskAttemptInfo t1,
JobHistoryParser.TaskAttemptInfo t2) {
long l1 = t1.getShuffleFinishTime();
long l2 = t2.getShuffleFinishTime();
return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
}
};
private Comparator<JobHistoryParser.TaskAttemptInfo> cFinishMapRed =
new Comparator<JobHistoryParser.TaskAttemptInfo>() {
public int compare(JobHistoryParser.TaskAttemptInfo t1,
JobHistoryParser.TaskAttemptInfo t2) {
long l1 = t1.getFinishTime();
long l2 = t2.getFinishTime();
return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
}
};
private Comparator<JobHistoryParser.TaskAttemptInfo> cReduce =
new Comparator<JobHistoryParser.TaskAttemptInfo>() {
public int compare(JobHistoryParser.TaskAttemptInfo t1,
JobHistoryParser.TaskAttemptInfo t2) {
long l1 = t1.getFinishTime() -
t1.getShuffleFinishTime();
long l2 = t2.getFinishTime() -
t2.getShuffleFinishTime();
return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
}
};
/**
   * Utility class used to summarize the job.
* Used by HistoryViewer and the JobHistory UI.
*
*/
public static class SummarizedJob {
Map<TaskID, JobHistoryParser.TaskInfo> tasks;
int totalMaps = 0;
int totalReduces = 0;
int totalCleanups = 0;
int totalSetups = 0;
int numFailedMaps = 0;
int numKilledMaps = 0;
int numFailedReduces = 0;
int numKilledReduces = 0;
int numFinishedCleanups = 0;
int numFailedCleanups = 0;
int numKilledCleanups = 0;
int numFinishedSetups = 0;
int numFailedSetups = 0;
int numKilledSetups = 0;
long mapStarted = 0;
long mapFinished = 0;
long reduceStarted = 0;
long reduceFinished = 0;
long cleanupStarted = 0;
long cleanupFinished = 0;
long setupStarted = 0;
long setupFinished = 0;
/** Get total maps */
public int getTotalMaps() { return totalMaps; }
/** Get total reduces */
public int getTotalReduces() { return totalReduces; }
/** Get number of clean up tasks */
public int getTotalCleanups() { return totalCleanups; }
/** Get number of set up tasks */
public int getTotalSetups() { return totalSetups; }
/** Get number of failed maps */
public int getNumFailedMaps() { return numFailedMaps; }
/** Get number of killed maps */
public int getNumKilledMaps() { return numKilledMaps; }
/** Get number of failed reduces */
public int getNumFailedReduces() { return numFailedReduces; }
/** Get number of killed reduces */
public int getNumKilledReduces() { return numKilledReduces; }
/** Get number of cleanup tasks that finished */
public int getNumFinishedCleanups() { return numFinishedCleanups; }
/** Get number of failed cleanup tasks */
public int getNumFailedCleanups() { return numFailedCleanups; }
/** Get number of killed cleanup tasks */
public int getNumKilledCleanups() { return numKilledCleanups; }
/** Get number of finished set up tasks */
public int getNumFinishedSetups() { return numFinishedSetups; }
/** Get number of failed set up tasks */
public int getNumFailedSetups() { return numFailedSetups; }
/** Get number of killed set up tasks */
public int getNumKilledSetups() { return numKilledSetups; }
    /** Get the start time of the first map attempt */
    public long getMapStarted() { return mapStarted; }
    /** Get the finish time of the last map attempt */
    public long getMapFinished() { return mapFinished; }
    /** Get the start time of the first reduce attempt */
    public long getReduceStarted() { return reduceStarted; }
    /** Get the finish time of the last reduce attempt */
    public long getReduceFinished() { return reduceFinished; }
    /** Get the start time of the first cleanup attempt */
    public long getCleanupStarted() { return cleanupStarted; }
    /** Get the finish time of the last cleanup attempt */
    public long getCleanupFinished() { return cleanupFinished; }
    /** Get the start time of the first setup attempt */
    public long getSetupStarted() { return setupStarted; }
    /** Get the finish time of the last setup attempt */
    public long getSetupFinished() { return setupFinished; }
/** Create summary information for the parsed job */
public SummarizedJob(JobInfo job) {
tasks = job.getAllTasks();
for (JobHistoryParser.TaskInfo task : tasks.values()) {
Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
task.getAllTaskAttempts();
//allHosts.put(task.getHo(Keys.HOSTNAME), "");
for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
long startTime = attempt.getStartTime();
long finishTime = attempt.getFinishTime();
if (attempt.getTaskType().equals(TaskType.MAP)) {
            if (mapStarted == 0 || mapStarted > startTime) {
mapStarted = startTime;
}
if (mapFinished < finishTime) {
mapFinished = finishTime;
}
totalMaps++;
if (attempt.getTaskStatus().equals
(TaskStatus.State.FAILED.toString())) {
numFailedMaps++;
} else if (attempt.getTaskStatus().equals
(TaskStatus.State.KILLED.toString())) {
numKilledMaps++;
}
} else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
            if (reduceStarted == 0 || reduceStarted > startTime) {
reduceStarted = startTime;
}
if (reduceFinished < finishTime) {
reduceFinished = finishTime;
}
totalReduces++;
if (attempt.getTaskStatus().equals
(TaskStatus.State.FAILED.toString())) {
numFailedReduces++;
} else if (attempt.getTaskStatus().equals
(TaskStatus.State.KILLED.toString())) {
numKilledReduces++;
}
} else if (attempt.getTaskType().equals(TaskType.JOB_CLEANUP)) {
            if (cleanupStarted == 0 || cleanupStarted > startTime) {
cleanupStarted = startTime;
}
if (cleanupFinished < finishTime) {
cleanupFinished = finishTime;
}
totalCleanups++;
if (attempt.getTaskStatus().equals
(TaskStatus.State.SUCCEEDED.toString())) {
numFinishedCleanups++;
} else if (attempt.getTaskStatus().equals
(TaskStatus.State.FAILED.toString())) {
numFailedCleanups++;
} else if (attempt.getTaskStatus().equals
(TaskStatus.State.KILLED.toString())) {
numKilledCleanups++;
}
} else if (attempt.getTaskType().equals(TaskType.JOB_SETUP)) {
            if (setupStarted == 0 || setupStarted > startTime) {
setupStarted = startTime;
}
if (setupFinished < finishTime) {
setupFinished = finishTime;
}
totalSetups++;
if (attempt.getTaskStatus().equals
(TaskStatus.State.SUCCEEDED.toString())) {
numFinishedSetups++;
} else if (attempt.getTaskStatus().equals
(TaskStatus.State.FAILED.toString())) {
numFailedSetups++;
} else if (attempt.getTaskStatus().equals
(TaskStatus.State.KILLED.toString())) {
numKilledSetups++;
}
}
}
}
}
}
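  // Illustrative sketch (not part of the original source): the summary fields
  // above are plain counters, so callers can derive ratios without
  // re-parsing. This helper is a hypothetical addition for the example.
  private static String mapFailureRateSketch(SummarizedJob summary) {
    // Guard against division by zero for jobs with no map tasks.
    return summary.getTotalMaps() == 0 ? "n/a"
        : String.format("%.1f%%",
            100.0 * summary.getNumFailedMaps() / summary.getTotalMaps());
  }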
/**
* Utility class used while analyzing the job.
* Used by HistoryViewer and the JobHistory UI.
*/
public static class AnalyzedJob {
private long avgMapTime;
private long avgReduceTime;
private long avgShuffleTime;
private JobHistoryParser.TaskAttemptInfo [] mapTasks;
private JobHistoryParser.TaskAttemptInfo [] reduceTasks;
/** Get the average map time */
public long getAvgMapTime() { return avgMapTime; }
/** Get the average reduce time */
public long getAvgReduceTime() { return avgReduceTime; }
/** Get the average shuffle time */
public long getAvgShuffleTime() { return avgShuffleTime; }
/** Get the map tasks list */
public JobHistoryParser.TaskAttemptInfo [] getMapTasks() {
return mapTasks;
}
/** Get the reduce tasks list */
public JobHistoryParser.TaskAttemptInfo [] getReduceTasks() {
return reduceTasks;
}
/** Generate analysis information for the parsed job */
public AnalyzedJob (JobInfo job) {
Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
int finishedMaps = (int) job.getFinishedMaps();
int finishedReduces = (int) job.getFinishedReduces();
mapTasks =
new JobHistoryParser.TaskAttemptInfo[finishedMaps];
reduceTasks =
new JobHistoryParser.TaskAttemptInfo[finishedReduces];
      int mapIndex = 0, reduceIndex = 0;
avgMapTime = 0;
avgReduceTime = 0;
avgShuffleTime = 0;
for (JobHistoryParser.TaskInfo task : tasks.values()) {
Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
task.getAllTaskAttempts();
for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
if (attempt.getTaskStatus().
equals(TaskStatus.State.SUCCEEDED.toString())) {
long avgFinishTime = (attempt.getFinishTime() -
attempt.getStartTime());
if (attempt.getTaskType().equals(TaskType.MAP)) {
mapTasks[mapIndex++] = attempt;
avgMapTime += avgFinishTime;
} else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
reduceTasks[reduceIndex++] = attempt;
avgShuffleTime += (attempt.getShuffleFinishTime() -
attempt.getStartTime());
avgReduceTime += (attempt.getFinishTime() -
attempt.getShuffleFinishTime());
}
break;
}
}
}
if (finishedMaps > 0) {
avgMapTime /= finishedMaps;
}
if (finishedReduces > 0) {
avgReduceTime /= finishedReduces;
avgShuffleTime /= finishedReduces;
}
}
}
/**
* Utility to filter out events based on the task status
*
*/
public static class FilteredJob {
private Map<String, Set<TaskID>> badNodesToFilteredTasks =
new HashMap<String, Set<TaskID>>();
private String filter;
/** Get the map of the filtered tasks */
public Map<String, Set<TaskID>> getFilteredMap() {
return badNodesToFilteredTasks;
}
/** Get the current filter */
public String getFilter() { return filter; }
/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {
filter = status;
Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
for (JobHistoryParser.TaskInfo task : tasks.values()) {
Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
task.getAllTaskAttempts();
for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
if (attempt.getTaskStatus().equals(status)) {
String hostname = attempt.getHostname();
TaskID id = attempt.getAttemptId().getTaskID();
            Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
            if (set == null) {
              set = new TreeSet<TaskID>();
              badNodesToFilteredTasks.put(hostname, set);
            }
            set.add(id);
}
}
}
}
}
}
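// Illustrative usage sketch (not part of the original source): parse a
// history file and print the report. The file path below is a hypothetical
// placeholder, not a real file.
class HistoryViewerUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // printAll=false restricts the task listings to killed/failed tasks.
    HistoryViewer viewer = new HistoryViewer(
        "/tmp/history/job_1420000000000_0001.jhist", conf, false);
    viewer.print();
  }
}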
| 33,273 | 39.826994 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
/**
* Event to record the successful completion of a task
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskFinishedEvent implements HistoryEvent {
private TaskFinished datum = null;
private TaskID taskid;
private TaskAttemptID successfulAttemptId;
private long finishTime;
private TaskType taskType;
private String status;
private Counters counters;
/**
* Create an event to record the successful completion of a task
* @param id Task ID
* @param attemptId Task Attempt ID of the successful attempt for this task
* @param finishTime Finish time of the task
* @param taskType Type of the task
* @param status Status string
* @param counters Counters for the task
*/
public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
TaskType taskType,
String status, Counters counters) {
this.taskid = id;
this.successfulAttemptId = attemptId;
this.finishTime = finishTime;
this.taskType = taskType;
this.status = status;
this.counters = counters;
}
TaskFinishedEvent() {}
public Object getDatum() {
if (datum == null) {
datum = new TaskFinished();
datum.setTaskid(new Utf8(taskid.toString()));
      if (successfulAttemptId != null) {
        datum.setSuccessfulAttemptId(new Utf8(successfulAttemptId.toString()));
      }
datum.setFinishTime(finishTime);
datum.setCounters(EventWriter.toAvro(counters));
datum.setTaskType(new Utf8(taskType.name()));
datum.setStatus(new Utf8(status));
}
return datum;
}
public void setDatum(Object oDatum) {
this.datum = (TaskFinished)oDatum;
this.taskid = TaskID.forName(datum.getTaskid().toString());
if (datum.getSuccessfulAttemptId() != null) {
this.successfulAttemptId = TaskAttemptID
.forName(datum.getSuccessfulAttemptId().toString());
}
this.finishTime = datum.getFinishTime();
this.taskType = TaskType.valueOf(datum.getTaskType().toString());
this.status = datum.getStatus().toString();
this.counters = EventReader.fromAvro(datum.getCounters());
}
/** Get task id */
public TaskID getTaskId() { return taskid; }
/** Get successful task attempt id */
public TaskAttemptID getSuccessfulTaskAttemptId() {
return successfulAttemptId;
}
/** Get the task finish time */
public long getFinishTime() { return finishTime; }
/** Get task counters */
public Counters getCounters() { return counters; }
/** Get task type */
public TaskType getTaskType() {
return taskType;
}
/** Get task status */
  public String getTaskStatus() { return status; }
/** Get event type */
public EventType getEventType() {
return EventType.TASK_FINISHED;
}
}
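// Illustrative sketch (not part of the original source): the successful
// attempt id is optional in the Avro record, so consumers should null-check
// it. This helper class is a hypothetical addition for the example.
class TaskFinishedEventSketch {
  static String describe(TaskFinishedEvent event) {
    TaskAttemptID attempt = event.getSuccessfulTaskAttemptId();
    return event.getTaskId() + " finished at " + event.getFinishTime()
        + (attempt == null ? "" : " via " + attempt);
  }
}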
| 4,012 | 32.441667 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskType;
/**
* Event to record the normalized map/reduce requirements.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NormalizedResourceEvent implements HistoryEvent {
private int memory;
private TaskType taskType;
/**
* Normalized request when sent to the Resource Manager.
* @param taskType the tasktype of the request.
* @param memory the normalized memory requirements.
*/
public NormalizedResourceEvent(TaskType taskType, int memory) {
this.memory = memory;
this.taskType = taskType;
}
/**
* the tasktype for the event.
* @return the tasktype for the event.
*/
public TaskType getTaskType() {
return this.taskType;
}
/**
* the normalized memory
* @return the normalized memory
*/
public int getMemory() {
return this.memory;
}
@Override
public EventType getEventType() {
return EventType.NORMALIZED_RESOURCE;
}
@Override
public Object getDatum() {
throw new UnsupportedOperationException("Not a seriable object");
}
@Override
public void setDatum(Object datum) {
throw new UnsupportedOperationException("Not a seriable object");
}
}
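// Illustrative sketch (not part of the original source): this event carries
// no Avro datum (getDatum() throws), so it is consumed in memory only and
// handlers should branch on the event type before any serialization. This
// helper class is a hypothetical addition for the example.
class NormalizedResourceEventSketch {
  static void log(HistoryEvent event) {
    if (event.getEventType() == EventType.NORMALIZED_RESOURCE) {
      NormalizedResourceEvent nre = (NormalizedResourceEvent) event;
      System.out.println("Normalized " + nre.getTaskType()
          + " request to memory=" + nre.getMemory());
    }
  }
}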
| 2,192 | 28.635135 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
/**
* Event to record successful completion of a map attempt
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MapAttemptFinishedEvent implements HistoryEvent {
private MapAttemptFinished datum = null;
private TaskAttemptID attemptId;
private TaskType taskType;
private String taskStatus;
private long finishTime;
private String hostname;
private String rackName;
private int port;
private long mapFinishTime;
private String state;
private Counters counters;
int[][] allSplits;
int[] clockSplits;
int[] cpuUsages;
int[] vMemKbytes;
int[] physMemKbytes;
/**
* Create an event for successful completion of map attempts
* @param id Task Attempt ID
* @param taskType Type of the task
* @param taskStatus Status of the task
* @param mapFinishTime Finish time of the map phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the map executed
* @param port RPC port for the tracker host.
* @param rackName Name of the rack where the map executed
* @param state State string for the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
* measurable worker node state variables against progress.
* Currently there are four; wallclock time, CPU time,
* virtual memory and physical memory.
*
   *        If you have no splits data, pass {@code null} for this
   *        parameter.
*/
public MapAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long mapFinishTime, long finishTime, String hostname, int port,
String rackName, String state, Counters counters, int[][] allSplits) {
this.attemptId = id;
this.taskType = taskType;
this.taskStatus = taskStatus;
this.mapFinishTime = mapFinishTime;
this.finishTime = finishTime;
this.hostname = hostname;
this.rackName = rackName;
this.port = port;
this.state = state;
this.counters = counters;
this.allSplits = allSplits;
this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
/**
* @deprecated please use the constructor with an additional
* argument, an array of splits arrays instead. See
* {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
* for an explanation of the meaning of that parameter.
*
* Create an event for successful completion of map attempts
* @param id Task Attempt ID
* @param taskType Type of the task
* @param taskStatus Status of the task
* @param mapFinishTime Finish time of the map phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the map executed
* @param state State string for the attempt
* @param counters Counters for the attempt
*/
@Deprecated
public MapAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long mapFinishTime, long finishTime, String hostname,
String state, Counters counters) {
this(id, taskType, taskStatus, mapFinishTime, finishTime, hostname, -1, "",
state, counters, null);
}
MapAttemptFinishedEvent() {}
public Object getDatum() {
if (datum == null) {
datum = new MapAttemptFinished();
datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
datum.setAttemptId(new Utf8(attemptId.toString()));
datum.setTaskType(new Utf8(taskType.name()));
datum.setTaskStatus(new Utf8(taskStatus));
datum.setMapFinishTime(mapFinishTime);
datum.setFinishTime(finishTime);
datum.setHostname(new Utf8(hostname));
datum.setPort(port);
if (rackName != null) {
datum.setRackname(new Utf8(rackName));
}
datum.setState(new Utf8(state));
datum.setCounters(EventWriter.toAvro(counters));
datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetWallclockTime(allSplits)));
datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetCPUTime(allSplits)));
datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetVMemKbytes(allSplits)));
datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetPhysMemKbytes(allSplits)));
}
return datum;
}
public void setDatum(Object oDatum) {
this.datum = (MapAttemptFinished)oDatum;
this.attemptId = TaskAttemptID.forName(datum.getAttemptId().toString());
this.taskType = TaskType.valueOf(datum.getTaskType().toString());
this.taskStatus = datum.getTaskStatus().toString();
this.mapFinishTime = datum.getMapFinishTime();
this.finishTime = datum.getFinishTime();
this.hostname = datum.getHostname().toString();
    // Rack name is optional in the Avro record; guard against null.
    this.rackName = datum.getRackname() == null
        ? null : datum.getRackname().toString();
this.port = datum.getPort();
this.state = datum.getState().toString();
this.counters = EventReader.fromAvro(datum.getCounters());
this.clockSplits = AvroArrayUtils.fromAvro(datum.getClockSplits());
this.cpuUsages = AvroArrayUtils.fromAvro(datum.getCpuUsages());
this.vMemKbytes = AvroArrayUtils.fromAvro(datum.getVMemKbytes());
this.physMemKbytes = AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
}
/** Get the task ID */
public TaskID getTaskId() { return attemptId.getTaskID(); }
/** Get the attempt id */
public TaskAttemptID getAttemptId() {
return attemptId;
}
/** Get the task type */
public TaskType getTaskType() {
return taskType;
}
/** Get the task status */
  public String getTaskStatus() { return taskStatus; }
/** Get the map phase finish time */
public long getMapFinishTime() { return mapFinishTime; }
/** Get the attempt finish time */
public long getFinishTime() { return finishTime; }
/** Get the host name */
  public String getHostname() { return hostname; }
/** Get the tracker rpc port */
public int getPort() { return port; }
/** Get the rack name */
public String getRackName() {
return rackName == null ? null : rackName.toString();
}
/** Get the state string */
  public String getState() { return state; }
/** Get the counters */
Counters getCounters() { return counters; }
/** Get the event type */
public EventType getEventType() {
return EventType.MAP_ATTEMPT_FINISHED;
}
public int[] getClockSplits() {
return clockSplits;
}
public int[] getCpuUsages() {
return cpuUsages;
}
public int[] getVMemKbytes() {
return vMemKbytes;
}
public int[] getPhysMemKbytes() {
return physMemKbytes;
}
}
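// Illustrative sketch (not part of the original source): per the constructor
// javadoc above, callers without progress-split samples pass null for
// allSplits. Identifiers such as `attemptId` and `counters` are assumptions
// supplied by the caller; host, port and rack values are placeholders.
class MapAttemptFinishedEventSketch {
  static MapAttemptFinishedEvent withoutSplits(TaskAttemptID attemptId,
      long mapFinish, long finish, Counters counters) {
    return new MapAttemptFinishedEvent(attemptId, TaskType.MAP, "SUCCEEDED",
        mapFinish, finish, "host.example.com", -1, "/default-rack",
        "", counters, null /* no splits data, as documented */);
  }
}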
| 8,160 | 35.761261 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AMStartedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.avro.util.Utf8;
/**
 * Event to record the start of an MR ApplicationMaster
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class AMStartedEvent implements HistoryEvent {
private AMStarted datum = new AMStarted();
private String forcedJobStateOnShutDown;
private long submitTime;
/**
* Create an event to record the start of an MR AppMaster
*
* @param appAttemptId
* the application attempt id.
* @param startTime
* the start time of the AM.
* @param containerId
* the containerId of the AM.
* @param nodeManagerHost
* the node on which the AM is running.
* @param nodeManagerPort
* the port on which the AM is running.
* @param nodeManagerHttpPort
* the httpPort for the node running the AM.
   * @param submitTime
   *          the time the job was submitted.
   */
public AMStartedEvent(ApplicationAttemptId appAttemptId, long startTime,
ContainerId containerId, String nodeManagerHost, int nodeManagerPort,
int nodeManagerHttpPort, long submitTime) {
this(appAttemptId, startTime, containerId, nodeManagerHost,
nodeManagerPort, nodeManagerHttpPort, null, submitTime);
}
/**
* Create an event to record the start of an MR AppMaster
*
* @param appAttemptId
* the application attempt id.
* @param startTime
* the start time of the AM.
* @param containerId
* the containerId of the AM.
* @param nodeManagerHost
* the node on which the AM is running.
* @param nodeManagerPort
* the port on which the AM is running.
* @param nodeManagerHttpPort
* the httpPort for the node running the AM.
* @param forcedJobStateOnShutDown
* the state to force the job into
   * @param submitTime
   *          the time the job was submitted.
   */
public AMStartedEvent(ApplicationAttemptId appAttemptId, long startTime,
ContainerId containerId, String nodeManagerHost, int nodeManagerPort,
int nodeManagerHttpPort, String forcedJobStateOnShutDown,
long submitTime) {
datum.setApplicationAttemptId(new Utf8(appAttemptId.toString()));
datum.setStartTime(startTime);
datum.setContainerId(new Utf8(containerId.toString()));
datum.setNodeManagerHost(new Utf8(nodeManagerHost));
datum.setNodeManagerPort(nodeManagerPort);
datum.setNodeManagerHttpPort(nodeManagerHttpPort);
this.forcedJobStateOnShutDown = forcedJobStateOnShutDown;
this.submitTime = submitTime;
}
AMStartedEvent() {
}
public Object getDatum() {
return datum;
}
public void setDatum(Object datum) {
this.datum = (AMStarted) datum;
}
/**
* @return the ApplicationAttemptId
*/
public ApplicationAttemptId getAppAttemptId() {
return ConverterUtils.toApplicationAttemptId(datum.getApplicationAttemptId()
.toString());
}
/**
* @return the start time for the MRAppMaster
*/
public long getStartTime() {
return datum.getStartTime();
}
/**
* @return the ContainerId for the MRAppMaster.
*/
public ContainerId getContainerId() {
return ConverterUtils.toContainerId(datum.getContainerId().toString());
}
/**
* @return the node manager host.
*/
public String getNodeManagerHost() {
return datum.getNodeManagerHost().toString();
}
/**
* @return the node manager port.
*/
public int getNodeManagerPort() {
return datum.getNodeManagerPort();
}
/**
* @return the http port for the tracker.
*/
public int getNodeManagerHttpPort() {
return datum.getNodeManagerHttpPort();
}
/**
* @return the state to force the job into
*/
public String getForcedJobStateOnShutDown() {
return this.forcedJobStateOnShutDown;
}
/**
* @return the submit time for the Application(Job)
*/
public long getSubmitTime() {
return this.submitTime;
}
  /** Get the event type */
@Override
public EventType getEventType() {
return EventType.AM_STARTED;
}
}
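// Illustrative sketch (not part of the original source): building the event
// from YARN identifiers. `appAttemptId` and `containerId` are assumed to be
// provided by the running ApplicationMaster; the host and port values are
// hypothetical placeholders.
class AMStartedEventSketch {
  static AMStartedEvent record(ApplicationAttemptId appAttemptId,
      ContainerId containerId, long startTime, long submitTime) {
    // submitTime (last argument) is carried alongside the Avro datum rather
    // than inside it, as the class above shows.
    return new AMStartedEvent(appAttemptId, startTime, containerId,
        "nm.example.com", 45454, 8042, submitTime);
  }
}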
| 5,074 | 28.852941 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInfoChangeEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.avro.util.Utf8;
/**
* Event to record changes in the submit and launch time of
* a job
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobInfoChangeEvent implements HistoryEvent {
private JobInfoChange datum = new JobInfoChange();
/**
   * Create an event to record the submit and launch time of a job
* @param id Job Id
* @param submitTime Submit time of the job
* @param launchTime Launch time of the job
*/
public JobInfoChangeEvent(JobID id, long submitTime, long launchTime) {
datum.setJobid(new Utf8(id.toString()));
datum.setSubmitTime(submitTime);
datum.setLaunchTime(launchTime);
}
JobInfoChangeEvent() { }
public Object getDatum() { return datum; }
public void setDatum(Object datum) {
this.datum = (JobInfoChange)datum;
}
/** Get the Job ID */
public JobID getJobId() { return JobID.forName(datum.getJobid().toString()); }
/** Get the Job submit time */
public long getSubmitTime() { return datum.getSubmitTime(); }
/** Get the Job launch time */
public long getLaunchTime() { return datum.getLaunchTime(); }
public EventType getEventType() {
return EventType.JOB_INFO_CHANGED;
}
}
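// Illustrative sketch (not part of the original source): this event is
// typically emitted once the launch time becomes known after submission.
// `jobId` and `submitTime` are assumptions supplied by the caller.
class JobInfoChangeEventSketch {
  static JobInfoChangeEvent launched(JobID jobId, long submitTime) {
    return new JobInfoChangeEvent(jobId, submitTime,
        System.currentTimeMillis());
  }
}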
| 2,258 | 31.73913 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import com.google.common.base.Joiner;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobID;
import java.util.Collections;
/**
* Event to record Failed and Killed completion of jobs
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobUnsuccessfulCompletionEvent implements HistoryEvent {
private static final String NODIAGS = "";
private static final Iterable<String> NODIAGS_LIST =
Collections.singletonList(NODIAGS);
private JobUnsuccessfulCompletion datum
= new JobUnsuccessfulCompletion();
/**
* Create an event to record unsuccessful completion (killed/failed) of jobs
* @param id Job ID
* @param finishTime Finish time of the job
* @param finishedMaps Number of finished maps
* @param finishedReduces Number of finished reduces
* @param status Status of the job
*/
public JobUnsuccessfulCompletionEvent(JobID id, long finishTime,
int finishedMaps,
int finishedReduces, String status) {
this(id, finishTime, finishedMaps, finishedReduces, status, NODIAGS_LIST);
}
/**
* Create an event to record unsuccessful completion (killed/failed) of jobs
* @param id Job ID
* @param finishTime Finish time of the job
* @param finishedMaps Number of finished maps
* @param finishedReduces Number of finished reduces
* @param status Status of the job
* @param diagnostics job runtime diagnostics
*/
public JobUnsuccessfulCompletionEvent(JobID id, long finishTime,
int finishedMaps,
int finishedReduces,
String status,
Iterable<String> diagnostics) {
datum.setJobid(new Utf8(id.toString()));
datum.setFinishTime(finishTime);
datum.setFinishedMaps(finishedMaps);
datum.setFinishedReduces(finishedReduces);
datum.setJobStatus(new Utf8(status));
if (diagnostics == null) {
diagnostics = NODIAGS_LIST;
}
datum.setDiagnostics(new Utf8(Joiner.on('\n').skipNulls()
.join(diagnostics)));
}
JobUnsuccessfulCompletionEvent() {}
public Object getDatum() { return datum; }
public void setDatum(Object datum) {
this.datum = (JobUnsuccessfulCompletion)datum;
}
/** Get the Job ID */
public JobID getJobId() {
return JobID.forName(datum.getJobid().toString());
}
/** Get the job finish time */
public long getFinishTime() { return datum.getFinishTime(); }
/** Get the number of finished maps */
public int getFinishedMaps() { return datum.getFinishedMaps(); }
/** Get the number of finished reduces */
public int getFinishedReduces() { return datum.getFinishedReduces(); }
/** Get the status */
public String getStatus() { return datum.getJobStatus().toString(); }
/** Get the event type */
public EventType getEventType() {
if ("FAILED".equals(getStatus())) {
return EventType.JOB_FAILED;
} else if ("ERROR".equals(getStatus())) {
return EventType.JOB_ERROR;
    } else {
      return EventType.JOB_KILLED;
    }
}
/**
* Retrieves diagnostics information preserved in the history file
*
* @return diagnostics as of the time of job termination
*/
public String getDiagnostics() {
final CharSequence diagnostics = datum.getDiagnostics();
return diagnostics == null ? NODIAGS : diagnostics.toString();
}
}
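// Illustrative sketch (not part of the original source): diagnostics are
// newline-joined into a single Avro string, so several messages survive the
// round trip, and getEventType() maps the "KILLED" status to JOB_KILLED.
// The diagnostic literals below are hypothetical.
class JobUnsuccessfulCompletionSketch {
  static JobUnsuccessfulCompletionEvent killed(JobID id, long finishTime,
      int finishedMaps, int finishedReduces) {
    return new JobUnsuccessfulCompletionEvent(id, finishTime, finishedMaps,
        finishedReduces, "KILLED",
        java.util.Arrays.asList("Killed by user", "AM shut down"));
  }
}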
| 4,256 | 33.609756 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.EOFException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.util.StringInterner;
import org.apache.avro.Schema;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.DatumReader;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.specific.SpecificDatumReader;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class EventReader implements Closeable {
private String version;
private Schema schema;
private DataInputStream in;
private Decoder decoder;
  private DatumReader<Event> reader;
/**
* Create a new Event Reader
   * @param fs the file system that holds the history file
   * @param name path of the history file to read
   * @throws IOException if the file cannot be opened
*/
public EventReader(FileSystem fs, Path name) throws IOException {
this (fs.open(name));
}
/**
* Create a new Event Reader
   * @param in input stream positioned at the start of the history data
   * @throws IOException if the version or schema header cannot be read
*/
@SuppressWarnings("deprecation")
public EventReader(DataInputStream in) throws IOException {
this.in = in;
this.version = in.readLine();
Schema myschema = new SpecificData(Event.class.getClassLoader()).getSchema(Event.class);
Schema.Parser parser = new Schema.Parser();
this.schema = parser.parse(in.readLine());
    this.reader = new SpecificDatumReader<Event>(schema, myschema);
if (EventWriter.VERSION.equals(version)) {
this.decoder = DecoderFactory.get().jsonDecoder(schema, in);
} else if (EventWriter.VERSION_BINARY.equals(version)) {
this.decoder = DecoderFactory.get().binaryDecoder(in, null);
} else {
throw new IOException("Incompatible event log version: " + version);
}
}
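  // The constructor above implies the on-disk layout of a history file: a
  // first line carrying the writer version, a second line carrying the Avro
  // schema as JSON, and then the stream of Event records, JSON-encoded for
  // EventWriter.VERSION and binary-encoded for EventWriter.VERSION_BINARY.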
/**
* Get the next event from the stream
   * @return the next event, or null if the end of the stream is reached
   * @throws IOException if an event cannot be read or decoded
*/
  public HistoryEvent getNextEvent() throws IOException {
    Event wrapper;
    try {
      wrapper = reader.read(null, decoder);
} catch (EOFException e) { // at EOF
return null;
}
HistoryEvent result;
    switch (wrapper.getType()) {
    case JOB_SUBMITTED:
      result = new JobSubmittedEvent(); break;
    case JOB_INITED:
      result = new JobInitedEvent(); break;
    case JOB_FINISHED:
      result = new JobFinishedEvent(); break;
    case JOB_PRIORITY_CHANGED:
      result = new JobPriorityChangeEvent(); break;
    case JOB_QUEUE_CHANGED:
      result = new JobQueueChangeEvent(); break;
    case JOB_STATUS_CHANGED:
      result = new JobStatusChangedEvent(); break;
    case JOB_FAILED:
    case JOB_KILLED:
    case JOB_ERROR:
      result = new JobUnsuccessfulCompletionEvent(); break;
    case JOB_INFO_CHANGED:
      result = new JobInfoChangeEvent(); break;
    case TASK_STARTED:
      result = new TaskStartedEvent(); break;
    case TASK_FINISHED:
      result = new TaskFinishedEvent(); break;
    case TASK_FAILED:
      result = new TaskFailedEvent(); break;
    case TASK_UPDATED:
      result = new TaskUpdatedEvent(); break;
    case MAP_ATTEMPT_STARTED:
    case REDUCE_ATTEMPT_STARTED:
    case SETUP_ATTEMPT_STARTED:
    case CLEANUP_ATTEMPT_STARTED:
      result = new TaskAttemptStartedEvent(); break;
    case MAP_ATTEMPT_FINISHED:
      result = new MapAttemptFinishedEvent(); break;
    case REDUCE_ATTEMPT_FINISHED:
      result = new ReduceAttemptFinishedEvent(); break;
    case SETUP_ATTEMPT_FINISHED:
    case CLEANUP_ATTEMPT_FINISHED:
      result = new TaskAttemptFinishedEvent(); break;
    case MAP_ATTEMPT_FAILED:
    case MAP_ATTEMPT_KILLED:
    case REDUCE_ATTEMPT_FAILED:
    case REDUCE_ATTEMPT_KILLED:
    case SETUP_ATTEMPT_FAILED:
    case SETUP_ATTEMPT_KILLED:
    case CLEANUP_ATTEMPT_FAILED:
    case CLEANUP_ATTEMPT_KILLED:
      result = new TaskAttemptUnsuccessfulCompletionEvent(); break;
    case AM_STARTED:
      result = new AMStartedEvent(); break;
    default:
      throw new RuntimeException("unexpected event type: " + wrapper.getType());
    }
result.setDatum(wrapper.getEvent());
return result;
}
/**
* Close the Event reader
* @throws IOException
*/
@Override
public void close() throws IOException {
if (in != null) {
in.close();
}
in = null;
}
static Counters fromAvro(JhCounters counters) {
Counters result = new Counters();
if(counters != null) {
for (JhCounterGroup g : counters.getGroups()) {
CounterGroup group =
result.addGroup(StringInterner.weakIntern(g.getName().toString()),
StringInterner.weakIntern(g.getDisplayName().toString()));
for (JhCounter c : g.getCounts()) {
group.addCounter(StringInterner.weakIntern(c.getName().toString()),
StringInterner.weakIntern(c.getDisplayName().toString()),
c.getValue());
}
}
}
return result;
}
}
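// Illustrative usage sketch (assumed names "fs" and "historyPath"): drain a
// history file event by event; getNextEvent() returns null at end of stream.
//
//   EventReader reader = new EventReader(fs, historyPath);
//   try {
//     HistoryEvent event;
//     while ((event = reader.getNextEvent()) != null) {
//       System.out.println(event.getEventType());
//     }
//   } finally {
//     reader.close();
//   }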
| 6,800 | 33.522843 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.avro.util.Utf8;
/**
* Event to record the failure of a task
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskFailedEvent implements HistoryEvent {
private TaskFailed datum = null;
private TaskAttemptID failedDueToAttempt;
private TaskID id;
private TaskType taskType;
private long finishTime;
private String status;
private String error;
private Counters counters;
private static final Counters EMPTY_COUNTERS = new Counters();
/**
* Create an event to record task failure
* @param id Task ID
* @param finishTime Finish time of the task
* @param taskType Type of the task
* @param error Error String
* @param status Status
* @param failedDueToAttempt The attempt id due to which the task failed
* @param counters Counters for the task
*/
public TaskFailedEvent(TaskID id, long finishTime,
TaskType taskType, String error, String status,
TaskAttemptID failedDueToAttempt, Counters counters) {
this.id = id;
this.finishTime = finishTime;
this.taskType = taskType;
this.error = error;
this.status = status;
this.failedDueToAttempt = failedDueToAttempt;
this.counters = counters;
}
public TaskFailedEvent(TaskID id, long finishTime,
TaskType taskType, String error, String status,
TaskAttemptID failedDueToAttempt) {
this(id, finishTime, taskType, error, status,
failedDueToAttempt, EMPTY_COUNTERS);
}
TaskFailedEvent() {}
public Object getDatum() {
if(datum == null) {
datum = new TaskFailed();
datum.setTaskid(new Utf8(id.toString()));
datum.setError(new Utf8(error));
datum.setFinishTime(finishTime);
datum.setTaskType(new Utf8(taskType.name()));
datum.setFailedDueToAttempt(
failedDueToAttempt == null
? null
: new Utf8(failedDueToAttempt.toString()));
datum.setStatus(new Utf8(status));
datum.setCounters(EventWriter.toAvro(counters));
}
return datum;
}
public void setDatum(Object odatum) {
this.datum = (TaskFailed)odatum;
this.id =
TaskID.forName(datum.getTaskid().toString());
this.taskType =
TaskType.valueOf(datum.getTaskType().toString());
this.finishTime = datum.getFinishTime();
this.error = datum.getError().toString();
this.failedDueToAttempt =
datum.getFailedDueToAttempt() == null
? null
: TaskAttemptID.forName(
datum.getFailedDueToAttempt().toString());
this.status = datum.getStatus().toString();
this.counters =
EventReader.fromAvro(datum.getCounters());
}
/** Get the task id */
public TaskID getTaskId() { return id; }
/** Get the error string */
public String getError() { return error; }
  /** Get the finish time of the task */
public long getFinishTime() {
return finishTime;
}
/** Get the task type */
public TaskType getTaskType() {
return taskType;
}
/** Get the attempt id due to which the task failed */
public TaskAttemptID getFailedAttemptID() {
return failedDueToAttempt;
}
/** Get the task status */
public String getTaskStatus() { return status; }
/** Get task counters */
public Counters getCounters() { return counters; }
/** Get the event type */
public EventType getEventType() {
return EventType.TASK_FAILED;
}
}
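// Illustrative sketch (not part of the original class): recording a failed
// map task. The task id and error text are assumed placeholders; a null
// attempt id is allowed, and the shorter constructor substitutes
// EMPTY_COUNTERS when no counters are available.
//
//   TaskFailedEvent ev = new TaskFailedEvent(
//       TaskID.forName("task_1400000000000_0001_m_000003"),
//       System.currentTimeMillis(), TaskType.MAP,
//       "OutOfMemoryError in user code", "FAILED",
//       null /* no single attempt to blame */);
//   assert ev.getEventType() == EventType.TASK_FAILED;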
| 4,548 | 31.262411 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
/**
* Default Parser for the JobHistory files. Typical usage is
* JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
* job = parser.parse();
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobHistoryParser implements HistoryEventHandler {
private static final Log LOG = LogFactory.getLog(JobHistoryParser.class);
private final FSDataInputStream in;
private JobInfo info = null;
private IOException parseException = null;
/**
* Create a job history parser for the given history file using the
* given file system
   * @param fs the file system that holds the history file
   * @param file path of the history file, given as a String
   * @throws IOException if the file cannot be opened
*/
public JobHistoryParser(FileSystem fs, String file) throws IOException {
this(fs, new Path(file));
}
/**
* Create the job history parser for the given history file using the
* given file system
   * @param fs the file system that holds the history file
   * @param historyFile path of the history file
   * @throws IOException if the file cannot be opened
*/
public JobHistoryParser(FileSystem fs, Path historyFile)
throws IOException {
this(fs.open(historyFile));
}
/**
* Create the history parser based on the input stream
   * @param in stream positioned at the start of the job history data
*/
public JobHistoryParser(FSDataInputStream in) {
this.in = in;
}
public synchronized void parse(HistoryEventHandler handler)
throws IOException {
parse(new EventReader(in), handler);
}
/**
* Only used for unit tests.
*/
@Private
public synchronized void parse(EventReader reader, HistoryEventHandler handler)
throws IOException {
int eventCtr = 0;
HistoryEvent event;
try {
while ((event = reader.getNextEvent()) != null) {
handler.handleEvent(event);
++eventCtr;
}
} catch (IOException ioe) {
LOG.info("Caught exception parsing history file after " + eventCtr +
" events", ioe);
parseException = ioe;
} finally {
in.close();
}
}
/**
* Parse the entire history file and populate the JobInfo object
* The first invocation will populate the object, subsequent calls
* will return the already parsed object.
* The input stream is closed on return
*
   * This API ignores partial records and stops parsing on encountering one.
* {@link #getParseException()} can be used to fetch the exception, if any.
*
* @return The populated jobInfo object
* @throws IOException
* @see #getParseException()
*/
public synchronized JobInfo parse() throws IOException {
return parse(new EventReader(in));
}
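  // Illustrative usage (assumed names "fs" and "historyFile"):
  //
  //   JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
  //   JobHistoryParser.JobInfo jobInfo = parser.parse();
  //   if (parser.getParseException() != null) {
  //     // a partial record stopped the parse early; jobInfo still holds
  //     // everything read up to that point
  //   }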
/**
* Only used for unit tests.
*/
@Private
public synchronized JobInfo parse(EventReader reader) throws IOException {
if (info != null) {
return info;
}
info = new JobInfo();
parse(reader, this);
return info;
}
/**
* Get the parse exception, if any.
*
* @return the parse exception, if any
* @see #parse()
*/
public synchronized IOException getParseException() {
return parseException;
}
@Override
public void handleEvent(HistoryEvent event) {
EventType type = event.getEventType();
switch (type) {
case JOB_SUBMITTED:
handleJobSubmittedEvent((JobSubmittedEvent)event);
break;
case JOB_STATUS_CHANGED:
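      // no-op: JobInfo keeps no field for intermediate status changes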
break;
case JOB_INFO_CHANGED:
handleJobInfoChangeEvent((JobInfoChangeEvent) event);
break;
case JOB_INITED:
handleJobInitedEvent((JobInitedEvent) event);
break;
case JOB_PRIORITY_CHANGED:
handleJobPriorityChangeEvent((JobPriorityChangeEvent) event);
break;
case JOB_QUEUE_CHANGED:
handleJobQueueChangeEvent((JobQueueChangeEvent) event);
break;
case JOB_FAILED:
case JOB_KILLED:
case JOB_ERROR:
handleJobFailedEvent((JobUnsuccessfulCompletionEvent) event);
break;
case JOB_FINISHED:
handleJobFinishedEvent((JobFinishedEvent)event);
break;
case TASK_STARTED:
handleTaskStartedEvent((TaskStartedEvent) event);
break;
case TASK_FAILED:
handleTaskFailedEvent((TaskFailedEvent) event);
break;
case TASK_UPDATED:
handleTaskUpdatedEvent((TaskUpdatedEvent) event);
break;
case TASK_FINISHED:
handleTaskFinishedEvent((TaskFinishedEvent) event);
break;
case MAP_ATTEMPT_STARTED:
case CLEANUP_ATTEMPT_STARTED:
case REDUCE_ATTEMPT_STARTED:
case SETUP_ATTEMPT_STARTED:
handleTaskAttemptStartedEvent((TaskAttemptStartedEvent) event);
break;
case MAP_ATTEMPT_FAILED:
case CLEANUP_ATTEMPT_FAILED:
case REDUCE_ATTEMPT_FAILED:
case SETUP_ATTEMPT_FAILED:
case MAP_ATTEMPT_KILLED:
case CLEANUP_ATTEMPT_KILLED:
case REDUCE_ATTEMPT_KILLED:
case SETUP_ATTEMPT_KILLED:
handleTaskAttemptFailedEvent(
(TaskAttemptUnsuccessfulCompletionEvent) event);
break;
case MAP_ATTEMPT_FINISHED:
handleMapAttemptFinishedEvent((MapAttemptFinishedEvent) event);
break;
case REDUCE_ATTEMPT_FINISHED:
handleReduceAttemptFinishedEvent((ReduceAttemptFinishedEvent) event);
break;
case SETUP_ATTEMPT_FINISHED:
case CLEANUP_ATTEMPT_FINISHED:
handleTaskAttemptFinishedEvent((TaskAttemptFinishedEvent) event);
break;
case AM_STARTED:
handleAMStartedEvent((AMStartedEvent) event);
break;
default:
break;
}
}
private void handleTaskAttemptFinishedEvent(TaskAttemptFinishedEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
TaskAttemptInfo attemptInfo =
taskInfo.attemptsMap.get(event.getAttemptId());
attemptInfo.finishTime = event.getFinishTime();
attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
attemptInfo.state = StringInterner.weakIntern(event.getState());
attemptInfo.counters = event.getCounters();
attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
private void handleReduceAttemptFinishedEvent
(ReduceAttemptFinishedEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
TaskAttemptInfo attemptInfo =
taskInfo.attemptsMap.get(event.getAttemptId());
attemptInfo.finishTime = event.getFinishTime();
attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
attemptInfo.state = StringInterner.weakIntern(event.getState());
attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
attemptInfo.sortFinishTime = event.getSortFinishTime();
attemptInfo.counters = event.getCounters();
attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
attemptInfo.port = event.getPort();
attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
private void handleMapAttemptFinishedEvent(MapAttemptFinishedEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
TaskAttemptInfo attemptInfo =
taskInfo.attemptsMap.get(event.getAttemptId());
attemptInfo.finishTime = event.getFinishTime();
attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
attemptInfo.state = StringInterner.weakIntern(event.getState());
attemptInfo.mapFinishTime = event.getMapFinishTime();
attemptInfo.counters = event.getCounters();
attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
attemptInfo.port = event.getPort();
attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
private void handleTaskAttemptFailedEvent(
TaskAttemptUnsuccessfulCompletionEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
if(taskInfo == null) {
LOG.warn("TaskInfo is null for TaskAttemptUnsuccessfulCompletionEvent"
+ " taskId: " + event.getTaskId().toString());
return;
}
TaskAttemptInfo attemptInfo =
taskInfo.attemptsMap.get(event.getTaskAttemptId());
if(attemptInfo == null) {
LOG.warn("AttemptInfo is null for TaskAttemptUnsuccessfulCompletionEvent"
+ " taskAttemptId: " + event.getTaskAttemptId().toString());
return;
}
attemptInfo.finishTime = event.getFinishTime();
attemptInfo.error = StringInterner.weakIntern(event.getError());
attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
attemptInfo.port = event.getPort();
attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
attemptInfo.shuffleFinishTime = event.getFinishTime();
attemptInfo.sortFinishTime = event.getFinishTime();
attemptInfo.mapFinishTime = event.getFinishTime();
attemptInfo.counters = event.getCounters();
if(TaskStatus.State.SUCCEEDED.toString().equals(taskInfo.status))
{
//this is a successful task
if(attemptInfo.getAttemptId().equals(taskInfo.getSuccessfulAttemptId()))
{
// the failed attempt is the one that made this task successful
        // so it's no longer successful. Reset fields set in
// handleTaskFinishedEvent()
taskInfo.counters = null;
taskInfo.finishTime = -1;
taskInfo.status = null;
taskInfo.successfulAttemptId = null;
}
}
info.completedTaskAttemptsMap.put(event.getTaskAttemptId(), attemptInfo);
}
private void handleTaskAttemptStartedEvent(TaskAttemptStartedEvent event) {
TaskAttemptID attemptId = event.getTaskAttemptId();
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
TaskAttemptInfo attemptInfo = new TaskAttemptInfo();
attemptInfo.startTime = event.getStartTime();
attemptInfo.attemptId = event.getTaskAttemptId();
attemptInfo.httpPort = event.getHttpPort();
attemptInfo.trackerName = StringInterner.weakIntern(event.getTrackerName());
attemptInfo.taskType = event.getTaskType();
attemptInfo.shufflePort = event.getShufflePort();
attemptInfo.containerId = event.getContainerId();
taskInfo.attemptsMap.put(attemptId, attemptInfo);
}
private void handleTaskFinishedEvent(TaskFinishedEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
taskInfo.counters = event.getCounters();
taskInfo.finishTime = event.getFinishTime();
taskInfo.status = TaskStatus.State.SUCCEEDED.toString();
taskInfo.successfulAttemptId = event.getSuccessfulTaskAttemptId();
}
private void handleTaskUpdatedEvent(TaskUpdatedEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
taskInfo.finishTime = event.getFinishTime();
}
private void handleTaskFailedEvent(TaskFailedEvent event) {
TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
taskInfo.status = TaskStatus.State.FAILED.toString();
taskInfo.finishTime = event.getFinishTime();
taskInfo.error = StringInterner.weakIntern(event.getError());
taskInfo.failedDueToAttemptId = event.getFailedAttemptID();
taskInfo.counters = event.getCounters();
}
private void handleTaskStartedEvent(TaskStartedEvent event) {
TaskInfo taskInfo = new TaskInfo();
taskInfo.taskId = event.getTaskId();
taskInfo.startTime = event.getStartTime();
taskInfo.taskType = event.getTaskType();
taskInfo.splitLocations = event.getSplitLocations();
info.tasksMap.put(event.getTaskId(), taskInfo);
}
private void handleJobFailedEvent(JobUnsuccessfulCompletionEvent event) {
info.finishTime = event.getFinishTime();
info.finishedMaps = event.getFinishedMaps();
info.finishedReduces = event.getFinishedReduces();
info.jobStatus = StringInterner.weakIntern(event.getStatus());
info.errorInfo = StringInterner.weakIntern(event.getDiagnostics());
}
private void handleJobFinishedEvent(JobFinishedEvent event) {
info.finishTime = event.getFinishTime();
info.finishedMaps = event.getFinishedMaps();
info.finishedReduces = event.getFinishedReduces();
info.failedMaps = event.getFailedMaps();
info.failedReduces = event.getFailedReduces();
info.totalCounters = event.getTotalCounters();
info.mapCounters = event.getMapCounters();
info.reduceCounters = event.getReduceCounters();
info.jobStatus = JobStatus.getJobRunState(JobStatus.SUCCEEDED);
}
private void handleJobPriorityChangeEvent(JobPriorityChangeEvent event) {
info.priority = event.getPriority();
}
private void handleJobQueueChangeEvent(JobQueueChangeEvent event) {
info.jobQueueName = event.getJobQueueName();
}
private void handleJobInitedEvent(JobInitedEvent event) {
info.launchTime = event.getLaunchTime();
info.totalMaps = event.getTotalMaps();
info.totalReduces = event.getTotalReduces();
info.uberized = event.getUberized();
}
private void handleAMStartedEvent(AMStartedEvent event) {
AMInfo amInfo = new AMInfo();
amInfo.appAttemptId = event.getAppAttemptId();
amInfo.startTime = event.getStartTime();
amInfo.containerId = event.getContainerId();
amInfo.nodeManagerHost = StringInterner.weakIntern(event.getNodeManagerHost());
amInfo.nodeManagerPort = event.getNodeManagerPort();
amInfo.nodeManagerHttpPort = event.getNodeManagerHttpPort();
if (info.amInfos == null) {
info.amInfos = new LinkedList<AMInfo>();
}
info.amInfos.add(amInfo);
info.latestAmInfo = amInfo;
}
private void handleJobInfoChangeEvent(JobInfoChangeEvent event) {
info.submitTime = event.getSubmitTime();
info.launchTime = event.getLaunchTime();
}
private void handleJobSubmittedEvent(JobSubmittedEvent event) {
info.jobid = event.getJobId();
info.jobname = event.getJobName();
info.username = StringInterner.weakIntern(event.getUserName());
info.submitTime = event.getSubmitTime();
info.jobConfPath = event.getJobConfPath();
info.jobACLs = event.getJobAcls();
info.jobQueueName = StringInterner.weakIntern(event.getJobQueueName());
}
/**
   * The class into which job information is aggregated after parsing
*/
public static class JobInfo {
String errorInfo = "";
long submitTime;
long finishTime;
JobID jobid;
String username;
String jobname;
String jobQueueName;
String jobConfPath;
long launchTime;
int totalMaps;
int totalReduces;
int failedMaps;
int failedReduces;
int finishedMaps;
int finishedReduces;
String jobStatus;
Counters totalCounters;
Counters mapCounters;
Counters reduceCounters;
JobPriority priority;
Map<JobACL, AccessControlList> jobACLs;
Map<TaskID, TaskInfo> tasksMap;
Map<TaskAttemptID, TaskAttemptInfo> completedTaskAttemptsMap;
List<AMInfo> amInfos;
AMInfo latestAmInfo;
boolean uberized;
/** Create a job info object where job information will be stored
* after a parse
*/
public JobInfo() {
submitTime = launchTime = finishTime = -1;
totalMaps = totalReduces = failedMaps = failedReduces = 0;
finishedMaps = finishedReduces = 0;
username = jobname = jobConfPath = jobQueueName = "";
tasksMap = new HashMap<TaskID, TaskInfo>();
completedTaskAttemptsMap = new HashMap<TaskAttemptID, TaskAttemptInfo>();
jobACLs = new HashMap<JobACL, AccessControlList>();
priority = JobPriority.NORMAL;
}
/** Print all the job information */
public void printAll() {
System.out.println("JOBNAME: " + jobname);
System.out.println("USERNAME: " + username);
System.out.println("JOB_QUEUE_NAME: " + jobQueueName);
      System.out.println("SUBMIT_TIME: " + submitTime);
System.out.println("LAUNCH_TIME: " + launchTime);
System.out.println("JOB_STATUS: " + jobStatus);
System.out.println("PRIORITY: " + priority);
System.out.println("TOTAL_MAPS: " + totalMaps);
System.out.println("TOTAL_REDUCES: " + totalReduces);
if (mapCounters != null) {
System.out.println("MAP_COUNTERS:" + mapCounters.toString());
}
if (reduceCounters != null) {
System.out.println("REDUCE_COUNTERS:" + reduceCounters.toString());
}
if (totalCounters != null) {
System.out.println("TOTAL_COUNTERS: " + totalCounters.toString());
}
System.out.println("UBERIZED: " + uberized);
if (amInfos != null) {
for (AMInfo amInfo : amInfos) {
amInfo.printAll();
}
}
for (TaskInfo ti: tasksMap.values()) {
ti.printAll();
}
}
/** @return the job submit time */
public long getSubmitTime() { return submitTime; }
/** @return the job finish time */
public long getFinishTime() { return finishTime; }
/** @return the job id */
public JobID getJobId() { return jobid; }
/** @return the user name */
public String getUsername() { return username; }
/** @return the job name */
public String getJobname() { return jobname; }
/** @return the job queue name */
public String getJobQueueName() { return jobQueueName; }
/** @return the path for the job configuration file */
public String getJobConfPath() { return jobConfPath; }
/** @return the job launch time */
public long getLaunchTime() { return launchTime; }
/** @return the total number of maps */
public long getTotalMaps() { return totalMaps; }
/** @return the total number of reduces */
public long getTotalReduces() { return totalReduces; }
/** @return the total number of failed maps */
public long getFailedMaps() { return failedMaps; }
/** @return the number of failed reduces */
public long getFailedReduces() { return failedReduces; }
/** @return the number of finished maps */
public long getFinishedMaps() { return finishedMaps; }
/** @return the number of finished reduces */
public long getFinishedReduces() { return finishedReduces; }
/** @return the job status */
public String getJobStatus() { return jobStatus; }
public String getErrorInfo() { return errorInfo; }
/** @return the counters for the job */
public Counters getTotalCounters() { return totalCounters; }
/** @return the map counters for the job */
public Counters getMapCounters() { return mapCounters; }
/** @return the reduce counters for the job */
public Counters getReduceCounters() { return reduceCounters; }
/** @return the map of all tasks in this job */
public Map<TaskID, TaskInfo> getAllTasks() { return tasksMap; }
/** @return the map of all completed task attempts in this job */
public Map<TaskAttemptID, TaskAttemptInfo> getAllCompletedTaskAttempts() { return completedTaskAttemptsMap; }
/** @return the priority of this job */
public String getPriority() { return priority.toString(); }
public Map<JobACL, AccessControlList> getJobACLs() { return jobACLs; }
/** @return the uberized status of this job */
public boolean getUberized() { return uberized; }
/** @return the AMInfo for the job's AppMaster */
public List<AMInfo> getAMInfos() { return amInfos; }
/** @return the AMInfo for the newest AppMaster */
public AMInfo getLatestAMInfo() { return latestAmInfo; }
}
/**
   * Task information is aggregated in this class after parsing
*/
public static class TaskInfo {
TaskID taskId;
long startTime;
long finishTime;
TaskType taskType;
String splitLocations;
Counters counters;
String status;
String error;
TaskAttemptID failedDueToAttemptId;
TaskAttemptID successfulAttemptId;
Map<TaskAttemptID, TaskAttemptInfo> attemptsMap;
public TaskInfo() {
startTime = finishTime = -1;
error = splitLocations = "";
attemptsMap = new HashMap<TaskAttemptID, TaskAttemptInfo>();
}
public void printAll() {
System.out.println("TASK_ID:" + taskId.toString());
System.out.println("START_TIME: " + startTime);
System.out.println("FINISH_TIME:" + finishTime);
System.out.println("TASK_TYPE:" + taskType);
if (counters != null) {
System.out.println("COUNTERS:" + counters.toString());
}
for (TaskAttemptInfo tinfo: attemptsMap.values()) {
tinfo.printAll();
}
}
/** @return the Task ID */
public TaskID getTaskId() { return taskId; }
/** @return the start time of this task */
public long getStartTime() { return startTime; }
/** @return the finish time of this task */
public long getFinishTime() { return finishTime; }
/** @return the task type */
public TaskType getTaskType() { return taskType; }
/** @return the split locations */
public String getSplitLocations() { return splitLocations; }
/** @return the counters for this task */
public Counters getCounters() { return counters; }
/** @return the task status */
public String getTaskStatus() { return status; }
/** @return the attempt Id that caused this task to fail */
public TaskAttemptID getFailedDueToAttemptId() {
return failedDueToAttemptId;
}
/** @return the attempt Id that caused this task to succeed */
public TaskAttemptID getSuccessfulAttemptId() {
return successfulAttemptId;
}
/** @return the error */
public String getError() { return error; }
/** @return the map of all attempts for this task */
public Map<TaskAttemptID, TaskAttemptInfo> getAllTaskAttempts() {
return attemptsMap;
}
}
/**
* Task Attempt Information is aggregated in this class after parsing
*/
public static class TaskAttemptInfo {
TaskAttemptID attemptId;
long startTime;
long finishTime;
long shuffleFinishTime;
long sortFinishTime;
long mapFinishTime;
String error;
String status;
String state;
TaskType taskType;
String trackerName;
Counters counters;
int httpPort;
int shufflePort;
String hostname;
int port;
String rackname;
ContainerId containerId;
/** Create a Task Attempt Info which will store attempt level information
* on a history parse.
*/
public TaskAttemptInfo() {
startTime = finishTime = shuffleFinishTime = sortFinishTime =
mapFinishTime = -1;
error = state = trackerName = hostname = rackname = "";
port = -1;
httpPort = -1;
shufflePort = -1;
}
/**
* Print all the information about this attempt.
*/
public void printAll() {
System.out.println("ATTEMPT_ID:" + attemptId.toString());
System.out.println("START_TIME: " + startTime);
System.out.println("FINISH_TIME:" + finishTime);
System.out.println("ERROR:" + error);
System.out.println("TASK_STATUS:" + status);
System.out.println("STATE:" + state);
System.out.println("TASK_TYPE:" + taskType);
System.out.println("TRACKER_NAME:" + trackerName);
System.out.println("HTTP_PORT:" + httpPort);
System.out.println("SHUFFLE_PORT:" + shufflePort);
      System.out.println("CONTAINER_ID:" + containerId);
if (counters != null) {
System.out.println("COUNTERS:" + counters.toString());
}
}
/** @return the attempt Id */
public TaskAttemptID getAttemptId() { return attemptId; }
/** @return the start time of the attempt */
public long getStartTime() { return startTime; }
/** @return the finish time of the attempt */
public long getFinishTime() { return finishTime; }
/** @return the shuffle finish time. Applicable only for reduce attempts */
public long getShuffleFinishTime() { return shuffleFinishTime; }
/** @return the sort finish time. Applicable only for reduce attempts */
public long getSortFinishTime() { return sortFinishTime; }
/** @return the map finish time. Applicable only for map attempts */
public long getMapFinishTime() { return mapFinishTime; }
/** @return the error string */
public String getError() { return error; }
/** @return the state */
public String getState() { return state; }
/** @return the task status */
public String getTaskStatus() { return status; }
/** @return the task type */
public TaskType getTaskType() { return taskType; }
/** @return the tracker name where the attempt executed */
public String getTrackerName() { return trackerName; }
/** @return the host name */
public String getHostname() { return hostname; }
/** @return the port */
public int getPort() { return port; }
/** @return the rack name */
public String getRackname() { return rackname; }
/** @return the counters for the attempt */
public Counters getCounters() { return counters; }
/** @return the HTTP port for the tracker */
public int getHttpPort() { return httpPort; }
/** @return the Shuffle port for the tracker */
public int getShufflePort() { return shufflePort; }
/** @return the ContainerId for the tracker */
public ContainerId getContainerId() { return containerId; }
}
/**
* Stores AM information
*/
public static class AMInfo {
ApplicationAttemptId appAttemptId;
long startTime;
ContainerId containerId;
String nodeManagerHost;
int nodeManagerPort;
int nodeManagerHttpPort;
/**
     * Create an AM Info which will store AM-level information on a history
* parse.
*/
public AMInfo() {
startTime = -1;
nodeManagerHost = "";
nodeManagerHttpPort = -1;
}
public AMInfo(ApplicationAttemptId appAttemptId, long startTime,
ContainerId containerId, String nodeManagerHost, int nodeManagerPort,
int nodeManagerHttpPort) {
this.appAttemptId = appAttemptId;
this.startTime = startTime;
this.containerId = containerId;
this.nodeManagerHost = nodeManagerHost;
this.nodeManagerPort = nodeManagerPort;
this.nodeManagerHttpPort = nodeManagerHttpPort;
}
/**
* Print all the information about this AM.
*/
public void printAll() {
System.out.println("APPLICATION_ATTEMPT_ID:" + appAttemptId.toString());
System.out.println("START_TIME: " + startTime);
System.out.println("CONTAINER_ID: " + containerId.toString());
System.out.println("NODE_MANAGER_HOST: " + nodeManagerHost);
System.out.println("NODE_MANAGER_PORT: " + nodeManagerPort);
System.out.println("NODE_MANAGER_HTTP_PORT: " + nodeManagerHttpPort);
}
/** @return the ApplicationAttemptId */
public ApplicationAttemptId getAppAttemptId() {
return appAttemptId;
}
/** @return the start time of the AM */
public long getStartTime() {
return startTime;
}
/** @return the container id for the AM */
public ContainerId getContainerId() {
return containerId;
}
/** @return the host name for the node manager on which the AM is running */
public String getNodeManagerHost() {
return nodeManagerHost;
}
/** @return the port for the node manager running the AM */
public int getNodeManagerPort() {
return nodeManagerPort;
}
/** @return the http port for the node manager running the AM */
public int getNodeManagerHttpPort() {
return nodeManagerHttpPort;
}
}
}
| 29,325 | 35.429814 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobQueueChangeEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.mapreduce.JobID;
@SuppressWarnings("deprecation")
public class JobQueueChangeEvent implements HistoryEvent {
private JobQueueChange datum = new JobQueueChange();
public JobQueueChangeEvent(JobID id, String queueName) {
datum.jobid = new Utf8(id.toString());
datum.jobQueueName = new Utf8(queueName);
}
JobQueueChangeEvent() { }
@Override
public EventType getEventType() {
return EventType.JOB_QUEUE_CHANGED;
}
@Override
public Object getDatum() {
return datum;
}
@Override
public void setDatum(Object datum) {
this.datum = (JobQueueChange) datum;
}
/** Get the Job ID */
public JobID getJobId() {
return JobID.forName(datum.jobid.toString());
}
/** Get the new Job queue name */
public String getJobQueueName() {
if (datum.jobQueueName != null) {
return datum.jobQueueName.toString();
}
return null;
}
}
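// Illustrative sketch (job id and queue name are assumed placeholders): a
// queue move is recorded simply as
//
//   new JobQueueChangeEvent(JobID.forName("job_1400000000000_0001"),
//       "low-priority");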
| 1,819 | 27.4375 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
/**
* Event to record the start of a task
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskStartedEvent implements HistoryEvent {
private TaskStarted datum = new TaskStarted();
/**
* Create an event to record start of a task
* @param id Task Id
* @param startTime Start time of the task
* @param taskType Type of the task
* @param splitLocations Split locations, applicable for map tasks
*/
public TaskStartedEvent(TaskID id, long startTime,
TaskType taskType, String splitLocations) {
datum.setTaskid(new Utf8(id.toString()));
datum.setSplitLocations(new Utf8(splitLocations));
datum.setStartTime(startTime);
datum.setTaskType(new Utf8(taskType.name()));
}
TaskStartedEvent() {}
public Object getDatum() { return datum; }
public void setDatum(Object datum) { this.datum = (TaskStarted)datum; }
/** Get the task id */
public TaskID getTaskId() {
return TaskID.forName(datum.getTaskid().toString());
}
/** Get the split locations, applicable for map tasks */
public String getSplitLocations() {
return datum.getSplitLocations().toString();
}
/** Get the start time of the task */
public long getStartTime() { return datum.getStartTime(); }
/** Get the task type */
public TaskType getTaskType() {
return TaskType.valueOf(datum.getTaskType().toString());
}
/** Get the event type */
public EventType getEventType() {
return EventType.TASK_STARTED;
}
}
| 2,585 | 33.026316 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.avro.util.Utf8;
/**
* Event to record unsuccessful (Killed/Failed) completion of task attempts
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent {
private TaskAttemptUnsuccessfulCompletion datum = null;
private TaskAttemptID attemptId;
private TaskType taskType;
private String status;
private long finishTime;
private String hostname;
private int port;
private String rackName;
private String error;
private Counters counters;
int[][] allSplits;
int[] clockSplits;
int[] cpuUsages;
int[] vMemKbytes;
int[] physMemKbytes;
private static final Counters EMPTY_COUNTERS = new Counters();
/**
* Create an event to record the unsuccessful completion of attempts
* @param id Attempt ID
* @param taskType Type of the task
* @param status Status of the attempt
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the attempt executed
   * @param port rpc port of the task tracker
* @param rackName Name of the rack where the attempt executed
* @param error Error string
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
* measurable worker node state variables against progress.
   * Currently there are four: wallclock time, CPU time,
* virtual memory and physical memory.
*/
public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, int port, String rackName,
String error, Counters counters, int[][] allSplits) {
this.attemptId = id;
this.taskType = taskType;
this.status = status;
this.finishTime = finishTime;
this.hostname = hostname;
this.port = port;
this.rackName = rackName;
this.error = error;
this.counters = counters;
this.allSplits = allSplits;
this.clockSplits =
ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
this.cpuUsages =
ProgressSplitsBlock.arrayGetCPUTime(allSplits);
this.vMemKbytes =
ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
this.physMemKbytes =
ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
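  // "allSplits" above is the ProgressSplitsBlock matrix: one row per tracked
  // variable (wallclock time, CPU time, virtual memory, physical memory),
  // each row a progress-indexed series of samples. The arrayGet* helpers
  // slice out a single row and are expected to tolerate a null matrix, since
  // the deprecated constructor below passes null.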
/**
* @deprecated please use the constructor with an additional
* argument, an array of splits arrays instead. See
* {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
* for an explanation of the meaning of that parameter.
*
* Create an event to record the unsuccessful completion of attempts
* @param id Attempt ID
* @param taskType Type of the task
* @param status Status of the attempt
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the attempt executed
* @param error Error string
*/
public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, String error) {
this(id, taskType, status, finishTime, hostname, -1, "",
error, EMPTY_COUNTERS, null);
}
public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, int port, String rackName,
String error, int[][] allSplits) {
this(id, taskType, status, finishTime, hostname, port,
        rackName, error, EMPTY_COUNTERS, allSplits);
}
TaskAttemptUnsuccessfulCompletionEvent() {}
public Object getDatum() {
if(datum == null) {
datum = new TaskAttemptUnsuccessfulCompletion();
datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
datum.setTaskType(new Utf8(taskType.name()));
datum.setAttemptId(new Utf8(attemptId.toString()));
datum.setFinishTime(finishTime);
datum.setHostname(new Utf8(hostname));
if (rackName != null) {
datum.setRackname(new Utf8(rackName));
}
datum.setPort(port);
datum.setError(new Utf8(error));
datum.setStatus(new Utf8(status));
datum.setCounters(EventWriter.toAvro(counters));
datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetWallclockTime(allSplits)));
datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetCPUTime(allSplits)));
datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetVMemKbytes(allSplits)));
datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
.arrayGetPhysMemKbytes(allSplits)));
}
return datum;
}
public void setDatum(Object odatum) {
this.datum =
(TaskAttemptUnsuccessfulCompletion)odatum;
this.attemptId =
TaskAttemptID.forName(datum.getAttemptId().toString());
this.taskType =
TaskType.valueOf(datum.getTaskType().toString());
this.finishTime = datum.getFinishTime();
this.hostname = datum.getHostname().toString();
    this.rackName = datum.getRackname() == null
        ? null : datum.getRackname().toString();
this.port = datum.getPort();
this.status = datum.getStatus().toString();
this.error = datum.getError().toString();
this.counters =
EventReader.fromAvro(datum.getCounters());
this.clockSplits =
AvroArrayUtils.fromAvro(datum.getClockSplits());
this.cpuUsages =
AvroArrayUtils.fromAvro(datum.getCpuUsages());
this.vMemKbytes =
AvroArrayUtils.fromAvro(datum.getVMemKbytes());
this.physMemKbytes =
AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
}
/** Get the task id */
public TaskID getTaskId() {
return attemptId.getTaskID();
}
/** Get the task type */
public TaskType getTaskType() {
    return taskType;
}
/** Get the attempt id */
public TaskAttemptID getTaskAttemptId() {
return attemptId;
}
/** Get the finish time */
public long getFinishTime() { return finishTime; }
/** Get the name of the host where the attempt executed */
public String getHostname() { return hostname; }
/** Get the rpc port for the host where the attempt executed */
public int getPort() { return port; }
/** Get the rack name of the node where the attempt ran */
public String getRackName() {
    return rackName;
}
/** Get the error string */
  public String getError() { return error; }
/** Get the task status */
public String getTaskStatus() {
    return status;
}
/** Get the counters */
Counters getCounters() { return counters; }
/** Get the event type */
public EventType getEventType() {
// Note that the task type can be setup/map/reduce/cleanup but the
// attempt-type can only be map/reduce.
// find out if the task failed or got killed
boolean failed = TaskStatus.State.FAILED.toString().equals(getTaskStatus());
return getTaskId().getTaskType() == TaskType.MAP
? (failed
? EventType.MAP_ATTEMPT_FAILED
: EventType.MAP_ATTEMPT_KILLED)
: (failed
? EventType.REDUCE_ATTEMPT_FAILED
: EventType.REDUCE_ATTEMPT_KILLED);
}
public int[] getClockSplits() {
return clockSplits;
}
public int[] getCpuUsages() {
return cpuUsages;
}
public int[] getVMemKbytes() {
return vMemKbytes;
}
public int[] getPhysMemKbytes() {
return physMemKbytes;
}
}
| 8,796 | 33.90873 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskUpdatedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.avro.util.Utf8;
/**
* Event to record updates to a task
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskUpdatedEvent implements HistoryEvent {
private TaskUpdated datum = new TaskUpdated();
/**
* Create an event to record task updates
* @param id Id of the task
* @param finishTime Finish time of the task
*/
public TaskUpdatedEvent(TaskID id, long finishTime) {
datum.setTaskid(new Utf8(id.toString()));
datum.setFinishTime(finishTime);
}
TaskUpdatedEvent() {}
public Object getDatum() { return datum; }
public void setDatum(Object datum) { this.datum = (TaskUpdated)datum; }
/** Get the task ID */
public TaskID getTaskId() {
return TaskID.forName(datum.getTaskid().toString());
}
/** Get the task finish time */
public long getFinishTime() { return datum.getFinishTime(); }
/** Get the event type */
public EventType getEventType() {
return EventType.TASK_UPDATED;
}
}
| 2,036 | 30.338462 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AvroArrayUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
public class AvroArrayUtils {
private static final Schema ARRAY_INT
= Schema.createArray(Schema.create(Schema.Type.INT));
  public static final List<Integer> NULL_PROGRESS_SPLITS_ARRAY
    = new GenericData.Array<Integer>(0, ARRAY_INT);
  public static List<Integer> toAvro(int[] values) {
    if (values == null) {
      // events built without progress splits pass null; return the canonical
      // empty array instead of failing on values.length
      return NULL_PROGRESS_SPLITS_ARRAY;
    }
    List<Integer> result = new ArrayList<Integer>(values.length);
for (int i = 0; i < values.length; ++i) {
result.add(values[i]);
}
return result;
}
public static int[] fromAvro(List<Integer> avro) {
int[] result = new int[avro.size()];
int i = 0;
for (Iterator<Integer> iter = avro.iterator(); iter.hasNext(); ++i) {
result[i] = iter.next();
}
return result;
}
}
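// Illustrative round trip (not part of the original class):
//
//   int[] splits = {5, 10, 15};
//   java.util.List<Integer> avro = AvroArrayUtils.toAvro(splits);
//   int[] back = AvroArrayUtils.fromAvro(avro);   // back == {5, 10, 15}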
| 1,738 | 28.474576 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@Private
@Unstable
public class HostUtil {
/**
   * Construct the taskLogUrl
   * @param scheme the URL scheme, e.g. "http://"
   * @param taskTrackerHostName host name of the task tracker
   * @param httpPort HTTP port the task tracker listens on
   * @param taskAttemptID id of the task attempt whose log is requested
   * @return the taskLogUrl
*/
public static String getTaskLogUrl(String scheme, String taskTrackerHostName,
String httpPort, String taskAttemptID) {
return (scheme + taskTrackerHostName + ":" +
httpPort + "/tasklog?attemptid=" + taskAttemptID);
}
/**
* Always throws {@link RuntimeException} because this method is not
* supposed to be called at runtime. This method is only for keeping
   * binary compatibility with Hive 0.13. See MAPREDUCE-5830 for details.
* @deprecated Use {@link #getTaskLogUrl(String, String, String, String)}
* to construct the taskLogUrl.
*/
@Deprecated
public static String getTaskLogUrl(String taskTrackerHostName,
String httpPort, String taskAttemptID) {
throw new RuntimeException(
"This method is not supposed to be called at runtime. " +
"Use HostUtil.getTaskLogUrl(String, String, String, String) instead.");
}
public static String convertTrackerNameToHostName(String trackerName) {
// Ugly!
// Convert the trackerName to its host name
int indexOfColon = trackerName.indexOf(":");
String trackerHostName = (indexOfColon == -1) ?
trackerName :
trackerName.substring(0, indexOfColon);
return trackerHostName.substring("tracker_".length());
}
}
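// Illustrative examples (hostnames and attempt id are assumed placeholders):
//
//   HostUtil.getTaskLogUrl("http://", "tt1.example.com", "50060",
//       "attempt_1400000000000_0001_m_000003_0")
//   -> "http://tt1.example.com:50060/tasklog?attemptid=attempt_1400000000000_0001_m_000003_0"
//
//   HostUtil.convertTrackerNameToHostName("tracker_tt1.example.com:50060")
//   -> "tt1.example.com"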
| 2,467 | 35.835821 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,023 | 43.521739 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.util.Locale;
import java.util.ResourceBundle;
import java.util.MissingResourceException;
/**
* Helper class to handle resource bundles in a saner way
*/
public class ResourceBundles {
/**
* Get a resource bundle
   * @param bundleName name of the bundle; '$' (from inner-class names) is
   *                   replaced with '_'
   * @return the resource bundle
   * @throws MissingResourceException if no bundle exists for the name
*/
public static ResourceBundle getBundle(String bundleName) {
return ResourceBundle.getBundle(bundleName.replace('$', '_'),
Locale.getDefault(), Thread.currentThread().getContextClassLoader());
}
/**
* Get a resource given bundle name and key
* @param <T> type of the resource
* @param bundleName name of the resource bundle
* @param key to lookup the resource
* @param suffix for the key to lookup
* @param defaultValue of the resource
* @return the resource or the defaultValue
* @throws ClassCastException if the resource found doesn't match T
*/
@SuppressWarnings("unchecked")
public static synchronized <T> T getValue(String bundleName, String key,
String suffix, T defaultValue) {
T value;
try {
ResourceBundle bundle = getBundle(bundleName);
value = (T) bundle.getObject(getLookupKey(key, suffix));
}
catch (Exception e) {
return defaultValue;
}
return value;
}
private static String getLookupKey(String key, String suffix) {
if (suffix == null || suffix.isEmpty()) return key;
return key + suffix;
}
/**
* Get the counter group display name
* @param group the group name to lookup
* @param defaultValue of the group
* @return the group display name
*/
public static String getCounterGroupName(String group, String defaultValue) {
return getValue(group, "CounterGroupName", "", defaultValue);
}
/**
* Get the counter display name
* @param group the counter group name for the counter
* @param counter the counter name to lookup
* @param defaultValue of the counter
* @return the counter display name
*/
public static String getCounterName(String group, String counter,
String defaultValue) {
return getValue(group, counter, ".name", defaultValue);
}
}
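
/*
 * Illustrative usage (not part of the original file): a minimal sketch of the
 * lookup helpers above. The bundle name "org.apache.hadoop.mapreduce.TaskCounter"
 * and the key "MAP_INPUT_RECORDS" are assumptions; the defaults are returned
 * whenever the bundle or key cannot be resolved.
 */
class ResourceBundlesExample {
  public static void main(String[] args) {
    // Looks up the "CounterGroupName" key in the bundle, if present.
    String groupName = ResourceBundles.getCounterGroupName(
        "org.apache.hadoop.mapreduce.TaskCounter", "Task Counters");
    // Looks up the "MAP_INPUT_RECORDS.name" key in the same bundle.
    String counterName = ResourceBundles.getCounterName(
        "org.apache.hadoop.mapreduce.TaskCounter", "MAP_INPUT_RECORDS",
        "MAP_INPUT_RECORDS");
    System.out.println(groupName + " / " + counterName);
  }
}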
| 3,100 | 32.706522 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
/**
 * Placeholder for deprecated keys in the framework
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ConfigUtil {
/**
   * Adds all the deprecated keys, then loads mapred-default.xml,
   * mapred-site.xml, yarn-default.xml and yarn-site.xml as default resources
*/
public static void loadResources() {
addDeprecatedKeys();
Configuration.addDefaultResource("mapred-default.xml");
Configuration.addDefaultResource("mapred-site.xml");
Configuration.addDefaultResource("yarn-default.xml");
Configuration.addDefaultResource("yarn-site.xml");
}
/**
* Adds deprecated keys and the corresponding new keys to the Configuration
*/
@SuppressWarnings("deprecation")
private static void addDeprecatedKeys() {
Configuration.addDeprecations(new DeprecationDelta[] {
new DeprecationDelta("mapred.temp.dir",
MRConfig.TEMP_DIR),
new DeprecationDelta("mapred.local.dir",
MRConfig.LOCAL_DIR),
new DeprecationDelta("mapred.cluster.map.memory.mb",
MRConfig.MAPMEMORY_MB),
new DeprecationDelta("mapred.cluster.reduce.memory.mb",
MRConfig.REDUCEMEMORY_MB),
new DeprecationDelta("mapred.acls.enabled",
MRConfig.MR_ACLS_ENABLED),
new DeprecationDelta("mapred.cluster.max.map.memory.mb",
JTConfig.JT_MAX_MAPMEMORY_MB),
new DeprecationDelta("mapred.cluster.max.reduce.memory.mb",
JTConfig.JT_MAX_REDUCEMEMORY_MB),
new DeprecationDelta("mapred.cluster.average.blacklist.threshold",
JTConfig.JT_AVG_BLACKLIST_THRESHOLD),
new DeprecationDelta("hadoop.job.history.location",
JTConfig.JT_JOBHISTORY_LOCATION),
new DeprecationDelta(
"mapred.job.tracker.history.completed.location",
JTConfig.JT_JOBHISTORY_COMPLETED_LOCATION),
new DeprecationDelta("mapred.jobtracker.job.history.block.size",
JTConfig.JT_JOBHISTORY_BLOCK_SIZE),
new DeprecationDelta("mapred.job.tracker.jobhistory.lru.cache.size",
JTConfig.JT_JOBHISTORY_CACHE_SIZE),
new DeprecationDelta("mapred.hosts",
JTConfig.JT_HOSTS_FILENAME),
new DeprecationDelta("mapred.hosts.exclude",
JTConfig.JT_HOSTS_EXCLUDE_FILENAME),
new DeprecationDelta("mapred.system.dir",
JTConfig.JT_SYSTEM_DIR),
new DeprecationDelta("mapred.max.tracker.blacklists",
JTConfig.JT_MAX_TRACKER_BLACKLISTS),
new DeprecationDelta("mapred.job.tracker",
JTConfig.JT_IPC_ADDRESS),
new DeprecationDelta("mapred.job.tracker.http.address",
JTConfig.JT_HTTP_ADDRESS),
new DeprecationDelta("mapred.job.tracker.handler.count",
JTConfig.JT_IPC_HANDLER_COUNT),
new DeprecationDelta("mapred.jobtracker.restart.recover",
JTConfig.JT_RESTART_ENABLED),
new DeprecationDelta("mapred.jobtracker.taskScheduler",
JTConfig.JT_TASK_SCHEDULER),
new DeprecationDelta(
"mapred.jobtracker.taskScheduler.maxRunningTasksPerJob",
JTConfig.JT_RUNNINGTASKS_PER_JOB),
new DeprecationDelta("mapred.jobtracker.instrumentation",
JTConfig.JT_INSTRUMENTATION),
new DeprecationDelta("mapred.jobtracker.maxtasks.per.job",
JTConfig.JT_TASKS_PER_JOB),
new DeprecationDelta("mapred.heartbeats.in.second",
JTConfig.JT_HEARTBEATS_IN_SECOND),
new DeprecationDelta("mapred.job.tracker.persist.jobstatus.active",
JTConfig.JT_PERSIST_JOBSTATUS),
new DeprecationDelta("mapred.job.tracker.persist.jobstatus.hours",
JTConfig.JT_PERSIST_JOBSTATUS_HOURS),
new DeprecationDelta("mapred.job.tracker.persist.jobstatus.dir",
JTConfig.JT_PERSIST_JOBSTATUS_DIR),
new DeprecationDelta("mapred.permissions.supergroup",
MRConfig.MR_SUPERGROUP),
new DeprecationDelta("mapreduce.jobtracker.permissions.supergroup",
MRConfig.MR_SUPERGROUP),
new DeprecationDelta("mapred.task.cache.levels",
JTConfig.JT_TASKCACHE_LEVELS),
new DeprecationDelta("mapred.jobtracker.taskalloc.capacitypad",
JTConfig.JT_TASK_ALLOC_PAD_FRACTION),
new DeprecationDelta("mapred.jobinit.threads",
JTConfig.JT_JOBINIT_THREADS),
new DeprecationDelta("mapred.tasktracker.expiry.interval",
JTConfig.JT_TRACKER_EXPIRY_INTERVAL),
new DeprecationDelta("mapred.job.tracker.retiredjobs.cache.size",
JTConfig.JT_RETIREJOB_CACHE_SIZE),
new DeprecationDelta("mapred.job.tracker.retire.jobs",
JTConfig.JT_RETIREJOBS),
new DeprecationDelta("mapred.healthChecker.interval",
TTConfig.TT_HEALTH_CHECKER_INTERVAL),
new DeprecationDelta("mapred.healthChecker.script.args",
TTConfig.TT_HEALTH_CHECKER_SCRIPT_ARGS),
new DeprecationDelta("mapred.healthChecker.script.path",
TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH),
new DeprecationDelta("mapred.healthChecker.script.timeout",
TTConfig.TT_HEALTH_CHECKER_SCRIPT_TIMEOUT),
new DeprecationDelta("mapred.local.dir.minspacekill",
TTConfig.TT_LOCAL_DIR_MINSPACE_KILL),
new DeprecationDelta("mapred.local.dir.minspacestart",
TTConfig.TT_LOCAL_DIR_MINSPACE_START),
new DeprecationDelta("mapred.task.tracker.http.address",
TTConfig.TT_HTTP_ADDRESS),
new DeprecationDelta("mapred.task.tracker.report.address",
TTConfig.TT_REPORT_ADDRESS),
new DeprecationDelta("mapred.task.tracker.task-controller",
TTConfig.TT_TASK_CONTROLLER),
new DeprecationDelta("mapred.tasktracker.dns.interface",
TTConfig.TT_DNS_INTERFACE),
new DeprecationDelta("mapred.tasktracker.dns.nameserver",
TTConfig.TT_DNS_NAMESERVER),
new DeprecationDelta("mapred.tasktracker.events.batchsize",
TTConfig.TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL),
new DeprecationDelta("mapred.tasktracker.indexcache.mb",
TTConfig.TT_INDEX_CACHE),
new DeprecationDelta("mapred.tasktracker.instrumentation",
TTConfig.TT_INSTRUMENTATION),
new DeprecationDelta("mapred.tasktracker.map.tasks.maximum",
TTConfig.TT_MAP_SLOTS),
new DeprecationDelta("mapred.tasktracker.memory_calculator_plugin",
TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN),
new DeprecationDelta("mapred.tasktracker.memorycalculatorplugin",
TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN),
new DeprecationDelta("mapred.tasktracker.reduce.tasks.maximum",
TTConfig.TT_REDUCE_SLOTS),
new DeprecationDelta(
"mapred.tasktracker.tasks.sleeptime-before-sigkill",
TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL),
new DeprecationDelta("slave.host.name",
TTConfig.TT_HOST_NAME),
new DeprecationDelta("tasktracker.http.threads",
TTConfig.TT_HTTP_THREADS),
new DeprecationDelta("hadoop.net.static.resolutions",
TTConfig.TT_STATIC_RESOLUTIONS),
new DeprecationDelta("local.cache.size",
TTConfig.TT_LOCAL_CACHE_SIZE),
new DeprecationDelta("tasktracker.contention.tracking",
TTConfig.TT_CONTENTION_TRACKING),
new DeprecationDelta("yarn.app.mapreduce.yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts",
MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS),
new DeprecationDelta("job.end.notification.url",
MRJobConfig.MR_JOB_END_NOTIFICATION_URL),
new DeprecationDelta("job.end.retry.attempts",
MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS),
new DeprecationDelta("job.end.retry.interval",
MRJobConfig.MR_JOB_END_RETRY_INTERVAL),
new DeprecationDelta("mapred.committer.job.setup.cleanup.needed",
MRJobConfig.SETUP_CLEANUP_NEEDED),
new DeprecationDelta("mapred.jar",
MRJobConfig.JAR),
new DeprecationDelta("mapred.job.id",
MRJobConfig.ID),
new DeprecationDelta("mapred.job.name",
MRJobConfig.JOB_NAME),
new DeprecationDelta("mapred.job.priority",
MRJobConfig.PRIORITY),
new DeprecationDelta("mapred.job.queue.name",
MRJobConfig.QUEUE_NAME),
new DeprecationDelta("mapred.job.reuse.jvm.num.tasks",
MRJobConfig.JVM_NUMTASKS_TORUN),
new DeprecationDelta("mapred.map.tasks",
MRJobConfig.NUM_MAPS),
new DeprecationDelta("mapred.max.tracker.failures",
MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER),
new DeprecationDelta("mapred.reduce.slowstart.completed.maps",
MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART),
new DeprecationDelta("mapred.reduce.tasks",
MRJobConfig.NUM_REDUCES),
new DeprecationDelta("mapred.skip.on",
MRJobConfig.SKIP_RECORDS),
new DeprecationDelta("mapred.skip.out.dir",
MRJobConfig.SKIP_OUTDIR),
new DeprecationDelta("mapred.speculative.execution.slowTaskThreshold",
MRJobConfig.SPECULATIVE_SLOWTASK_THRESHOLD),
new DeprecationDelta("mapred.speculative.execution.speculativeCap",
MRJobConfig.SPECULATIVECAP_RUNNING_TASKS),
new DeprecationDelta("job.local.dir",
MRJobConfig.JOB_LOCAL_DIR),
new DeprecationDelta("mapreduce.inputformat.class",
MRJobConfig.INPUT_FORMAT_CLASS_ATTR),
new DeprecationDelta("mapreduce.map.class",
MRJobConfig.MAP_CLASS_ATTR),
new DeprecationDelta("mapreduce.combine.class",
MRJobConfig.COMBINE_CLASS_ATTR),
new DeprecationDelta("mapreduce.reduce.class",
MRJobConfig.REDUCE_CLASS_ATTR),
new DeprecationDelta("mapreduce.outputformat.class",
MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR),
new DeprecationDelta("mapreduce.partitioner.class",
MRJobConfig.PARTITIONER_CLASS_ATTR),
new DeprecationDelta("mapred.job.classpath.archives",
MRJobConfig.CLASSPATH_ARCHIVES),
new DeprecationDelta("mapred.job.classpath.files",
MRJobConfig.CLASSPATH_FILES),
new DeprecationDelta("mapred.cache.files",
MRJobConfig.CACHE_FILES),
new DeprecationDelta("mapred.cache.archives",
MRJobConfig.CACHE_ARCHIVES),
new DeprecationDelta("mapred.cache.localFiles",
MRJobConfig.CACHE_LOCALFILES),
new DeprecationDelta("mapred.cache.localArchives",
MRJobConfig.CACHE_LOCALARCHIVES),
new DeprecationDelta("mapred.cache.files.filesizes",
MRJobConfig.CACHE_FILES_SIZES),
new DeprecationDelta("mapred.cache.archives.filesizes",
MRJobConfig.CACHE_ARCHIVES_SIZES),
new DeprecationDelta("mapred.cache.files.timestamps",
MRJobConfig.CACHE_FILE_TIMESTAMPS),
new DeprecationDelta("mapred.cache.archives.timestamps",
MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS),
new DeprecationDelta("mapred.working.dir",
MRJobConfig.WORKING_DIR),
new DeprecationDelta("user.name",
MRJobConfig.USER_NAME),
new DeprecationDelta("mapred.output.key.class",
MRJobConfig.OUTPUT_KEY_CLASS),
new DeprecationDelta("mapred.output.value.class",
MRJobConfig.OUTPUT_VALUE_CLASS),
new DeprecationDelta("mapred.output.value.groupfn.class",
MRJobConfig.GROUP_COMPARATOR_CLASS),
new DeprecationDelta("mapred.output.key.comparator.class",
MRJobConfig.KEY_COMPARATOR),
new DeprecationDelta("io.sort.factor",
MRJobConfig.IO_SORT_FACTOR),
new DeprecationDelta("io.sort.mb",
MRJobConfig.IO_SORT_MB),
new DeprecationDelta("keep.failed.task.files",
MRJobConfig.PRESERVE_FAILED_TASK_FILES),
new DeprecationDelta("keep.task.files.pattern",
MRJobConfig.PRESERVE_FILES_PATTERN),
new DeprecationDelta("mapred.debug.out.lines",
MRJobConfig.TASK_DEBUGOUT_LINES),
new DeprecationDelta("mapred.merge.recordsBeforeProgress",
MRJobConfig.RECORDS_BEFORE_PROGRESS),
new DeprecationDelta("mapred.merge.recordsBeforeProgress",
MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS),
new DeprecationDelta("mapred.skip.attempts.to.start.skipping",
MRJobConfig.SKIP_START_ATTEMPTS),
new DeprecationDelta("mapred.task.id",
MRJobConfig.TASK_ATTEMPT_ID),
new DeprecationDelta("mapred.task.is.map",
MRJobConfig.TASK_ISMAP),
new DeprecationDelta("mapred.task.partition",
MRJobConfig.TASK_PARTITION),
new DeprecationDelta("mapred.task.profile",
MRJobConfig.TASK_PROFILE),
new DeprecationDelta("mapred.task.profile.maps",
MRJobConfig.NUM_MAP_PROFILES),
new DeprecationDelta("mapred.task.profile.reduces",
MRJobConfig.NUM_REDUCE_PROFILES),
new DeprecationDelta("mapred.task.timeout",
MRJobConfig.TASK_TIMEOUT),
new DeprecationDelta("mapred.tip.id",
MRJobConfig.TASK_ID),
new DeprecationDelta("mapred.work.output.dir",
MRJobConfig.TASK_OUTPUT_DIR),
new DeprecationDelta("mapred.userlog.limit.kb",
MRJobConfig.TASK_USERLOG_LIMIT),
new DeprecationDelta("mapred.userlog.retain.hours",
MRJobConfig.USER_LOG_RETAIN_HOURS),
new DeprecationDelta("mapred.task.profile.params",
MRJobConfig.TASK_PROFILE_PARAMS),
new DeprecationDelta("io.sort.spill.percent",
MRJobConfig.MAP_SORT_SPILL_PERCENT),
new DeprecationDelta("map.input.file",
MRJobConfig.MAP_INPUT_FILE),
new DeprecationDelta("map.input.length",
MRJobConfig.MAP_INPUT_PATH),
new DeprecationDelta("map.input.start",
MRJobConfig.MAP_INPUT_START),
new DeprecationDelta("mapred.job.map.memory.mb",
MRJobConfig.MAP_MEMORY_MB),
new DeprecationDelta("mapred.map.child.env",
MRJobConfig.MAP_ENV),
new DeprecationDelta("mapred.map.child.java.opts",
MRJobConfig.MAP_JAVA_OPTS),
new DeprecationDelta("mapred.map.max.attempts",
MRJobConfig.MAP_MAX_ATTEMPTS),
new DeprecationDelta("mapred.map.task.debug.script",
MRJobConfig.MAP_DEBUG_SCRIPT),
new DeprecationDelta("mapred.map.tasks.speculative.execution",
MRJobConfig.MAP_SPECULATIVE),
new DeprecationDelta("mapred.max.map.failures.percent",
MRJobConfig.MAP_FAILURES_MAX_PERCENT),
new DeprecationDelta("mapred.skip.map.auto.incr.proc.count",
MRJobConfig.MAP_SKIP_INCR_PROC_COUNT),
new DeprecationDelta("mapred.skip.map.max.skip.records",
MRJobConfig.MAP_SKIP_MAX_RECORDS),
new DeprecationDelta("min.num.spills.for.combine",
MRJobConfig.MAP_COMBINE_MIN_SPILLS),
new DeprecationDelta("mapred.compress.map.output",
MRJobConfig.MAP_OUTPUT_COMPRESS),
new DeprecationDelta("mapred.map.output.compression.codec",
MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC),
new DeprecationDelta("mapred.mapoutput.key.class",
MRJobConfig.MAP_OUTPUT_KEY_CLASS),
new DeprecationDelta("mapred.mapoutput.value.class",
MRJobConfig.MAP_OUTPUT_VALUE_CLASS),
new DeprecationDelta("map.output.key.field.separator",
MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR),
new DeprecationDelta("mapred.map.child.log.level",
MRJobConfig.MAP_LOG_LEVEL),
new DeprecationDelta("mapred.inmem.merge.threshold",
MRJobConfig.REDUCE_MERGE_INMEM_THRESHOLD),
new DeprecationDelta("mapred.job.reduce.input.buffer.percent",
MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT),
new DeprecationDelta("mapred.job.reduce.markreset.buffer.percent",
MRJobConfig.REDUCE_MARKRESET_BUFFER_PERCENT),
new DeprecationDelta("mapred.job.reduce.memory.mb",
MRJobConfig.REDUCE_MEMORY_MB),
new DeprecationDelta("mapred.job.reduce.total.mem.bytes",
MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES),
new DeprecationDelta("mapred.job.shuffle.input.buffer.percent",
MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT),
new DeprecationDelta("mapred.job.shuffle.merge.percent",
MRJobConfig.SHUFFLE_MERGE_PERCENT),
new DeprecationDelta("mapred.max.reduce.failures.percent",
MRJobConfig.REDUCE_FAILURES_MAXPERCENT),
new DeprecationDelta("mapred.reduce.child.env",
MRJobConfig.REDUCE_ENV),
new DeprecationDelta("mapred.reduce.child.java.opts",
MRJobConfig.REDUCE_JAVA_OPTS),
new DeprecationDelta("mapred.reduce.max.attempts",
MRJobConfig.REDUCE_MAX_ATTEMPTS),
new DeprecationDelta("mapred.reduce.parallel.copies",
MRJobConfig.SHUFFLE_PARALLEL_COPIES),
new DeprecationDelta("mapred.reduce.task.debug.script",
MRJobConfig.REDUCE_DEBUG_SCRIPT),
new DeprecationDelta("mapred.reduce.tasks.speculative.execution",
MRJobConfig.REDUCE_SPECULATIVE),
new DeprecationDelta("mapred.shuffle.connect.timeout",
MRJobConfig.SHUFFLE_CONNECT_TIMEOUT),
new DeprecationDelta("mapred.shuffle.read.timeout",
MRJobConfig.SHUFFLE_READ_TIMEOUT),
new DeprecationDelta("mapred.skip.reduce.auto.incr.proc.count",
MRJobConfig.REDUCE_SKIP_INCR_PROC_COUNT),
new DeprecationDelta("mapred.skip.reduce.max.skip.groups",
MRJobConfig.REDUCE_SKIP_MAXGROUPS),
new DeprecationDelta("mapred.reduce.child.log.level",
MRJobConfig.REDUCE_LOG_LEVEL),
new DeprecationDelta("mapreduce.job.counters.limit",
MRJobConfig.COUNTERS_MAX_KEY),
new DeprecationDelta("jobclient.completion.poll.interval",
Job.COMPLETION_POLL_INTERVAL_KEY),
new DeprecationDelta("jobclient.progress.monitor.poll.interval",
Job.PROGRESS_MONITOR_POLL_INTERVAL_KEY),
new DeprecationDelta("jobclient.output.filter",
Job.OUTPUT_FILTER),
new DeprecationDelta("mapred.submit.replication",
Job.SUBMIT_REPLICATION),
new DeprecationDelta("mapred.used.genericoptionsparser",
Job.USED_GENERIC_PARSER),
new DeprecationDelta("mapred.input.dir",
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR),
new DeprecationDelta("mapred.input.pathFilter.class",
org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.PATHFILTER_CLASS),
new DeprecationDelta("mapred.max.split.size",
org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.SPLIT_MAXSIZE),
new DeprecationDelta("mapred.min.split.size",
org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.SPLIT_MINSIZE),
new DeprecationDelta("mapred.output.compress",
org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS),
new DeprecationDelta("mapred.output.compression.codec",
org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS_CODEC),
new DeprecationDelta("mapred.output.compression.type",
org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS_TYPE),
new DeprecationDelta("mapred.output.dir",
org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.OUTDIR),
new DeprecationDelta("mapred.seqbinary.output.key.class",
org.apache.hadoop.mapreduce.lib.output.
SequenceFileAsBinaryOutputFormat.KEY_CLASS),
new DeprecationDelta("mapred.seqbinary.output.value.class",
org.apache.hadoop.mapreduce.lib.output.
SequenceFileAsBinaryOutputFormat.VALUE_CLASS),
new DeprecationDelta("sequencefile.filter.class",
org.apache.hadoop.mapreduce.lib.input.
SequenceFileInputFilter.FILTER_CLASS),
new DeprecationDelta("sequencefile.filter.regex",
org.apache.hadoop.mapreduce.lib.input.
SequenceFileInputFilter.FILTER_REGEX),
new DeprecationDelta("sequencefile.filter.frequency",
org.apache.hadoop.mapreduce.lib.input.
SequenceFileInputFilter.FILTER_FREQUENCY),
new DeprecationDelta("mapred.input.dir.mappers",
org.apache.hadoop.mapreduce.lib.input.
MultipleInputs.DIR_MAPPERS),
new DeprecationDelta("mapred.input.dir.formats",
org.apache.hadoop.mapreduce.lib.input.
MultipleInputs.DIR_FORMATS),
new DeprecationDelta("mapred.line.input.format.linespermap",
org.apache.hadoop.mapreduce.lib.input.
NLineInputFormat.LINES_PER_MAP),
new DeprecationDelta("mapred.binary.partitioner.left.offset",
org.apache.hadoop.mapreduce.lib.partition.
BinaryPartitioner.LEFT_OFFSET_PROPERTY_NAME),
new DeprecationDelta("mapred.binary.partitioner.right.offset",
org.apache.hadoop.mapreduce.lib.partition.
BinaryPartitioner.RIGHT_OFFSET_PROPERTY_NAME),
new DeprecationDelta("mapred.text.key.comparator.options",
org.apache.hadoop.mapreduce.lib.partition.
KeyFieldBasedComparator.COMPARATOR_OPTIONS),
new DeprecationDelta("mapred.text.key.partitioner.options",
org.apache.hadoop.mapreduce.lib.partition.
KeyFieldBasedPartitioner.PARTITIONER_OPTIONS),
new DeprecationDelta("mapred.mapper.regex.group",
org.apache.hadoop.mapreduce.lib.map.RegexMapper.GROUP),
new DeprecationDelta("mapred.mapper.regex",
org.apache.hadoop.mapreduce.lib.map.RegexMapper.PATTERN),
new DeprecationDelta("create.empty.dir.if.nonexist",
org.apache.hadoop.mapreduce.lib.jobcontrol.
ControlledJob.CREATE_DIR),
new DeprecationDelta("mapred.data.field.separator",
org.apache.hadoop.mapreduce.lib.fieldsel.
FieldSelectionHelper.DATA_FIELD_SEPERATOR),
new DeprecationDelta("map.output.key.value.fields.spec",
org.apache.hadoop.mapreduce.lib.fieldsel.
FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC),
new DeprecationDelta("reduce.output.key.value.fields.spec",
org.apache.hadoop.mapreduce.lib.fieldsel.
FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC),
new DeprecationDelta("mapred.min.split.size.per.node",
org.apache.hadoop.mapreduce.lib.input.
CombineFileInputFormat.SPLIT_MINSIZE_PERNODE),
new DeprecationDelta("mapred.min.split.size.per.rack",
org.apache.hadoop.mapreduce.lib.input.
CombineFileInputFormat.SPLIT_MINSIZE_PERRACK),
new DeprecationDelta("key.value.separator.in.input.line",
org.apache.hadoop.mapreduce.lib.input.
KeyValueLineRecordReader.KEY_VALUE_SEPERATOR),
new DeprecationDelta("mapred.linerecordreader.maxlength",
org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH),
new DeprecationDelta("mapred.lazy.output.format",
org.apache.hadoop.mapreduce.lib.output.
LazyOutputFormat.OUTPUT_FORMAT),
new DeprecationDelta("mapred.textoutputformat.separator",
org.apache.hadoop.mapreduce.lib.output.
TextOutputFormat.SEPERATOR),
new DeprecationDelta("mapred.join.expr",
org.apache.hadoop.mapreduce.lib.join.
CompositeInputFormat.JOIN_EXPR),
new DeprecationDelta("mapred.join.keycomparator",
org.apache.hadoop.mapreduce.lib.join.
CompositeInputFormat.JOIN_COMPARATOR),
new DeprecationDelta("hadoop.pipes.command-file.keep",
org.apache.hadoop.mapred.pipes.
Submitter.PRESERVE_COMMANDFILE),
new DeprecationDelta("hadoop.pipes.executable",
org.apache.hadoop.mapred.pipes.Submitter.EXECUTABLE),
new DeprecationDelta("hadoop.pipes.executable.interpretor",
org.apache.hadoop.mapred.pipes.Submitter.INTERPRETOR),
new DeprecationDelta("hadoop.pipes.java.mapper",
org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_MAP),
new DeprecationDelta("hadoop.pipes.java.recordreader",
org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_RR),
new DeprecationDelta("hadoop.pipes.java.recordwriter",
org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_RW),
new DeprecationDelta("hadoop.pipes.java.reducer",
org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_REDUCE),
new DeprecationDelta("hadoop.pipes.partitioner",
org.apache.hadoop.mapred.pipes.Submitter.PARTITIONER),
new DeprecationDelta("mapred.pipes.user.inputformat",
org.apache.hadoop.mapred.pipes.Submitter.INPUT_FORMAT),
new DeprecationDelta("webinterface.private.actions",
JTConfig.PRIVATE_ACTIONS_KEY),
new DeprecationDelta("security.task.umbilical.protocol.acl",
MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL),
new DeprecationDelta("security.job.submission.protocol.acl",
MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT),
new DeprecationDelta("mapreduce.user.classpath.first",
MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST ),
new DeprecationDelta(JTConfig.JT_MAX_JOB_SPLIT_METAINFO_SIZE,
MRJobConfig.SPLIT_METAINFO_MAXSIZE),
new DeprecationDelta("mapred.input.dir.recursive",
FileInputFormat.INPUT_DIR_RECURSIVE)
});
}
public static void main(String[] args) {
loadResources();
Configuration.dumpDeprecatedKeys();
}
}
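
/*
 * Illustrative usage (not part of the original file): a minimal sketch showing
 * how a value written under a deprecated key resolves through the table above.
 * The job name literal is made up.
 */
class ConfigUtilExample {
  public static void main(String[] args) {
    ConfigUtil.loadResources(); // registers the deprecations and resources
    Configuration conf = new Configuration();
    conf.set("mapred.job.name", "word-count"); // deprecated key
    // The replacement key yields the same value via the deprecation mapping.
    System.out.println(conf.get(MRJobConfig.JOB_NAME)); // prints "word-count"
  }
}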
| 26,284 | 48.039179 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
/**
* Process tree related operations
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ProcessTree {
private static final Log LOG = LogFactory.getLog(ProcessTree.class);
public static final long DEFAULT_SLEEPTIME_BEFORE_SIGKILL = 5000L;
private static final int SIGQUIT = 3;
private static final int SIGTERM = 15;
private static final int SIGKILL = 9;
private static final String SIGQUIT_STR = "SIGQUIT";
private static final String SIGTERM_STR = "SIGTERM";
private static final String SIGKILL_STR = "SIGKILL";
public static final boolean isSetsidAvailable = isSetsidSupported();
private static boolean isSetsidSupported() {
ShellCommandExecutor shexec = null;
boolean setsidSupported = true;
try {
String[] args = {"setsid", "bash", "-c", "echo $$"};
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.warn("setsid is not available on this machine. So not using it.");
setsidSupported = false;
} finally { // handle the exit code
LOG.info("setsid exited with exit code " + shexec.getExitCode());
}
return setsidSupported;
}
/**
* Destroy the process-tree.
* @param pid process id of the root process of the subtree of processes
* to be killed
* @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
* after sending SIGTERM
* @param isProcessGroup pid is a process group leader or not
* @param inBackground Process is to be killed in the back ground with
* a separate thread
*/
public static void destroy(String pid, long sleeptimeBeforeSigkill,
boolean isProcessGroup, boolean inBackground) {
if(isProcessGroup) {
destroyProcessGroup(pid, sleeptimeBeforeSigkill, inBackground);
}
else {
//TODO: Destroy all the processes in the subtree in this case also.
// For the time being, killing only the root process.
destroyProcess(pid, sleeptimeBeforeSigkill, inBackground);
}
}
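  /*
   * For illustration only (not part of the original source): destroying a
   * process group rooted at a made-up pid, waiting the default 5s between
   * SIGTERM and SIGKILL, without a background thread:
   *
   *   ProcessTree.destroy("4321",
   *       ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL,
   *       true,   // pid is a process group leader
   *       false); // kill in the current thread
   */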
/** Destroy the process.
* @param pid Process id of to-be-killed-process
* @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
* after sending SIGTERM
* @param inBackground Process is to be killed in the back ground with
* a separate thread
*/
protected static void destroyProcess(String pid, long sleeptimeBeforeSigkill,
boolean inBackground) {
terminateProcess(pid);
sigKill(pid, false, sleeptimeBeforeSigkill, inBackground);
}
/** Destroy the process group.
* @param pgrpId Process group id of to-be-killed-processes
* @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
* after sending SIGTERM
* @param inBackground Process group is to be killed in the back ground with
* a separate thread
*/
protected static void destroyProcessGroup(String pgrpId,
long sleeptimeBeforeSigkill, boolean inBackground) {
terminateProcessGroup(pgrpId);
sigKill(pgrpId, true, sleeptimeBeforeSigkill, inBackground);
}
/**
* Send a specified signal to the specified pid
*
* @param pid the pid of the process [group] to signal.
* @param signalNum the signal to send.
* @param signalName the human-readable description of the signal
* (for logging).
*/
private static void sendSignal(String pid, int signalNum, String signalName) {
ShellCommandExecutor shexec = null;
try {
String[] args = { "kill", "-" + signalNum, pid };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.warn("Error executing shell command " + ioe);
} finally {
if (pid.startsWith("-")) {
LOG.info("Sending signal to all members of process group " + pid
+ ": " + signalName + ". Exit code " + shexec.getExitCode());
} else {
LOG.info("Signaling process " + pid
+ " with " + signalName + ". Exit code " + shexec.getExitCode());
}
}
}
/**
* Send a specified signal to the process, if it is alive.
*
* @param pid the pid of the process to signal.
* @param signalNum the signal to send.
* @param signalName the human-readable description of the signal
* (for logging).
* @param alwaysSignal if true then send signal even if isAlive(pid) is false
*/
private static void maybeSignalProcess(String pid, int signalNum,
String signalName, boolean alwaysSignal) {
// If process tree is not alive then don't signal, unless alwaysSignal
// forces it so.
if (alwaysSignal || ProcessTree.isAlive(pid)) {
sendSignal(pid, signalNum, signalName);
}
}
private static void maybeSignalProcessGroup(String pgrpId, int signalNum,
String signalName, boolean alwaysSignal) {
if (alwaysSignal || ProcessTree.isProcessGroupAlive(pgrpId)) {
// signaling a process group means using a negative pid.
sendSignal("-" + pgrpId, signalNum, signalName);
}
}
/**
* Sends terminate signal to the process, allowing it to gracefully exit.
*
* @param pid pid of the process to be sent SIGTERM
*/
public static void terminateProcess(String pid) {
maybeSignalProcess(pid, SIGTERM, SIGTERM_STR, true);
}
/**
* Sends terminate signal to all the process belonging to the passed process
* group, allowing the group to gracefully exit.
*
* @param pgrpId process group id
*/
public static void terminateProcessGroup(String pgrpId) {
maybeSignalProcessGroup(pgrpId, SIGTERM, SIGTERM_STR, true);
}
/**
   * Kills the process (or process group) by sending the signal SIGKILL
   * in the current thread
   * @param pid Process id (or process group id) of to-be-deleted-process
* @param isProcessGroup Is pid a process group id of to-be-deleted-processes
* @param sleepTimeBeforeSigKill wait time before sending SIGKILL after
* sending SIGTERM
*/
private static void sigKillInCurrentThread(String pid, boolean isProcessGroup,
long sleepTimeBeforeSigKill) {
    // Kill the subprocesses of the root process (even if the root process is
    // not alive) if a process group is to be killed.
if (isProcessGroup || ProcessTree.isAlive(pid)) {
try {
// Sleep for some time before sending SIGKILL
Thread.sleep(sleepTimeBeforeSigKill);
} catch (InterruptedException i) {
LOG.warn("Thread sleep is interrupted.");
}
if(isProcessGroup) {
killProcessGroup(pid);
} else {
killProcess(pid);
}
}
}
  /** Kills the process (or process group) by sending the signal SIGKILL
   * @param pid Process id (or process group id) of to-be-deleted-process
* @param isProcessGroup Is pid a process group id of to-be-deleted-processes
* @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
* after sending SIGTERM
* @param inBackground Process is to be killed in the back ground with
* a separate thread
*/
private static void sigKill(String pid, boolean isProcessGroup,
long sleeptimeBeforeSigkill, boolean inBackground) {
if(inBackground) { // use a separate thread for killing
SigKillThread sigKillThread = new SigKillThread(pid, isProcessGroup,
sleeptimeBeforeSigkill);
sigKillThread.setDaemon(true);
sigKillThread.start();
}
else {
sigKillInCurrentThread(pid, isProcessGroup, sleeptimeBeforeSigkill);
}
}
/**
* Sends kill signal to process, forcefully terminating the process.
*
* @param pid process id
*/
public static void killProcess(String pid) {
maybeSignalProcess(pid, SIGKILL, SIGKILL_STR, false);
}
/**
* Sends SIGQUIT to process; Java programs will dump their stack to
* stdout.
*
* @param pid process id
*/
public static void sigQuitProcess(String pid) {
maybeSignalProcess(pid, SIGQUIT, SIGQUIT_STR, false);
}
/**
* Sends kill signal to all process belonging to same process group,
* forcefully terminating the process group.
*
* @param pgrpId process group id
*/
public static void killProcessGroup(String pgrpId) {
maybeSignalProcessGroup(pgrpId, SIGKILL, SIGKILL_STR, false);
}
/**
* Sends SIGQUIT to all processes belonging to the same process group,
* ordering all processes in the group to send their stack dump to
* stdout.
*
* @param pgrpId process group id
*/
public static void sigQuitProcessGroup(String pgrpId) {
maybeSignalProcessGroup(pgrpId, SIGQUIT, SIGQUIT_STR, false);
}
/**
* Is the process with PID pid still alive?
* This method assumes that isAlive is called on a pid that was alive not
* too long ago, and hence assumes no chance of pid-wrapping-around.
*
* @param pid pid of the process to check.
* @return true if process is alive.
*/
public static boolean isAlive(String pid) {
ShellCommandExecutor shexec = null;
try {
String[] args = { "kill", "-0", pid };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (ExitCodeException ee) {
return false;
} catch (IOException ioe) {
LOG.warn("Error executing shell command "
+ shexec.toString() + ioe);
return false;
}
return (shexec.getExitCode() == 0 ? true : false);
}
/**
   * Is the process group with the given pgrpId still alive?
*
* This method assumes that isAlive is called on a pid that was alive not
* too long ago, and hence assumes no chance of pid-wrapping-around.
*
* @param pgrpId process group id
* @return true if any of process in group is alive.
*/
public static boolean isProcessGroupAlive(String pgrpId) {
ShellCommandExecutor shexec = null;
try {
String[] args = { "kill", "-0", "-"+pgrpId };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (ExitCodeException ee) {
return false;
} catch (IOException ioe) {
LOG.warn("Error executing shell command "
+ shexec.toString() + ioe);
return false;
}
return (shexec.getExitCode() == 0 ? true : false);
}
/**
* Helper thread class that kills process-tree with SIGKILL in background
*/
static class SigKillThread extends Thread {
private String pid = null;
private boolean isProcessGroup = false;
private long sleepTimeBeforeSigKill = DEFAULT_SLEEPTIME_BEFORE_SIGKILL;
private SigKillThread(String pid, boolean isProcessGroup, long interval) {
this.pid = pid;
this.isProcessGroup = isProcessGroup;
this.setName(this.getClass().getName() + "-" + pid);
sleepTimeBeforeSigKill = interval;
}
public void run() {
sigKillInCurrentThread(pid, isProcessGroup, sleepTimeBeforeSigKill);
}
}
}
| 12,332 | 34.036932 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.text.ParseException;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.counters.AbstractCounters;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.counters.CounterGroupBase;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
/**
* String conversion utilities for counters.
 * Candidate for deprecation since we started to use JSON in 0.21+
*/
@InterfaceAudience.Private
public class CountersStrings {
private static final char GROUP_OPEN = '{';
private static final char GROUP_CLOSE = '}';
private static final char COUNTER_OPEN = '[';
private static final char COUNTER_CLOSE = ']';
private static final char UNIT_OPEN = '(';
private static final char UNIT_CLOSE = ')';
private static char[] charsToEscape = {GROUP_OPEN, GROUP_CLOSE,
COUNTER_OPEN, COUNTER_CLOSE,
UNIT_OPEN, UNIT_CLOSE};
/**
   * Make the pre 0.21 counter string (e.g. for old job history files):
* [(actual-name)(display-name)(value)]
* @param counter to stringify
* @return the stringified result
*/
public static String toEscapedCompactString(Counter counter) {
// First up, obtain the strings that need escaping. This will help us
    // determine the buffer length a priori.
String escapedName, escapedDispName;
long currentValue;
synchronized(counter) {
escapedName = escape(counter.getName());
escapedDispName = escape(counter.getDisplayName());
currentValue = counter.getValue();
}
int length = escapedName.length() + escapedDispName.length() + 4;
length += 8; // For the following delimiting characters
StringBuilder builder = new StringBuilder(length);
builder.append(COUNTER_OPEN);
// Add the counter name
builder.append(UNIT_OPEN);
builder.append(escapedName);
builder.append(UNIT_CLOSE);
// Add the display name
builder.append(UNIT_OPEN);
builder.append(escapedDispName);
builder.append(UNIT_CLOSE);
// Add the value
builder.append(UNIT_OPEN);
builder.append(currentValue);
builder.append(UNIT_CLOSE);
builder.append(COUNTER_CLOSE);
return builder.toString();
}
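  /*
   * For illustration only (not part of the original source): a counter with
   * name "BYTES_READ", display name "Bytes Read" and value 42 (all made-up
   * values) stringifies to
   *
   *   [(BYTES_READ)(Bytes Read)(42)]
   *
   * with any delimiter characters inside the names escaped.
   */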
/**
   * Make the pre 0.21 counter group string.
* format: {(actual-name)(display-name)(value)[][][]}
* where [] are compact strings for the counters within.
* @param <G> type of the group
* @param group to stringify
* @return the stringified result
*/
public static <G extends CounterGroupBase<?>>
String toEscapedCompactString(G group) {
List<String> escapedStrs = Lists.newArrayList();
int length;
String escapedName, escapedDispName;
synchronized(group) {
// First up, obtain the strings that need escaping. This will help us
      // determine the buffer length a priori.
escapedName = escape(group.getName());
escapedDispName = escape(group.getDisplayName());
length = escapedName.length() + escapedDispName.length();
for (Counter counter : group) {
String escapedStr = toEscapedCompactString(counter);
escapedStrs.add(escapedStr);
length += escapedStr.length();
}
}
length += 6; // for all the delimiting characters below
StringBuilder builder = new StringBuilder(length);
builder.append(GROUP_OPEN); // group start
// Add the group name
builder.append(UNIT_OPEN);
builder.append(escapedName);
builder.append(UNIT_CLOSE);
// Add the display name
builder.append(UNIT_OPEN);
builder.append(escapedDispName);
builder.append(UNIT_CLOSE);
    // Add the escaped compact strings of the counters
for(String escaped : escapedStrs) {
builder.append(escaped);
}
builder.append(GROUP_CLOSE); // group end
return builder.toString();
}
/**
* Make the pre 0.21 counters string
* @param <C> type of the counter
* @param <G> type of the counter group
* @param <T> type of the counters object
* @param counters the object to stringify
* @return the string in the following format
* {(groupName)(group-displayName)[(counterName)(displayName)(value)]*}*
*/
public static <C extends Counter, G extends CounterGroupBase<C>,
T extends AbstractCounters<C, G>>
String toEscapedCompactString(T counters) {
StringBuilder builder = new StringBuilder();
synchronized(counters) {
for (G group : counters) {
builder.append(toEscapedCompactString(group));
}
}
return builder.toString();
}
  // Escapes all the delimiters for counters, i.e. {, [, (, ), ], }
private static String escape(String string) {
return StringUtils.escapeString(string, StringUtils.ESCAPE_CHAR,
charsToEscape);
}
  // Unescapes all the delimiters for counters, i.e. {, [, (, ), ], }
private static String unescape(String string) {
return StringUtils.unEscapeString(string, StringUtils.ESCAPE_CHAR,
charsToEscape);
}
  // Extracts a block (data enclosed within delimiters) ignoring escape
  // sequences. Throws ParseException if an incomplete block is found,
  // otherwise returns the block, or null if no block is present.
private static String getBlock(String str, char open, char close,
IntWritable index) throws ParseException {
StringBuilder split = new StringBuilder();
int next = StringUtils.findNext(str, open, StringUtils.ESCAPE_CHAR,
index.get(), split);
split.setLength(0); // clear the buffer
if (next >= 0) {
++next; // move over '('
next = StringUtils.findNext(str, close, StringUtils.ESCAPE_CHAR,
next, split);
if (next >= 0) {
++next; // move over ')'
index.set(next);
return split.toString(); // found a block
} else {
throw new ParseException("Unexpected end of block", next);
}
}
return null; // found nothing
}
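  /*
   * For illustration only (not part of the original source): with
   * str = "{(g)(G)}", open = '{' and close = '}', getBlock returns the inner
   * text "(g)(G)" and advances index just past the closing brace; a missing
   * closing delimiter raises ParseException instead.
   */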
/**
* Parse a pre 0.21 counters string into a counter object.
* @param <C> type of the counter
* @param <G> type of the counter group
* @param <T> type of the counters object
* @param compactString to parse
* @param counters an empty counters object to hold the result
* @return the counters object holding the result
* @throws ParseException
*/
@SuppressWarnings("deprecation")
public static <C extends Counter, G extends CounterGroupBase<C>,
T extends AbstractCounters<C, G>>
T parseEscapedCompactString(String compactString, T counters)
throws ParseException {
IntWritable index = new IntWritable(0);
// Get the group to work on
String groupString =
getBlock(compactString, GROUP_OPEN, GROUP_CLOSE, index);
while (groupString != null) {
IntWritable groupIndex = new IntWritable(0);
// Get the actual name
String groupName =
StringInterner.weakIntern(getBlock(groupString, UNIT_OPEN, UNIT_CLOSE, groupIndex));
groupName = StringInterner.weakIntern(unescape(groupName));
// Get the display name
String groupDisplayName =
StringInterner.weakIntern(getBlock(groupString, UNIT_OPEN, UNIT_CLOSE, groupIndex));
groupDisplayName = StringInterner.weakIntern(unescape(groupDisplayName));
// Get the counters
G group = counters.getGroup(groupName);
group.setDisplayName(groupDisplayName);
String counterString =
getBlock(groupString, COUNTER_OPEN, COUNTER_CLOSE, groupIndex);
while (counterString != null) {
IntWritable counterIndex = new IntWritable(0);
// Get the actual name
String counterName =
StringInterner.weakIntern(getBlock(counterString, UNIT_OPEN, UNIT_CLOSE, counterIndex));
counterName = StringInterner.weakIntern(unescape(counterName));
// Get the display name
String counterDisplayName =
StringInterner.weakIntern(getBlock(counterString, UNIT_OPEN, UNIT_CLOSE, counterIndex));
counterDisplayName = StringInterner.weakIntern(unescape(counterDisplayName));
// Get the value
long value =
Long.parseLong(getBlock(counterString, UNIT_OPEN, UNIT_CLOSE,
counterIndex));
// Add the counter
Counter counter = group.findCounter(counterName);
counter.setDisplayName(counterDisplayName);
counter.increment(value);
// Get the next counter
counterString =
getBlock(groupString, COUNTER_OPEN, COUNTER_CLOSE, groupIndex);
}
groupString = getBlock(compactString, GROUP_OPEN, GROUP_CLOSE, index);
}
return counters;
}
}
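
/*
 * Illustrative round trip (not part of the original file): serializing a
 * counters object to the escaped compact form and parsing it back. Using
 * org.apache.hadoop.mapred.Counters as the concrete AbstractCounters subclass
 * is an assumption, as are the group/counter names.
 */
class CountersStringsExample {
  public static void main(String[] args) throws ParseException {
    org.apache.hadoop.mapred.Counters counters =
        new org.apache.hadoop.mapred.Counters();
    counters.findCounter("MyGroup", "MY_COUNTER").increment(7);
    String compact = CountersStrings.toEscapedCompactString(counters);
    // Parse back into a fresh, empty counters object.
    org.apache.hadoop.mapred.Counters parsed =
        CountersStrings.parseEscapedCompactString(
            compact, new org.apache.hadoop.mapred.Counters());
    System.out.println(parsed.findCounter("MyGroup", "MY_COUNTER").getValue());
  }
}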
| 9,732 | 34.521898 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/v2/LogParams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
public class LogParams {
private String containerId;
private String applicationId;
private String nodeId;
private String owner;
public LogParams(String containerIdStr, String applicationIdStr,
String nodeIdStr, String owner) {
this.containerId = containerIdStr;
this.applicationId = applicationIdStr;
this.nodeId = nodeIdStr;
this.owner = owner;
}
public String getContainerId() {
return containerId;
}
public void setContainerId(String containerId) {
this.containerId = containerId;
}
public String getApplicationId() {
return applicationId;
}
public void setApplicationId(String applicationId) {
this.applicationId = applicationId;
}
public String getNodeId() {
return nodeId;
}
public void setNodeId(String nodeId) {
this.nodeId = nodeId;
}
public String getOwner() {
return this.owner;
}
  public void setOwner(String owner) {
    this.owner = owner;
  }
}
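
/*
 * Illustrative construction (not part of the original file); all identifier
 * strings below are made up:
 */
class LogParamsExample {
  public static void main(String[] args) {
    LogParams params = new LogParams(
        "container_1400000000000_0001_01_000001", // container id
        "application_1400000000000_0001",         // application id
        "host1.example.com:45454",                // node id
        "alice");                                 // owner
    System.out.println(params.getContainerId());
  }
}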
| 1,811 | 25.647059 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
package org.apache.hadoop.mapreduce.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,025 | 43.608696 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocolProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.protocol;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@InterfaceAudience.Private
public abstract class ClientProtocolProvider {
public abstract ClientProtocol create(Configuration conf) throws IOException;
public abstract ClientProtocol create(InetSocketAddress addr,
Configuration conf) throws IOException;
public abstract void close(ClientProtocol clientProtocol) throws IOException;
}
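
/*
 * Illustrative skeleton (not part of the original file): the shape of a
 * provider implementation. Providers are typically discovered via
 * java.util.ServiceLoader; a provider returns null from create() to signal
 * that the configuration does not select its framework. This sketch always
 * declines.
 */
class NullClientProtocolProviderSketch extends ClientProtocolProvider {
  @Override
  public ClientProtocol create(Configuration conf) throws IOException {
    return null; // decline: this sketch handles no framework
  }
  @Override
  public ClientProtocol create(InetSocketAddress addr, Configuration conf)
      throws IOException {
    return null;
  }
  @Override
  public void close(ClientProtocol clientProtocol) throws IOException {
    // nothing to release in this sketch
  }
}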
| 1,378 | 35.289474 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.QueueAclsInfo;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
/**
* Protocol that a JobClient and the central JobTracker use to communicate. The
* JobClient can use these methods to submit a Job for execution, and learn about
* the current system status.
*/
@KerberosInfo(
serverPrincipal = JTConfig.JT_USER_NAME)
@TokenInfo(DelegationTokenSelector.class)
@InterfaceAudience.Private
@InterfaceStability.Stable
public interface ClientProtocol extends VersionedProtocol {
/*
   * Changing the versionID to 2L since the getTaskCompletionEvents method has
   * changed.
   * Changed to 4 since killTask(String,boolean) is added
*Version 4: added jobtracker state to ClusterStatus
*Version 5: max_tasks in ClusterStatus is replaced by
* max_map_tasks and max_reduce_tasks for HADOOP-1274
* Version 6: change the counters representation for HADOOP-2248
* Version 7: added getAllJobs for HADOOP-2487
   * Version 8: change {job|task}id's to use corresponding objects rather than strings.
* Version 9: change the counter representation for HADOOP-1915
* Version 10: added getSystemDir for HADOOP-3135
* Version 11: changed JobProfile to include the queue name for HADOOP-3698
* Version 12: Added getCleanupTaskReports and
* cleanupProgress to JobStatus as part of HADOOP-3150
* Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name)
* and getAllJobs(queue) as a part of HADOOP-3930
* Version 14: Added setPriority for HADOOP-4124
* Version 15: Added KILLED status to JobStatus as part of HADOOP-3924
* Version 16: Added getSetupTaskReports and
* setupProgress to JobStatus as part of HADOOP-4261
* Version 17: getClusterStatus returns the amount of memory used by
* the server. HADOOP-4435
* Version 18: Added blacklisted trackers to the ClusterStatus
* for HADOOP-4305
* Version 19: Modified TaskReport to have TIP status and modified the
* method getClusterStatus() to take a boolean argument
* for HADOOP-4807
* Version 20: Modified ClusterStatus to have the tasktracker expiry
* interval for HADOOP-4939
* Version 21: Modified TaskID to be aware of the new TaskTypes
* Version 22: Added method getQueueAclsForCurrentUser to get queue acls info
* for a user
   * Version 23: Modified the JobQueueInfo class to include queue state.
* Part of HADOOP-5913.
* Version 24: Modified ClusterStatus to include BlackListInfo class which
* encapsulates reasons and report for blacklisted node.
* Version 25: Added fields to JobStatus for HADOOP-817.
* Version 26: Added properties to JobQueueInfo as part of MAPREDUCE-861.
   *              added new APIs getRootQueues and
* getChildQueues(String queueName)
* Version 27: Changed protocol to use new api objects. And the protocol is
* renamed from JobSubmissionProtocol to ClientProtocol.
* Version 28: Added getJobHistoryDir() as part of MAPREDUCE-975.
* Version 29: Added reservedSlots, runningTasks and totalJobSubmissions
* to ClusterMetrics as part of MAPREDUCE-1048.
* Version 30: Job submission files are uploaded to a staging area under
* user home dir. JobTracker reads the required files from the
* staging area using user credentials passed via the rpc.
* Version 31: Added TokenStorage to submitJob
* Version 32: Added delegation tokens (add, renew, cancel)
* Version 33: Added JobACLs to JobStatus as part of MAPREDUCE-1307
* Version 34: Modified submitJob to use Credentials instead of TokenStorage.
* Version 35: Added the method getQueueAdmins(queueName) as part of
* MAPREDUCE-1664.
* Version 36: Added the method getJobTrackerStatus() as part of
* MAPREDUCE-2337.
* Version 37: More efficient serialization format for framework counters
* (MAPREDUCE-901)
* Version 38: Added getLogFilePath(JobID, TaskAttemptID) as part of
* MAPREDUCE-3146
*/
public static final long versionID = 37L;
/**
   * Allocate a new ID for the job.
   * @return a unique job ID for submitting jobs.
* @throws IOException
*/
public JobID getNewJobID() throws IOException, InterruptedException;
/**
* Submit a Job for execution. Returns the latest profile for
* that job.
*/
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
throws IOException, InterruptedException;
/**
* Get the current status of the cluster
*
* @return summary of the state of the cluster
*/
public ClusterMetrics getClusterMetrics()
throws IOException, InterruptedException;
/**
* Get the JobTracker's status.
*
* @return {@link JobTrackerStatus} of the JobTracker
* @throws IOException
* @throws InterruptedException
*/
public JobTrackerStatus getJobTrackerStatus() throws IOException,
InterruptedException;
public long getTaskTrackerExpiryInterval() throws IOException,
InterruptedException;
/**
* Get the administrators of the given job-queue.
* This method is for hadoop internal use only.
* @param queueName
* @return Queue administrators ACL for the queue to which job is
* submitted to
* @throws IOException
*/
public AccessControlList getQueueAdmins(String queueName) throws IOException;
/**
* Kill the indicated job
*/
public void killJob(JobID jobid) throws IOException, InterruptedException;
/**
* Set the priority of the specified job
* @param jobid ID of the job
* @param priority Priority to be set for the job
*/
public void setJobPriority(JobID jobid, String priority)
throws IOException, InterruptedException;
/**
* Kill indicated task attempt.
* @param taskId the id of the task to kill.
   * @param shouldFail if true the task is failed and added to the failed tasks
   * list, otherwise it is just killed, without affecting the job failure status.
*/
public boolean killTask(TaskAttemptID taskId, boolean shouldFail)
throws IOException, InterruptedException;
/**
* Grab a handle to a job that is already known to the JobTracker.
* @return Status of the job, or null if not found.
*/
public JobStatus getJobStatus(JobID jobid)
throws IOException, InterruptedException;
/**
* Grab the current job counters
*/
public Counters getJobCounters(JobID jobid)
throws IOException, InterruptedException;
/**
* Grab a bunch of info on the tasks that make up the job
*/
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
throws IOException, InterruptedException;
/**
   * A MapReduce system always operates on a single filesystem. This
   * function returns the filesystem name ('local' for the local
   * filesystem; 'addr:port' for DFS). The client can then copy files
   * into the right locations prior to submitting the job.
*/
public String getFilesystemName() throws IOException, InterruptedException;
/**
* Get all the jobs submitted.
* @return array of JobStatus for the submitted jobs
*/
public JobStatus[] getAllJobs() throws IOException, InterruptedException;
/**
* Get task completion events for the jobid, starting from fromEventId.
   * Returns an empty array if no events are available.
* @param jobid job id
* @param fromEventId event id to start from.
* @param maxEvents the max number of events we want to look at
* @return array of task completion events.
* @throws IOException
*/
public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid,
int fromEventId, int maxEvents) throws IOException, InterruptedException;
/**
* Get the diagnostics for a given task in a given job
* @param taskId the id of the task
* @return an array of the diagnostic messages
*/
public String[] getTaskDiagnostics(TaskAttemptID taskId)
throws IOException, InterruptedException;
/**
* Get all active trackers in cluster.
* @return array of TaskTrackerInfo
*/
public TaskTrackerInfo[] getActiveTrackers()
throws IOException, InterruptedException;
/**
* Get all blacklisted trackers in cluster.
* @return array of TaskTrackerInfo
*/
public TaskTrackerInfo[] getBlacklistedTrackers()
throws IOException, InterruptedException;
/**
* Grab the jobtracker system directory path
* where job-specific files are to be placed.
*
* @return the system directory where job-specific files are to be placed.
*/
public String getSystemDir() throws IOException, InterruptedException;
/**
* Get a hint from the JobTracker
* where job-specific files are to be placed.
*
* @return the directory where job-specific files are to be placed.
*/
public String getStagingAreaDir() throws IOException, InterruptedException;
/**
* Gets the directory location of the completed job history files.
* @throws IOException
* @throws InterruptedException
*/
public String getJobHistoryDir()
throws IOException, InterruptedException;
/**
   * Gets the set of queues associated with the JobTracker
*
* @return Array of the Queue Information Object
* @throws IOException
*/
public QueueInfo[] getQueues() throws IOException, InterruptedException;
/**
* Gets scheduling information associated with the particular Job queue
*
* @param queueName Queue Name
* @return Scheduling Information of the Queue
* @throws IOException
*/
public QueueInfo getQueue(String queueName)
throws IOException, InterruptedException;
/**
* Gets the Queue ACLs for current user
* @return array of QueueAclsInfo object for current user.
* @throws IOException
*/
public QueueAclsInfo[] getQueueAclsForCurrentUser()
throws IOException, InterruptedException;
/**
* Gets the root level queues.
* @return array of JobQueueInfo object.
* @throws IOException
*/
public QueueInfo[] getRootQueues() throws IOException, InterruptedException;
/**
* Returns immediate children of queueName.
* @param queueName
* @return array of JobQueueInfo which are children of queueName
* @throws IOException
*/
public QueueInfo[] getChildQueues(String queueName)
throws IOException, InterruptedException;
/**
* Get a new delegation token.
* @param renewer the user other than the creator (if any) that can renew the
* token
* @return the new delegation token
* @throws IOException
* @throws InterruptedException
*/
public
Token<DelegationTokenIdentifier> getDelegationToken(Text renewer
) throws IOException,
InterruptedException;
/**
* Renew an existing delegation token
* @param token the token to renew
* @return the new expiration time
* @throws IOException
* @throws InterruptedException
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,
InterruptedException;
/**
* Cancel a delegation token.
* @param token the token to cancel
* @throws IOException
* @throws InterruptedException
*/
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,
InterruptedException;
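  // Illustrative sketch (not part of the protocol itself): a client would
  // typically drive the delegation-token lifecycle through the three calls
  // above; the variable names and the renewer string are assumptions.
  //
  //   Token<DelegationTokenIdentifier> token =
  //       proto.getDelegationToken(new Text("renewerUser"));
  //   long nextExpiry = proto.renewDelegationToken(token); // extend lifetime
  //   proto.cancelDelegationToken(token);                  // invalidate it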
/**
* Gets the location of the log file for a job if no taskAttemptId is
* specified, otherwise gets the log location for the taskAttemptId.
* @param jobID the jobId.
* @param taskAttemptID the taskAttemptId.
* @return log params.
* @throws IOException
* @throws InterruptedException
*/
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
throws IOException, InterruptedException;
}
| 14,330 | 37.732432 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.task;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,024 | 41.708333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapred.BackupStore;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progressable;
/**
* The context passed to the {@link Reducer}.
* @param <KEYIN> the class of the input keys
* @param <VALUEIN> the class of the input values
* @param <KEYOUT> the class of the output keys
* @param <VALUEOUT> the class of the output values
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ReduceContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
extends TaskInputOutputContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
implements ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
private RawKeyValueIterator input;
private Counter inputValueCounter;
private Counter inputKeyCounter;
private RawComparator<KEYIN> comparator;
private KEYIN key; // current key
private VALUEIN value; // current value
private boolean firstValue = false; // first value in key
private boolean nextKeyIsSame = false; // more w/ this key
private boolean hasMore; // more in file
protected Progressable reporter;
private Deserializer<KEYIN> keyDeserializer;
private Deserializer<VALUEIN> valueDeserializer;
private DataInputBuffer buffer = new DataInputBuffer();
private BytesWritable currentRawKey = new BytesWritable();
private ValueIterable iterable = new ValueIterable();
private boolean isMarked = false;
private BackupStore<KEYIN,VALUEIN> backupStore;
private final SerializationFactory serializationFactory;
private final Class<KEYIN> keyClass;
private final Class<VALUEIN> valueClass;
private final Configuration conf;
private final TaskAttemptID taskid;
private int currentKeyLength = -1;
private int currentValueLength = -1;
public ReduceContextImpl(Configuration conf, TaskAttemptID taskid,
RawKeyValueIterator input,
Counter inputKeyCounter,
Counter inputValueCounter,
RecordWriter<KEYOUT,VALUEOUT> output,
OutputCommitter committer,
StatusReporter reporter,
RawComparator<KEYIN> comparator,
Class<KEYIN> keyClass,
Class<VALUEIN> valueClass
) throws InterruptedException, IOException{
super(conf, taskid, output, committer, reporter);
this.input = input;
this.inputKeyCounter = inputKeyCounter;
this.inputValueCounter = inputValueCounter;
this.comparator = comparator;
this.serializationFactory = new SerializationFactory(conf);
this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
this.keyDeserializer.open(buffer);
this.valueDeserializer = serializationFactory.getDeserializer(valueClass);
this.valueDeserializer.open(buffer);
hasMore = input.next();
this.keyClass = keyClass;
this.valueClass = valueClass;
this.conf = conf;
this.taskid = taskid;
}
  /** Start processing the next unique key. */
public boolean nextKey() throws IOException,InterruptedException {
while (hasMore && nextKeyIsSame) {
nextKeyValue();
}
if (hasMore) {
if (inputKeyCounter != null) {
inputKeyCounter.increment(1);
}
return nextKeyValue();
} else {
return false;
}
}
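  // Hedged sketch of how the framework consumes this context: Reducer.run()
  // drives nextKey()/getValues() roughly as below (simplified; the real
  // implementation also handles iterator reset between keys).
  //
  //   while (context.nextKey()) {
  //     reduce(context.getCurrentKey(), context.getValues(), context);
  //   }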
/**
* Advance to the next key/value pair.
*/
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!hasMore) {
key = null;
value = null;
return false;
}
firstValue = !nextKeyIsSame;
DataInputBuffer nextKey = input.getKey();
currentRawKey.set(nextKey.getData(), nextKey.getPosition(),
nextKey.getLength() - nextKey.getPosition());
buffer.reset(currentRawKey.getBytes(), 0, currentRawKey.getLength());
key = keyDeserializer.deserialize(key);
DataInputBuffer nextVal = input.getValue();
buffer.reset(nextVal.getData(), nextVal.getPosition(), nextVal.getLength()
- nextVal.getPosition());
value = valueDeserializer.deserialize(value);
currentKeyLength = nextKey.getLength() - nextKey.getPosition();
currentValueLength = nextVal.getLength() - nextVal.getPosition();
if (isMarked) {
backupStore.write(nextKey, nextVal);
}
hasMore = input.next();
if (hasMore) {
nextKey = input.getKey();
nextKeyIsSame = comparator.compare(currentRawKey.getBytes(), 0,
currentRawKey.getLength(),
nextKey.getData(),
nextKey.getPosition(),
nextKey.getLength() - nextKey.getPosition()
) == 0;
} else {
nextKeyIsSame = false;
}
inputValueCounter.increment(1);
return true;
}
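  // Worked example (illustrative): for the sorted input (a,1) (a,2) (b,3),
  // the first call reads (a,1) and sets nextKeyIsSame=true because the
  // look-ahead key is also "a"; the second call reads (a,2) and sets
  // nextKeyIsSame=false; nextKey() then advances to (b,3), starting the
  // next group.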
  @Override
  public KEYIN getCurrentKey() {
return key;
}
@Override
public VALUEIN getCurrentValue() {
return value;
}
BackupStore<KEYIN,VALUEIN> getBackupStore() {
return backupStore;
}
protected class ValueIterator implements ReduceContext.ValueIterator<VALUEIN> {
private boolean inReset = false;
private boolean clearMarkFlag = false;
@Override
public boolean hasNext() {
try {
if (inReset && backupStore.hasNext()) {
return true;
}
} catch (Exception e) {
        // The cause is chained into the RuntimeException below, so a
        // separate printStackTrace() is unnecessary.
throw new RuntimeException("hasNext failed", e);
}
return firstValue || nextKeyIsSame;
}
@Override
public VALUEIN next() {
if (inReset) {
try {
if (backupStore.hasNext()) {
backupStore.next();
DataInputBuffer next = backupStore.nextValue();
buffer.reset(next.getData(), next.getPosition(), next.getLength()
- next.getPosition());
value = valueDeserializer.deserialize(value);
return value;
} else {
inReset = false;
backupStore.exitResetMode();
if (clearMarkFlag) {
clearMarkFlag = false;
isMarked = false;
}
}
} catch (IOException e) {
          // chain the cause rather than printing it separately
throw new RuntimeException("next value iterator failed", e);
}
}
// if this is the first record, we don't need to advance
if (firstValue) {
firstValue = false;
return value;
}
      // if this isn't the first record and the next key is different, the
      // iterator cannot be advanced any further within this key.
if (!nextKeyIsSame) {
throw new NoSuchElementException("iterate past last value");
}
// otherwise, go to the next key/value pair
try {
nextKeyValue();
return value;
} catch (IOException ie) {
throw new RuntimeException("next value iterator failed", ie);
} catch (InterruptedException ie) {
// this is bad, but we can't modify the exception list of java.util
throw new RuntimeException("next value iterator interrupted", ie);
}
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove not implemented");
}
@Override
public void mark() throws IOException {
if (getBackupStore() == null) {
backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
}
isMarked = true;
if (!inReset) {
backupStore.reinitialize();
if (currentKeyLength == -1) {
// The user has not called next() for this iterator yet, so
// there is no current record to mark and copy to backup store.
return;
}
assert (currentValueLength != -1);
int requestedSize = currentKeyLength + currentValueLength +
WritableUtils.getVIntSize(currentKeyLength) +
WritableUtils.getVIntSize(currentValueLength);
DataOutputStream out = backupStore.getOutputStream(requestedSize);
writeFirstKeyValueBytes(out);
backupStore.updateCounters(requestedSize);
} else {
backupStore.mark();
}
}
@Override
public void reset() throws IOException {
      // The user called reset() after a clearMark() was issued while in
      // reset mode: honor the pending clearMark and reject the reset.
if (clearMarkFlag) {
clearMarkFlag = false;
backupStore.clearMark();
throw new IOException("Reset called without a previous mark");
}
if (!isMarked) {
throw new IOException("Reset called without a previous mark");
}
inReset = true;
backupStore.reset();
}
@Override
public void clearMark() throws IOException {
if (getBackupStore() == null) {
return;
}
if (inReset) {
clearMarkFlag = true;
backupStore.clearMark();
} else {
inReset = isMarked = false;
backupStore.reinitialize();
}
}
/**
* This method is called when the reducer moves from one key to
* another.
* @throws IOException
*/
public void resetBackupStore() throws IOException {
if (getBackupStore() == null) {
return;
}
inReset = isMarked = false;
backupStore.reinitialize();
currentKeyLength = -1;
}
/**
     * This method is called to write the record that was most recently
     * served (before a call to mark). Since the framework reads one
     * record ahead, that record is recovered by serializing the current
     * key and value.
     * @param out the stream to write the serialized record to
* @throws IOException
*/
private void writeFirstKeyValueBytes(DataOutputStream out)
throws IOException {
assert (getCurrentKey() != null && getCurrentValue() != null);
WritableUtils.writeVInt(out, currentKeyLength);
WritableUtils.writeVInt(out, currentValueLength);
Serializer<KEYIN> keySerializer =
serializationFactory.getSerializer(keyClass);
keySerializer.open(out);
keySerializer.serialize(getCurrentKey());
Serializer<VALUEIN> valueSerializer =
serializationFactory.getSerializer(valueClass);
valueSerializer.open(out);
valueSerializer.serialize(getCurrentValue());
}
}
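  // Hedged usage sketch: user code normally reaches the mark/reset support
  // above through org.apache.hadoop.mapreduce.MarkableIterator rather than
  // this class directly; the variable names here are assumptions.
  //
  //   MarkableIterator<VALUEIN> it =
  //       new MarkableIterator<VALUEIN>(context.getValues().iterator());
  //   it.mark();                 // remember the current position
  //   while (it.hasNext()) { /* first pass over the values */ it.next(); }
  //   it.reset();                // rewind to the mark (served by BackupStore)
  //   while (it.hasNext()) { /* second pass over the values */ it.next(); }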
protected class ValueIterable implements Iterable<VALUEIN> {
private ValueIterator iterator = new ValueIterator();
@Override
public Iterator<VALUEIN> iterator() {
return iterator;
}
}
/**
* Iterate through the values for the current key, reusing the same value
* object, which is stored in the context.
* @return the series of values associated with the current key. All of the
* objects returned directly and indirectly from this method are reused.
*/
public
Iterable<VALUEIN> getValues() throws IOException, InterruptedException {
return iterable;
}
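  // Because the framework reuses the same value object for every record, a
  // hedged sketch of retaining values past one iteration (assumes VALUEIN is
  // a Writable and a Configuration is available as conf):
  //
  //   List<VALUEIN> kept = new ArrayList<VALUEIN>();
  //   for (VALUEIN v : context.getValues()) {
  //     kept.add(WritableUtils.clone(v, conf)); // deep copy, not the shared ref
  //   }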
}
| 12,885 | 33.921409 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskAttemptContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
* The context for task attempts.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskAttemptContextImpl extends JobContextImpl
implements TaskAttemptContext {
private final TaskAttemptID taskId;
private String status = "";
private StatusReporter reporter;
public TaskAttemptContextImpl(Configuration conf,
TaskAttemptID taskId) {
this(conf, taskId, new DummyReporter());
}
public TaskAttemptContextImpl(Configuration conf,
TaskAttemptID taskId, StatusReporter reporter) {
super(conf, taskId.getJobID());
this.taskId = taskId;
this.reporter = reporter;
}
/**
* Get the unique name for this task attempt.
*/
public TaskAttemptID getTaskAttemptID() {
return taskId;
}
/**
* Get the last set status message.
* @return the current status message
*/
public String getStatus() {
return status;
}
@Override
public Counter getCounter(Enum<?> counterName) {
return reporter.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return reporter.getCounter(groupName, counterName);
}
/**
* Report progress.
*/
@Override
public void progress() {
reporter.progress();
}
protected void setStatusString(String status) {
this.status = status;
}
/**
* Set the current status of the task to the given string.
*/
@Override
public void setStatus(String status) {
String normalizedStatus = Task.normalizeStatus(status, conf);
setStatusString(normalizedStatus);
reporter.setStatus(normalizedStatus);
}
public static class DummyReporter extends StatusReporter {
public void setStatus(String s) {
}
public void progress() {
}
public Counter getCounter(Enum<?> name) {
return new Counters().findCounter(name);
}
public Counter getCounter(String group, String name) {
return new Counters().findCounter(group, name);
}
public float getProgress() {
return 0f;
}
}
@Override
public float getProgress() {
return reporter.getProgress();
}
}
| 3,453 | 27.545455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/MapContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
* The context that is given to the {@link Mapper}.
* @param <KEYIN> the key input type to the Mapper
* @param <VALUEIN> the value input type to the Mapper
* @param <KEYOUT> the key output type from the Mapper
* @param <VALUEOUT> the value output type from the Mapper
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MapContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
extends TaskInputOutputContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
implements MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
private RecordReader<KEYIN,VALUEIN> reader;
private InputSplit split;
public MapContextImpl(Configuration conf, TaskAttemptID taskid,
RecordReader<KEYIN,VALUEIN> reader,
RecordWriter<KEYOUT,VALUEOUT> writer,
OutputCommitter committer,
StatusReporter reporter,
InputSplit split) {
super(conf, taskid, writer, committer, reporter);
this.reader = reader;
this.split = split;
}
/**
* Get the input split for this map.
*/
public InputSplit getInputSplit() {
return split;
}
@Override
public KEYIN getCurrentKey() throws IOException, InterruptedException {
return reader.getCurrentKey();
}
@Override
public VALUEIN getCurrentValue() throws IOException, InterruptedException {
return reader.getCurrentValue();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return reader.nextKeyValue();
}
}
| 2,981 | 34.5 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/TaskInputOutputContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
/**
* A context object that allows input and output from the task. It is only
* supplied to the {@link Mapper} or {@link Reducer}.
* @param <KEYIN> the input key type for the task
* @param <VALUEIN> the input value type for the task
* @param <KEYOUT> the output key type for the task
* @param <VALUEOUT> the output value type for the task
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class TaskInputOutputContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
extends TaskAttemptContextImpl
implements TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
private RecordWriter<KEYOUT,VALUEOUT> output;
private OutputCommitter committer;
public TaskInputOutputContextImpl(Configuration conf, TaskAttemptID taskid,
RecordWriter<KEYOUT,VALUEOUT> output,
OutputCommitter committer,
StatusReporter reporter) {
super(conf, taskid, reporter);
this.output = output;
this.committer = committer;
}
/**
   * Advance to the next key/value pair.
   * @return true if a key/value pair was read, false if the input is
   *         exhausted
*/
public abstract
boolean nextKeyValue() throws IOException, InterruptedException;
/**
* Get the current key.
* @return the current key object or null if there isn't one
* @throws IOException
* @throws InterruptedException
*/
public abstract
KEYIN getCurrentKey() throws IOException, InterruptedException;
/**
* Get the current value.
* @return the value object that was read into
* @throws IOException
* @throws InterruptedException
*/
public abstract VALUEIN getCurrentValue() throws IOException,
InterruptedException;
/**
* Generate an output key/value pair.
*/
public void write(KEYOUT key, VALUEOUT value
) throws IOException, InterruptedException {
output.write(key, value);
}
public OutputCommitter getOutputCommitter() {
return committer;
}
}
| 3,500 | 35.46875 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
/**
* A read-only view of the job that is provided to the tasks while they
* are running.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobContextImpl implements JobContext {
protected final org.apache.hadoop.mapred.JobConf conf;
private JobID jobId;
/**
* The UserGroupInformation object that has a reference to the current user
*/
protected UserGroupInformation ugi;
protected final Credentials credentials;
public JobContextImpl(Configuration conf, JobID jobId) {
if (conf instanceof JobConf) {
this.conf = (JobConf)conf;
} else {
this.conf = new JobConf(conf);
}
this.jobId = jobId;
this.credentials = this.conf.getCredentials();
try {
this.ugi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Return the configuration for the job.
* @return the shared configuration object
*/
public Configuration getConfiguration() {
return conf;
}
/**
* Get the unique ID for the job.
* @return the object with the job id
*/
public JobID getJobID() {
return jobId;
}
/**
* Set the JobID.
*/
public void setJobID(JobID jobId) {
this.jobId = jobId;
}
/**
   * Get the configured number of reduce tasks for this job. Defaults to
* <code>1</code>.
* @return the number of reduce tasks for this job.
*/
public int getNumReduceTasks() {
return conf.getNumReduceTasks();
}
/**
* Get the current working directory for the default file system.
*
* @return the directory name.
*/
public Path getWorkingDirectory() throws IOException {
return conf.getWorkingDirectory();
}
/**
* Get the key class for the job output data.
* @return the key class for the job output data.
*/
public Class<?> getOutputKeyClass() {
return conf.getOutputKeyClass();
}
/**
* Get the value class for job outputs.
* @return the value class for job outputs.
*/
public Class<?> getOutputValueClass() {
return conf.getOutputValueClass();
}
/**
* Get the key class for the map output data. If it is not set, use the
* (final) output key class. This allows the map output key class to be
* different than the final output key class.
* @return the map output key class.
*/
public Class<?> getMapOutputKeyClass() {
return conf.getMapOutputKeyClass();
}
/**
* Get the value class for the map output data. If it is not set, use the
   * (final) output value class. This allows the map output value class to be
* different than the final output value class.
*
* @return the map output value class.
*/
public Class<?> getMapOutputValueClass() {
return conf.getMapOutputValueClass();
}
/**
* Get the user-specified job name. This is only used to identify the
* job to the user.
*
* @return the job's name, defaulting to "".
*/
public String getJobName() {
return conf.getJobName();
}
/**
* Get the {@link InputFormat} class for the job.
*
* @return the {@link InputFormat} class for the job.
*/
@SuppressWarnings("unchecked")
public Class<? extends InputFormat<?,?>> getInputFormatClass()
throws ClassNotFoundException {
return (Class<? extends InputFormat<?,?>>)
conf.getClass(INPUT_FORMAT_CLASS_ATTR, TextInputFormat.class);
}
/**
* Get the {@link Mapper} class for the job.
*
* @return the {@link Mapper} class for the job.
*/
@SuppressWarnings("unchecked")
public Class<? extends Mapper<?,?,?,?>> getMapperClass()
throws ClassNotFoundException {
return (Class<? extends Mapper<?,?,?,?>>)
conf.getClass(MAP_CLASS_ATTR, Mapper.class);
}
/**
* Get the combiner class for the job.
*
* @return the combiner class for the job.
*/
@SuppressWarnings("unchecked")
public Class<? extends Reducer<?,?,?,?>> getCombinerClass()
throws ClassNotFoundException {
return (Class<? extends Reducer<?,?,?,?>>)
conf.getClass(COMBINE_CLASS_ATTR, null);
}
/**
* Get the {@link Reducer} class for the job.
*
* @return the {@link Reducer} class for the job.
*/
@SuppressWarnings("unchecked")
public Class<? extends Reducer<?,?,?,?>> getReducerClass()
throws ClassNotFoundException {
return (Class<? extends Reducer<?,?,?,?>>)
conf.getClass(REDUCE_CLASS_ATTR, Reducer.class);
}
/**
* Get the {@link OutputFormat} class for the job.
*
* @return the {@link OutputFormat} class for the job.
*/
@SuppressWarnings("unchecked")
public Class<? extends OutputFormat<?,?>> getOutputFormatClass()
throws ClassNotFoundException {
return (Class<? extends OutputFormat<?,?>>)
conf.getClass(OUTPUT_FORMAT_CLASS_ATTR, TextOutputFormat.class);
}
/**
* Get the {@link Partitioner} class for the job.
*
* @return the {@link Partitioner} class for the job.
*/
@SuppressWarnings("unchecked")
public Class<? extends Partitioner<?,?>> getPartitionerClass()
throws ClassNotFoundException {
return (Class<? extends Partitioner<?,?>>)
conf.getClass(PARTITIONER_CLASS_ATTR, HashPartitioner.class);
}
/**
* Get the {@link RawComparator} comparator used to compare keys.
*
* @return the {@link RawComparator} comparator used to compare keys.
*/
public RawComparator<?> getSortComparator() {
return conf.getOutputKeyComparator();
}
/**
* Get the pathname of the job's jar.
* @return the pathname
*/
public String getJar() {
return conf.getJar();
}
/**
* Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the combiner.
*
* @return comparator set by the user for grouping values.
* @see Job#setCombinerKeyGroupingComparatorClass(Class) for details.
*/
public RawComparator<?> getCombinerKeyGroupingComparator() {
return conf.getCombinerKeyGroupingComparator();
}
/**
* Get the user defined {@link RawComparator} comparator for
* grouping keys of inputs to the reduce.
*
* @return comparator set by the user for grouping values.
* @see Job#setGroupingComparatorClass(Class) for details.
*/
public RawComparator<?> getGroupingComparator() {
return conf.getOutputValueGroupingComparator();
}
/**
   * Get whether job-setup and job-cleanup are needed for the job
*
* @return boolean
*/
public boolean getJobSetupCleanupNeeded() {
return conf.getBoolean(MRJobConfig.SETUP_CLEANUP_NEEDED, true);
}
/**
* Get whether task-cleanup is needed for the job
*
* @return boolean
*/
public boolean getTaskCleanupNeeded() {
return conf.getBoolean(MRJobConfig.TASK_CLEANUP_NEEDED, true);
}
/**
   * This method checks to see if symlinks are to be created for the
   * localized cache files in the current working directory.
   * @return true if symlinks are to be created, else false
*/
public boolean getSymlink() {
return DistributedCache.getSymlink(conf);
}
/**
* Get the archive entries in classpath as an array of Path
*/
public Path[] getArchiveClassPaths() {
return DistributedCache.getArchiveClassPaths(conf);
}
/**
* Get cache archives set in the Configuration
* @return A URI array of the caches set in the Configuration
* @throws IOException
*/
public URI[] getCacheArchives() throws IOException {
return DistributedCache.getCacheArchives(conf);
}
/**
* Get cache files set in the Configuration
* @return A URI array of the files set in the Configuration
* @throws IOException
*/
public URI[] getCacheFiles() throws IOException {
return DistributedCache.getCacheFiles(conf);
}
/**
* Return the path array of the localized caches
* @return A path array of localized caches
* @throws IOException
*/
public Path[] getLocalCacheArchives()
throws IOException {
return DistributedCache.getLocalCacheArchives(conf);
}
/**
* Return the path array of the localized files
* @return A path array of localized files
* @throws IOException
*/
public Path[] getLocalCacheFiles()
throws IOException {
return DistributedCache.getLocalCacheFiles(conf);
}
/**
* Get the file entries in classpath as an array of Path
*/
public Path[] getFileClassPaths() {
return DistributedCache.getFileClassPaths(conf);
}
/**
   * Convert a list of longs into strings.
   * @param timestamps the list of longs to convert
   * @return an array of strings of the same length as timestamps
*/
private static String[] toTimestampStrs(long[] timestamps) {
if (timestamps == null) {
return null;
}
String[] result = new String[timestamps.length];
for(int i=0; i < timestamps.length; ++i) {
result[i] = Long.toString(timestamps[i]);
}
return result;
}
/**
* Get the timestamps of the archives. Used by internal
* DistributedCache and MapReduce code.
* @return a string array of timestamps
*/
public String[] getArchiveTimestamps() {
return toTimestampStrs(DistributedCache.getArchiveTimestamps(conf));
}
/**
* Get the timestamps of the files. Used by internal
* DistributedCache and MapReduce code.
* @return a string array of timestamps
*/
public String[] getFileTimestamps() {
return toTimestampStrs(DistributedCache.getFileTimestamps(conf));
}
/**
* Get the configured number of maximum attempts that will be made to run a
* map task, as specified by the <code>mapred.map.max.attempts</code>
* property. If this property is not already set, the default is 4 attempts.
*
* @return the max number of attempts per map task.
*/
public int getMaxMapAttempts() {
return conf.getMaxMapAttempts();
}
/**
* Get the configured number of maximum attempts that will be made to run a
* reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
* property. If this property is not already set, the default is 4 attempts.
*
* @return the max number of attempts per reduce task.
*/
public int getMaxReduceAttempts() {
return conf.getMaxReduceAttempts();
}
/**
   * Get whether task profiling is enabled.
* @return true if some tasks will be profiled
*/
public boolean getProfileEnabled() {
return conf.getProfileEnabled();
}
/**
* Get the profiler configuration arguments.
*
* The default value for this property is
* "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
*
* @return the parameters to pass to the task child to configure profiling
*/
public String getProfileParams() {
return conf.getProfileParams();
}
/**
* Get the range of maps or reduces to profile.
* @param isMap is the task a map?
* @return the task ranges
*/
public IntegerRanges getProfileTaskRange(boolean isMap) {
return conf.getProfileTaskRange(isMap);
}
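  // Hedged example of the range syntax accepted here (see
  // Configuration.IntegerRanges): a value such as "0-2" selects task ids
  // 0, 1 and 2, and "0-2,5" would add task 5. The strings are illustrative.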
/**
* Get the reported username for this job.
*
* @return the username
*/
public String getUser() {
return conf.getUser();
}
public Credentials getCredentials() {
return credentials;
}
}
| 13,262 | 28.085526 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.mapreduce.task.reduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,031 | 42 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapred.ShuffleConsumerPlugin;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progress;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
@SuppressWarnings({"unchecked", "rawtypes"})
public class Shuffle<K, V> implements ShuffleConsumerPlugin<K, V>, ExceptionReporter {
private static final int PROGRESS_FREQUENCY = 2000;
private static final int MAX_EVENTS_TO_FETCH = 10000;
private static final int MIN_EVENTS_TO_FETCH = 100;
private static final int MAX_RPC_OUTSTANDING_EVENTS = 3000000;
private ShuffleConsumerPlugin.Context context;
private TaskAttemptID reduceId;
private JobConf jobConf;
private Reporter reporter;
private ShuffleClientMetrics metrics;
private TaskUmbilicalProtocol umbilical;
private ShuffleSchedulerImpl<K,V> scheduler;
private MergeManager<K, V> merger;
private Throwable throwable = null;
private String throwingThreadName = null;
private Progress copyPhase;
private TaskStatus taskStatus;
private Task reduceTask; //Used for status updates
private Map<TaskAttemptID, MapOutputFile> localMapFiles;
@Override
public void init(ShuffleConsumerPlugin.Context context) {
this.context = context;
this.reduceId = context.getReduceId();
this.jobConf = context.getJobConf();
this.umbilical = context.getUmbilical();
this.reporter = context.getReporter();
this.metrics = new ShuffleClientMetrics(reduceId, jobConf);
this.copyPhase = context.getCopyPhase();
this.taskStatus = context.getStatus();
this.reduceTask = context.getReduceTask();
this.localMapFiles = context.getLocalMapFiles();
scheduler = new ShuffleSchedulerImpl<K, V>(jobConf, taskStatus, reduceId,
this, copyPhase, context.getShuffledMapsCounter(),
context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
merger = createMergeManager(context);
}
protected MergeManager<K, V> createMergeManager(
ShuffleConsumerPlugin.Context context) {
return new MergeManagerImpl<K, V>(reduceId, jobConf, context.getLocalFS(),
context.getLocalDirAllocator(), reporter, context.getCodec(),
context.getCombinerClass(), context.getCombineCollector(),
context.getSpilledRecordsCounter(),
context.getReduceCombineInputCounter(),
context.getMergedMapOutputsCounter(), this, context.getMergePhase(),
context.getMapOutputFile());
}
@Override
public RawKeyValueIterator run() throws IOException, InterruptedException {
// Scale the maximum events we fetch per RPC call to mitigate OOM issues
// on the ApplicationMaster when a thundering herd of reducers fetch events
// TODO: This should not be necessary after HADOOP-8942
int eventsPerReducer = Math.max(MIN_EVENTS_TO_FETCH,
MAX_RPC_OUTSTANDING_EVENTS / jobConf.getNumReduceTasks());
int maxEventsToFetch = Math.min(MAX_EVENTS_TO_FETCH, eventsPerReducer);
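    // Worked example of the scaling above (numbers are illustrative): with
    // 10,000 reduce tasks, eventsPerReducer = max(100, 3,000,000 / 10,000)
    // = 300, so maxEventsToFetch = min(10,000, 300) = 300. With only 10
    // reduce tasks the MAX_EVENTS_TO_FETCH cap of 10,000 wins instead.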
// Start the map-completion events fetcher thread
final EventFetcher<K,V> eventFetcher =
new EventFetcher<K,V>(reduceId, umbilical, scheduler, this,
maxEventsToFetch);
eventFetcher.start();
// Start the map-output fetcher threads
boolean isLocal = localMapFiles != null;
final int numFetchers = isLocal ? 1 :
jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);
Fetcher<K,V>[] fetchers = new Fetcher[numFetchers];
if (isLocal) {
fetchers[0] = new LocalFetcher<K, V>(jobConf, reduceId, scheduler,
merger, reporter, metrics, this, reduceTask.getShuffleSecret(),
localMapFiles);
fetchers[0].start();
} else {
for (int i=0; i < numFetchers; ++i) {
fetchers[i] = new Fetcher<K,V>(jobConf, reduceId, scheduler, merger,
reporter, metrics, this,
reduceTask.getShuffleSecret());
fetchers[i].start();
}
}
// Wait for shuffle to complete successfully
while (!scheduler.waitUntilDone(PROGRESS_FREQUENCY)) {
reporter.progress();
synchronized (this) {
if (throwable != null) {
throw new ShuffleError("error in shuffle in " + throwingThreadName,
throwable);
}
}
}
// Stop the event-fetcher thread
eventFetcher.shutDown();
// Stop the map-output fetcher threads
for (Fetcher<K,V> fetcher : fetchers) {
fetcher.shutDown();
}
// stop the scheduler
scheduler.close();
copyPhase.complete(); // copy is already complete
taskStatus.setPhase(TaskStatus.Phase.SORT);
reduceTask.statusUpdate(umbilical);
// Finish the on-going merges...
RawKeyValueIterator kvIter = null;
try {
kvIter = merger.close();
} catch (Throwable e) {
throw new ShuffleError("Error while doing final merge " , e);
}
// Sanity check
synchronized (this) {
if (throwable != null) {
throw new ShuffleError("error in shuffle in " + throwingThreadName,
throwable);
}
}
return kvIter;
}
@Override
  public void close() {
}
public synchronized void reportException(Throwable t) {
if (throwable == null) {
throwable = t;
throwingThreadName = Thread.currentThread().getName();
// Notify the scheduler so that the reporting thread finds the
// exception immediately.
synchronized (scheduler) {
scheduler.notifyAll();
}
}
}
public static class ShuffleError extends IOException {
private static final long serialVersionUID = 5753909320586607881L;
ShuffleError(String msg, Throwable t) {
super(msg, t);
}
}
}
| 7,187 | 35.30303 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ExceptionReporter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An interface for reporting exceptions to other threads
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public interface ExceptionReporter {
void reportException(Throwable t);
}
| 1,197 | 37.645161 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleHeader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
 * Shuffle header information that is sent by the TaskTracker and
 * deciphered by the Fetcher thread of the Reduce task.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public class ShuffleHeader implements Writable {
/** Header info of the shuffle http request/response */
public static final String HTTP_HEADER_NAME = "name";
public static final String DEFAULT_HTTP_HEADER_NAME = "mapreduce";
public static final String HTTP_HEADER_VERSION = "version";
public static final String DEFAULT_HTTP_HEADER_VERSION = "1.0.0";
/**
* The longest possible length of task attempt id that we will accept.
*/
private static final int MAX_ID_LENGTH = 1000;
String mapId;
long uncompressedLength;
long compressedLength;
int forReduce;
public ShuffleHeader() { }
public ShuffleHeader(String mapId, long compressedLength,
long uncompressedLength, int forReduce) {
this.mapId = mapId;
this.compressedLength = compressedLength;
this.uncompressedLength = uncompressedLength;
this.forReduce = forReduce;
}
public void readFields(DataInput in) throws IOException {
mapId = WritableUtils.readStringSafely(in, MAX_ID_LENGTH);
compressedLength = WritableUtils.readVLong(in);
uncompressedLength = WritableUtils.readVLong(in);
forReduce = WritableUtils.readVInt(in);
}
public void write(DataOutput out) throws IOException {
Text.writeString(out, mapId);
WritableUtils.writeVLong(out, compressedLength);
WritableUtils.writeVLong(out, uncompressedLength);
WritableUtils.writeVInt(out, forReduce);
}
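  // Hedged round-trip sketch using the in-memory buffers from
  // org.apache.hadoop.io (variable names are assumptions):
  //
  //   ShuffleHeader sent = new ShuffleHeader("attempt_...", 1024L, 4096L, 0);
  //   DataOutputBuffer ob = new DataOutputBuffer();
  //   sent.write(ob);
  //   DataInputBuffer ib = new DataInputBuffer();
  //   ib.reset(ob.getData(), ob.getLength());
  //   ShuffleHeader received = new ShuffleHeader();
  //   received.readFields(ib);   // recovers mapId and the three lengths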
}
| 2,770 | 34.525641 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.IFileInputStream;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
* Common code for allowing MapOutput classes to handle streams.
*
* @param <K> key type for map output
* @param <V> value type for map output
*/
public abstract class IFileWrappedMapOutput<K, V> extends MapOutput<K, V> {
private final Configuration conf;
private final MergeManagerImpl<K, V> merger;
public IFileWrappedMapOutput(
Configuration c, MergeManagerImpl<K, V> m, TaskAttemptID mapId,
long size, boolean primaryMapOutput) {
super(mapId, size, primaryMapOutput);
conf = c;
merger = m;
}
/**
* @return the merger
*/
protected MergeManagerImpl<K, V> getMerger() {
return merger;
}
protected abstract void doShuffle(
MapHost host, IFileInputStream iFileInputStream,
long compressedLength, long decompressedLength,
ShuffleClientMetrics metrics, Reporter reporter) throws IOException;
@Override
public void shuffle(MapHost host, InputStream input,
long compressedLength, long decompressedLength,
ShuffleClientMetrics metrics,
Reporter reporter) throws IOException {
doShuffle(host, new IFileInputStream(input, compressedLength, conf),
compressedLength, decompressedLength, metrics, reporter);
}
}
| 2,367 | 34.343284 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskAttemptID;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class MapHost {
  public enum State {
IDLE, // No map outputs available
BUSY, // Map outputs are being fetched
PENDING, // Known map outputs which need to be fetched
PENALIZED // Host penalized due to shuffle failures
}
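  // Illustrative transition summary, derived from the methods below:
  //   IDLE      --addKnownMap()---> PENDING
  //   PENDING   --markBusy()------> BUSY
  //   BUSY      --markAvailable()-> PENDING (maps pending) or IDLE (none)
  //   any state --penalize()------> PENALIZED, until markAvailable()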
private State state = State.IDLE;
private final String hostName;
private final String baseUrl;
private List<TaskAttemptID> maps = new ArrayList<TaskAttemptID>();
public MapHost(String hostName, String baseUrl) {
this.hostName = hostName;
this.baseUrl = baseUrl;
}
public State getState() {
return state;
}
public String getHostName() {
return hostName;
}
public String getBaseUrl() {
return baseUrl;
}
public synchronized void addKnownMap(TaskAttemptID mapId) {
maps.add(mapId);
if (state == State.IDLE) {
state = State.PENDING;
}
}
public synchronized List<TaskAttemptID> getAndClearKnownMaps() {
List<TaskAttemptID> currentKnownMaps = maps;
maps = new ArrayList<TaskAttemptID>();
return currentKnownMaps;
}
public synchronized void markBusy() {
state = State.BUSY;
}
public synchronized void markPenalized() {
state = State.PENALIZED;
}
public synchronized int getNumKnownMapOutputs() {
return maps.size();
}
/**
* Called when the node is done with its penalty or done copying.
* @return the host's new state
*/
public synchronized State markAvailable() {
if (maps.isEmpty()) {
state = State.IDLE;
} else {
state = State.PENDING;
}
return state;
}
@Override
public String toString() {
return hostName;
}
/**
* Mark the host as penalized
*/
public synchronized void penalize() {
state = State.PENALIZED;
}
}
| 2,977 | 25.828829 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
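/**
 * Reduce-side shuffle metrics: bytes copied, fetch successes and failures,
 * and fetcher-thread utilization, published through the
 * org.apache.hadoop.metrics framework via {@link Updater}.
 */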
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class ShuffleClientMetrics implements Updater {
private MetricsRecord shuffleMetrics = null;
private int numFailedFetches = 0;
private int numSuccessFetches = 0;
private long numBytes = 0;
private int numThreadsBusy = 0;
private final int numCopiers;
ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
this.numCopiers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);
MetricsContext metricsContext = MetricsUtil.getContext("mapred");
this.shuffleMetrics =
MetricsUtil.createRecord(metricsContext, "shuffleInput");
this.shuffleMetrics.setTag("user", jobConf.getUser());
this.shuffleMetrics.setTag("jobName", jobConf.getJobName());
this.shuffleMetrics.setTag("jobId", reduceId.getJobID().toString());
this.shuffleMetrics.setTag("taskId", reduceId.toString());
this.shuffleMetrics.setTag("sessionId", jobConf.getSessionId());
metricsContext.registerUpdater(this);
}
public synchronized void inputBytes(long numBytes) {
this.numBytes += numBytes;
}
public synchronized void failedFetch() {
++numFailedFetches;
}
public synchronized void successFetch() {
++numSuccessFetches;
}
public synchronized void threadBusy() {
++numThreadsBusy;
}
public synchronized void threadFree() {
--numThreadsBusy;
}
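  /**
   * Periodic callback from the metrics system: pushes the accumulated
   * counters into the metrics record and resets the per-interval deltas.
   */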
public void doUpdates(MetricsContext unused) {
synchronized (this) {
shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes);
shuffleMetrics.incrMetric("shuffle_failed_fetches",
numFailedFetches);
shuffleMetrics.incrMetric("shuffle_success_fetches",
numSuccessFetches);
if (numCopiers != 0) {
shuffleMetrics.setMetric("shuffle_fetchers_busy_percent",
100*((float)numThreadsBusy/numCopiers));
} else {
shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0);
}
numBytes = 0;
numSuccessFetches = 0;
numFailedFetches = 0;
}
shuffleMetrics.update();
}
}
| 3,427 | 37.088889 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.IndexRecord;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SpillRecord;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.CryptoUtils;
/**
* LocalFetcher is used by LocalJobRunner to perform a local filesystem
* fetch.
*/
class LocalFetcher<K,V> extends Fetcher<K, V> {
private static final Log LOG = LogFactory.getLog(LocalFetcher.class);
private static final MapHost LOCALHOST = new MapHost("local", "local");
private JobConf job;
private Map<TaskAttemptID, MapOutputFile> localMapFiles;
public LocalFetcher(JobConf job, TaskAttemptID reduceId,
ShuffleSchedulerImpl<K, V> scheduler,
MergeManager<K,V> merger,
Reporter reporter, ShuffleClientMetrics metrics,
ExceptionReporter exceptionReporter,
SecretKey shuffleKey,
Map<TaskAttemptID, MapOutputFile> localMapFiles) {
super(job, reduceId, scheduler, merger, reporter, metrics,
exceptionReporter, shuffleKey);
this.job = job;
this.localMapFiles = localMapFiles;
setName("localfetcher#" + id);
setDaemon(true);
}
public void run() {
// Create a worklist of task attempts to work over.
Set<TaskAttemptID> maps = new HashSet<TaskAttemptID>();
for (TaskAttemptID map : localMapFiles.keySet()) {
maps.add(map);
}
while (maps.size() > 0) {
try {
// If merge is on, block
merger.waitForResource();
metrics.threadBusy();
// Copy as much as is possible.
doCopy(maps);
metrics.threadFree();
} catch (InterruptedException ie) {
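        // Interrupted while waiting on the merge; loop around and retry the
        // remaining map outputs.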
} catch (Throwable t) {
exceptionReporter.reportException(t);
}
}
}
/**
* The crux of the matter...
*/
private void doCopy(Set<TaskAttemptID> maps) throws IOException {
Iterator<TaskAttemptID> iter = maps.iterator();
while (iter.hasNext()) {
TaskAttemptID map = iter.next();
LOG.debug("LocalFetcher " + id + " going to fetch: " + map);
if (copyMapOutput(map)) {
// Successful copy. Remove this from our worklist.
iter.remove();
} else {
// We got back a WAIT command; go back to the outer loop
// and block for InMemoryMerge.
break;
}
}
}
/**
* Retrieve the map output of a single map task
* and send it to the merger.
*/
private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException {
// Figure out where the map task stored its output.
Path mapOutputFileName = localMapFiles.get(mapTaskId).getOutputFile();
Path indexFileName = mapOutputFileName.suffix(".index");
// Read its index to determine the location of our split
// and its size.
SpillRecord sr = new SpillRecord(indexFileName, job);
IndexRecord ir = sr.getIndex(reduce);
long compressedLength = ir.partLength;
long decompressedLength = ir.rawLength;
compressedLength -= CryptoUtils.cryptoPadding(job);
decompressedLength -= CryptoUtils.cryptoPadding(job);
// Get the location for the map output - either in-memory or on-disk
MapOutput<K, V> mapOutput = merger.reserve(mapTaskId, decompressedLength,
id);
// Check if we can shuffle *now* ...
if (mapOutput == null) {
LOG.info("fetcher#" + id + " - MergeManager returned Status.WAIT ...");
return false;
}
// Go!
LOG.info("localfetcher#" + id + " about to shuffle output of map " +
mapOutput.getMapId() + " decomp: " +
decompressedLength + " len: " + compressedLength + " to " +
mapOutput.getDescription());
// now read the file, seek to the appropriate section, and send it.
FileSystem localFs = FileSystem.getLocal(job).getRaw();
FSDataInputStream inStream = localFs.open(mapOutputFileName);
try {
inStream = CryptoUtils.wrapIfNecessary(job, inStream);
inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job));
mapOutput.shuffle(LOCALHOST, inStream, compressedLength,
decompressedLength, metrics, reporter);
} finally {
IOUtils.cleanup(LOG, inStream);
}
scheduler.copySucceeded(mapTaskId, LOCALHOST, compressedLength, 0, 0,
mapOutput);
return true; // successful fetch.
}
}
| 5,707 | 32.97619 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.IFile;
import org.apache.hadoop.mapred.IFileOutputStream;
import org.apache.hadoop.mapred.IFile.Writer;
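/**
 * An IFile-format writer that targets an in-memory buffer instead of a
 * file; only the raw append(DataInputBuffer, DataInputBuffer) form is
 * supported.
 */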
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class InMemoryWriter<K, V> extends Writer<K, V> {
private DataOutputStream out;
public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
super(null);
this.out =
new DataOutputStream(new IFileOutputStream(arrayStream));
}
public void append(K key, V value) throws IOException {
    throw new UnsupportedOperationException
      ("InMemoryWriter.append(K key, V value)");
}
public void append(DataInputBuffer key, DataInputBuffer value)
throws IOException {
int keyLength = key.getLength() - key.getPosition();
if (keyLength < 0) {
throw new IOException("Negative key-length not allowed: " + keyLength +
" for " + key);
}
int valueLength = value.getLength() - value.getPosition();
if (valueLength < 0) {
throw new IOException("Negative value-length not allowed: " +
valueLength + " for " + value);
}
WritableUtils.writeVInt(out, keyLength);
WritableUtils.writeVInt(out, valueLength);
out.write(key.getData(), key.getPosition(), keyLength);
out.write(value.getData(), value.getPosition(), valueLength);
}
public void close() throws IOException {
// Write EOF_MARKER for key/value length
WritableUtils.writeVInt(out, IFile.EOF_MARKER);
WritableUtils.writeVInt(out, IFile.EOF_MARKER);
// Close the stream
out.close();
out = null;
}
}
| 2,813 | 34.620253 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.IFile;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.Merger;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.mapred.Task.CombineValuesIterator;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting;
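/**
 * Default {@link MergeManager} implementation: reserves memory or disk for
 * incoming map outputs, runs background in-memory, memory-to-memory and
 * on-disk merge threads, and produces the final merged iterator that is fed
 * to the reduce.
 */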
@SuppressWarnings(value={"unchecked"})
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
private static final Log LOG = LogFactory.getLog(MergeManagerImpl.class);
  /* Maximum percentage of the in-memory limit that a single shuffle can
   * consume. */
private static final float DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT
= 0.25f;
private final TaskAttemptID reduceId;
private final JobConf jobConf;
private final FileSystem localFS;
private final FileSystem rfs;
private final LocalDirAllocator localDirAllocator;
protected MapOutputFile mapOutputFile;
Set<InMemoryMapOutput<K, V>> inMemoryMergedMapOutputs =
new TreeSet<InMemoryMapOutput<K,V>>(new MapOutputComparator<K, V>());
private IntermediateMemoryToMemoryMerger memToMemMerger;
Set<InMemoryMapOutput<K, V>> inMemoryMapOutputs =
new TreeSet<InMemoryMapOutput<K,V>>(new MapOutputComparator<K, V>());
private final MergeThread<InMemoryMapOutput<K,V>, K,V> inMemoryMerger;
Set<CompressAwarePath> onDiskMapOutputs = new TreeSet<CompressAwarePath>();
private final OnDiskMerger onDiskMerger;
@VisibleForTesting
final long memoryLimit;
private long usedMemory;
private long commitMemory;
private final long maxSingleShuffleLimit;
private final int memToMemMergeOutputsThreshold;
private final long mergeThreshold;
private final int ioSortFactor;
private final Reporter reporter;
private final ExceptionReporter exceptionReporter;
/**
* Combiner class to run during in-memory merge, if defined.
*/
private final Class<? extends Reducer> combinerClass;
/**
* Resettable collector used for combine.
*/
private final CombineOutputCollector<K,V> combineCollector;
private final Counters.Counter spilledRecordsCounter;
private final Counters.Counter reduceCombineInputCounter;
private final Counters.Counter mergedMapOutputsCounter;
private final CompressionCodec codec;
private final Progress mergePhase;
public MergeManagerImpl(TaskAttemptID reduceId, JobConf jobConf,
FileSystem localFS,
LocalDirAllocator localDirAllocator,
Reporter reporter,
CompressionCodec codec,
Class<? extends Reducer> combinerClass,
CombineOutputCollector<K,V> combineCollector,
Counters.Counter spilledRecordsCounter,
Counters.Counter reduceCombineInputCounter,
Counters.Counter mergedMapOutputsCounter,
ExceptionReporter exceptionReporter,
Progress mergePhase, MapOutputFile mapOutputFile) {
this.reduceId = reduceId;
this.jobConf = jobConf;
this.localDirAllocator = localDirAllocator;
this.exceptionReporter = exceptionReporter;
this.reporter = reporter;
this.codec = codec;
this.combinerClass = combinerClass;
this.combineCollector = combineCollector;
this.reduceCombineInputCounter = reduceCombineInputCounter;
this.spilledRecordsCounter = spilledRecordsCounter;
this.mergedMapOutputsCounter = mergedMapOutputsCounter;
this.mapOutputFile = mapOutputFile;
this.mapOutputFile.setConf(jobConf);
this.localFS = localFS;
this.rfs = ((LocalFileSystem)localFS).getRaw();
final float maxInMemCopyUse =
jobConf.getFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT,
MRJobConfig.DEFAULT_SHUFFLE_INPUT_BUFFER_PERCENT);
if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) {
throw new IllegalArgumentException("Invalid value for " +
MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT + ": " +
maxInMemCopyUse);
}
// Allow unit tests to fix Runtime memory
this.memoryLimit = (long)(jobConf.getLong(
MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
Runtime.getRuntime().maxMemory()) * maxInMemCopyUse);
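    // Illustrative example (not from the original source): with a 1 GB max
    // heap and a buffer percent of 0.70, memoryLimit works out to roughly
    // 700 MB of in-memory shuffle space.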
this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100);
final float singleShuffleMemoryLimitPercent =
jobConf.getFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,
DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT);
if (singleShuffleMemoryLimitPercent <= 0.0f
|| singleShuffleMemoryLimitPercent > 1.0f) {
throw new IllegalArgumentException("Invalid value for "
+ MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT + ": "
+ singleShuffleMemoryLimitPercent);
}
usedMemory = 0L;
commitMemory = 0L;
this.maxSingleShuffleLimit =
(long)(memoryLimit * singleShuffleMemoryLimitPercent);
this.memToMemMergeOutputsThreshold =
jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
this.mergeThreshold = (long)(this.memoryLimit *
jobConf.getFloat(
MRJobConfig.SHUFFLE_MERGE_PERCENT,
MRJobConfig.DEFAULT_SHUFFLE_MERGE_PERCENT));
LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " +
"maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " +
"mergeThreshold=" + mergeThreshold + ", " +
"ioSortFactor=" + ioSortFactor + ", " +
"memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold);
if (this.maxSingleShuffleLimit >= this.mergeThreshold) {
throw new RuntimeException("Invalid configuration: "
+ "maxSingleShuffleLimit should be less than mergeThreshold "
+ "maxSingleShuffleLimit: " + this.maxSingleShuffleLimit
+ "mergeThreshold: " + this.mergeThreshold);
}
boolean allowMemToMemMerge =
jobConf.getBoolean(MRJobConfig.REDUCE_MEMTOMEM_ENABLED, false);
if (allowMemToMemMerge) {
this.memToMemMerger =
new IntermediateMemoryToMemoryMerger(this,
memToMemMergeOutputsThreshold);
this.memToMemMerger.start();
} else {
this.memToMemMerger = null;
}
this.inMemoryMerger = createInMemoryMerger();
this.inMemoryMerger.start();
this.onDiskMerger = new OnDiskMerger(this);
this.onDiskMerger.start();
this.mergePhase = mergePhase;
}
protected MergeThread<InMemoryMapOutput<K,V>, K,V> createInMemoryMerger() {
return new InMemoryMerger(this);
}
protected MergeThread<CompressAwarePath,K,V> createOnDiskMerger() {
return new OnDiskMerger(this);
}
TaskAttemptID getReduceId() {
return reduceId;
}
@VisibleForTesting
ExceptionReporter getExceptionReporter() {
return exceptionReporter;
}
@Override
public void waitForResource() throws InterruptedException {
inMemoryMerger.waitForMerge();
}
private boolean canShuffleToMemory(long requestedSize) {
return (requestedSize < maxSingleShuffleLimit);
}
@Override
public synchronized MapOutput<K,V> reserve(TaskAttemptID mapId,
long requestedSize,
int fetcher
) throws IOException {
if (!canShuffleToMemory(requestedSize)) {
LOG.info(mapId + ": Shuffling to disk since " + requestedSize +
" is greater than maxSingleShuffleLimit (" +
maxSingleShuffleLimit + ")");
return new OnDiskMapOutput<K,V>(mapId, this, requestedSize, jobConf,
fetcher, true, FileSystem.getLocal(jobConf).getRaw(),
mapOutputFile.getInputFileForWrite(mapId.getTaskID(), requestedSize));
}
// Stall shuffle if we are above the memory limit
// It is possible that all threads could just be stalling and not make
// progress at all. This could happen when:
//
// requested size is causing the used memory to go above limit &&
// requested size < singleShuffleLimit &&
// current used size < mergeThreshold (merge will not get triggered)
//
// To avoid this from happening, we allow exactly one thread to go past
// the memory limit. We check (usedMemory > memoryLimit) and not
// (usedMemory + requestedSize > memoryLimit). When this thread is done
// fetching, this will automatically trigger a merge thereby unlocking
// all the stalled threads
if (usedMemory > memoryLimit) {
LOG.debug(mapId + ": Stalling shuffle since usedMemory (" + usedMemory
+ ") is greater than memoryLimit (" + memoryLimit + ")." +
" CommitMemory is (" + commitMemory + ")");
return null;
}
// Allow the in-memory shuffle to progress
LOG.debug(mapId + ": Proceeding with shuffle since usedMemory ("
+ usedMemory + ") is lesser than memoryLimit (" + memoryLimit + ")."
+ "CommitMemory is (" + commitMemory + ")");
return unconditionalReserve(mapId, requestedSize, true);
}
/**
   * Unconditional reserve, used by the memory-to-memory merge thread.
   * @return the in-memory map output reserved for the requested size
*/
private synchronized InMemoryMapOutput<K, V> unconditionalReserve(
TaskAttemptID mapId, long requestedSize, boolean primaryMapOutput) {
usedMemory += requestedSize;
return new InMemoryMapOutput<K,V>(jobConf, mapId, this, (int)requestedSize,
codec, primaryMapOutput);
}
synchronized void unreserve(long size) {
usedMemory -= size;
}
public synchronized void closeInMemoryFile(InMemoryMapOutput<K,V> mapOutput) {
inMemoryMapOutputs.add(mapOutput);
LOG.info("closeInMemoryFile -> map-output of size: " + mapOutput.getSize()
+ ", inMemoryMapOutputs.size() -> " + inMemoryMapOutputs.size()
+ ", commitMemory -> " + commitMemory + ", usedMemory ->" + usedMemory);
commitMemory+= mapOutput.getSize();
// Can hang if mergeThreshold is really low.
if (commitMemory >= mergeThreshold) {
LOG.info("Starting inMemoryMerger's merge since commitMemory=" +
commitMemory + " > mergeThreshold=" + mergeThreshold +
". Current usedMemory=" + usedMemory);
inMemoryMapOutputs.addAll(inMemoryMergedMapOutputs);
inMemoryMergedMapOutputs.clear();
inMemoryMerger.startMerge(inMemoryMapOutputs);
commitMemory = 0L; // Reset commitMemory.
}
if (memToMemMerger != null) {
if (inMemoryMapOutputs.size() >= memToMemMergeOutputsThreshold) {
memToMemMerger.startMerge(inMemoryMapOutputs);
}
}
}
public synchronized void closeInMemoryMergedFile(InMemoryMapOutput<K,V> mapOutput) {
inMemoryMergedMapOutputs.add(mapOutput);
LOG.info("closeInMemoryMergedFile -> size: " + mapOutput.getSize() +
", inMemoryMergedMapOutputs.size() -> " +
inMemoryMergedMapOutputs.size());
}
public synchronized void closeOnDiskFile(CompressAwarePath file) {
onDiskMapOutputs.add(file);
if (onDiskMapOutputs.size() >= (2 * ioSortFactor - 1)) {
onDiskMerger.startMerge(onDiskMapOutputs);
}
}
@Override
public RawKeyValueIterator close() throws Throwable {
// Wait for on-going merges to complete
if (memToMemMerger != null) {
memToMemMerger.close();
}
inMemoryMerger.close();
onDiskMerger.close();
List<InMemoryMapOutput<K, V>> memory =
new ArrayList<InMemoryMapOutput<K, V>>(inMemoryMergedMapOutputs);
inMemoryMergedMapOutputs.clear();
memory.addAll(inMemoryMapOutputs);
inMemoryMapOutputs.clear();
List<CompressAwarePath> disk = new ArrayList<CompressAwarePath>(onDiskMapOutputs);
onDiskMapOutputs.clear();
return finalMerge(jobConf, rfs, memory, disk);
}
private class IntermediateMemoryToMemoryMerger
extends MergeThread<InMemoryMapOutput<K, V>, K, V> {
public IntermediateMemoryToMemoryMerger(MergeManagerImpl<K, V> manager,
int mergeFactor) {
super(manager, mergeFactor, exceptionReporter);
setName("InMemoryMerger - Thread to do in-memory merge of in-memory " +
"shuffled map-outputs");
setDaemon(true);
}
@Override
public void merge(List<InMemoryMapOutput<K, V>> inputs) throws IOException {
if (inputs == null || inputs.size() == 0) {
return;
}
TaskAttemptID dummyMapId = inputs.get(0).getMapId();
List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
long mergeOutputSize =
createInMemorySegments(inputs, inMemorySegments, 0);
int noInMemorySegments = inMemorySegments.size();
InMemoryMapOutput<K, V> mergedMapOutputs =
unconditionalReserve(dummyMapId, mergeOutputSize, false);
Writer<K, V> writer =
new InMemoryWriter<K, V>(mergedMapOutputs.getArrayStream());
LOG.info("Initiating Memory-to-Memory merge with " + noInMemorySegments +
" segments of total-size: " + mergeOutputSize);
RawKeyValueIterator rIter =
Merger.merge(jobConf, rfs,
(Class<K>)jobConf.getMapOutputKeyClass(),
(Class<V>)jobConf.getMapOutputValueClass(),
inMemorySegments, inMemorySegments.size(),
new Path(reduceId.toString()),
(RawComparator<K>)jobConf.getOutputKeyComparator(),
reporter, null, null, null);
Merger.writeFile(rIter, writer, reporter, jobConf);
writer.close();
LOG.info(reduceId +
" Memory-to-Memory merge of the " + noInMemorySegments +
" files in-memory complete.");
// Note the output of the merge
closeInMemoryMergedFile(mergedMapOutputs);
}
}
private class InMemoryMerger extends MergeThread<InMemoryMapOutput<K,V>, K,V> {
public InMemoryMerger(MergeManagerImpl<K, V> manager) {
super(manager, Integer.MAX_VALUE, exceptionReporter);
setName
("InMemoryMerger - Thread to merge in-memory shuffled map-outputs");
setDaemon(true);
}
@Override
public void merge(List<InMemoryMapOutput<K,V>> inputs) throws IOException {
if (inputs == null || inputs.size() == 0) {
return;
}
      //Name this output file the same as the first file in the current list
      //of in-memory files (that name is guaranteed to be absent on disk right
      //now, so we don't overwrite a previously created spill). We also need
      //to create the output file now, since it is not guaranteed to still be
      //present after merge is called (we delete empty files as soon as we see
      //them in the merge method).
      //figure out the mapId
TaskAttemptID mapId = inputs.get(0).getMapId();
TaskID mapTaskId = mapId.getTaskID();
List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
long mergeOutputSize =
createInMemorySegments(inputs, inMemorySegments,0);
int noInMemorySegments = inMemorySegments.size();
Path outputPath =
mapOutputFile.getInputFileForWrite(mapTaskId,
mergeOutputSize).suffix(
Task.MERGED_OUTPUT_PREFIX);
FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath));
Writer<K, V> writer = new Writer<K, V>(jobConf, out,
(Class<K>) jobConf.getMapOutputKeyClass(),
(Class<V>) jobConf.getMapOutputValueClass(), codec, null, true);
RawKeyValueIterator rIter = null;
CompressAwarePath compressAwarePath;
try {
LOG.info("Initiating in-memory merge with " + noInMemorySegments +
" segments...");
rIter = Merger.merge(jobConf, rfs,
(Class<K>)jobConf.getMapOutputKeyClass(),
(Class<V>)jobConf.getMapOutputValueClass(),
inMemorySegments, inMemorySegments.size(),
new Path(reduceId.toString()),
(RawComparator<K>)jobConf.getOutputKeyComparator(),
reporter, spilledRecordsCounter, null, null);
if (null == combinerClass) {
Merger.writeFile(rIter, writer, reporter, jobConf);
} else {
combineCollector.setWriter(writer);
combineAndSpill(rIter, reduceCombineInputCounter);
}
writer.close();
compressAwarePath = new CompressAwarePath(outputPath,
writer.getRawLength(), writer.getCompressedLength());
LOG.info(reduceId +
" Merge of the " + noInMemorySegments +
" files in-memory complete." +
" Local file is " + outputPath + " of size " +
localFS.getFileStatus(outputPath).getLen());
} catch (IOException e) {
//make sure that we delete the ondisk file that we created
//earlier when we invoked cloneFileAttributes
localFS.delete(outputPath, true);
throw e;
}
// Note the output of the merge
closeOnDiskFile(compressAwarePath);
}
}
private class OnDiskMerger extends MergeThread<CompressAwarePath,K,V> {
public OnDiskMerger(MergeManagerImpl<K, V> manager) {
super(manager, ioSortFactor, exceptionReporter);
setName("OnDiskMerger - Thread to merge on-disk map-outputs");
setDaemon(true);
}
@Override
public void merge(List<CompressAwarePath> inputs) throws IOException {
// sanity check
if (inputs == null || inputs.isEmpty()) {
LOG.info("No ondisk files to merge...");
return;
}
long approxOutputSize = 0;
int bytesPerSum =
jobConf.getInt("io.bytes.per.checksum", 512);
LOG.info("OnDiskMerger: We have " + inputs.size() +
" map outputs on disk. Triggering merge...");
// 1. Prepare the list of files to be merged.
for (CompressAwarePath file : inputs) {
approxOutputSize += localFS.getFileStatus(file).getLen();
}
// add the checksum length
approxOutputSize +=
ChecksumFileSystem.getChecksumLength(approxOutputSize, bytesPerSum);
// 2. Start the on-disk merge process
Path outputPath =
localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(),
approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX);
FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath));
Writer<K, V> writer = new Writer<K, V>(jobConf, out,
(Class<K>) jobConf.getMapOutputKeyClass(),
(Class<V>) jobConf.getMapOutputValueClass(), codec, null, true);
RawKeyValueIterator iter = null;
CompressAwarePath compressAwarePath;
Path tmpDir = new Path(reduceId.toString());
try {
iter = Merger.merge(jobConf, rfs,
(Class<K>) jobConf.getMapOutputKeyClass(),
(Class<V>) jobConf.getMapOutputValueClass(),
codec, inputs.toArray(new Path[inputs.size()]),
true, ioSortFactor, tmpDir,
(RawComparator<K>) jobConf.getOutputKeyComparator(),
reporter, spilledRecordsCounter, null,
mergedMapOutputsCounter, null);
Merger.writeFile(iter, writer, reporter, jobConf);
writer.close();
compressAwarePath = new CompressAwarePath(outputPath,
writer.getRawLength(), writer.getCompressedLength());
} catch (IOException e) {
localFS.delete(outputPath, true);
throw e;
}
closeOnDiskFile(compressAwarePath);
LOG.info(reduceId +
" Finished merging " + inputs.size() +
" map output files on disk of total-size " +
approxOutputSize + "." +
" Local output file is " + outputPath + " of size " +
localFS.getFileStatus(outputPath).getLen());
}
}
private void combineAndSpill(
RawKeyValueIterator kvIter,
Counters.Counter inCounter) throws IOException {
JobConf job = jobConf;
Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
RawComparator<K> comparator =
(RawComparator<K>)job.getCombinerKeyGroupingComparator();
try {
CombineValuesIterator values = new CombineValuesIterator(
kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
inCounter);
while (values.more()) {
combiner.reduce(values.getKey(), values, combineCollector,
Reporter.NULL);
values.nextKey();
}
} finally {
combiner.close();
}
}
private long createInMemorySegments(List<InMemoryMapOutput<K,V>> inMemoryMapOutputs,
List<Segment<K, V>> inMemorySegments,
long leaveBytes
) throws IOException {
long totalSize = 0L;
    // fullSize could come from the RamManager, but files can be
    // closed but not yet present in inMemoryMapOutputs
long fullSize = 0L;
for (InMemoryMapOutput<K,V> mo : inMemoryMapOutputs) {
fullSize += mo.getMemory().length;
}
while(fullSize > leaveBytes) {
InMemoryMapOutput<K,V> mo = inMemoryMapOutputs.remove(0);
byte[] data = mo.getMemory();
long size = data.length;
totalSize += size;
fullSize -= size;
Reader<K,V> reader = new InMemoryReader<K,V>(MergeManagerImpl.this,
mo.getMapId(),
data, 0, (int)size, jobConf);
inMemorySegments.add(new Segment<K,V>(reader, true,
(mo.isPrimaryMapOutput() ?
mergedMapOutputsCounter : null)));
}
return totalSize;
}
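  /**
   * Adapts a {@link RawKeyValueIterator} to the {@link IFile.Reader}
   * interface, so the output of one merge can be fed into a subsequent
   * merge as a segment.
   */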
class RawKVIteratorReader extends IFile.Reader<K,V> {
private final RawKeyValueIterator kvIter;
public RawKVIteratorReader(RawKeyValueIterator kvIter, long size)
throws IOException {
super(null, null, size, null, spilledRecordsCounter);
this.kvIter = kvIter;
}
public boolean nextRawKey(DataInputBuffer key) throws IOException {
if (kvIter.next()) {
final DataInputBuffer kb = kvIter.getKey();
final int kp = kb.getPosition();
final int klen = kb.getLength() - kp;
key.reset(kb.getData(), kp, klen);
bytesRead += klen;
return true;
}
return false;
}
public void nextRawValue(DataInputBuffer value) throws IOException {
final DataInputBuffer vb = kvIter.getValue();
final int vp = vb.getPosition();
final int vlen = vb.getLength() - vp;
value.reset(vb.getData(), vp, vlen);
bytesRead += vlen;
}
public long getPosition() throws IOException {
return bytesRead;
}
public void close() throws IOException {
kvIter.close();
}
}
@VisibleForTesting
final long getMaxInMemReduceLimit() {
final float maxRedPer =
jobConf.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f);
if (maxRedPer > 1.0 || maxRedPer < 0.0) {
throw new RuntimeException(maxRedPer + ": "
+ MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT
+ " must be a float between 0 and 1.0");
}
return (long)(memoryLimit * maxRedPer);
}
private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs,
List<InMemoryMapOutput<K,V>> inMemoryMapOutputs,
List<CompressAwarePath> onDiskMapOutputs
) throws IOException {
LOG.info("finalMerge called with " +
inMemoryMapOutputs.size() + " in-memory map-outputs and " +
onDiskMapOutputs.size() + " on-disk map-outputs");
final long maxInMemReduce = getMaxInMemReduceLimit();
// merge config params
Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
Class<V> valueClass = (Class<V>)job.getMapOutputValueClass();
boolean keepInputs = job.getKeepFailedTaskFiles();
final Path tmpDir = new Path(reduceId.toString());
final RawComparator<K> comparator =
(RawComparator<K>)job.getOutputKeyComparator();
// segments required to vacate memory
List<Segment<K,V>> memDiskSegments = new ArrayList<Segment<K,V>>();
long inMemToDiskBytes = 0;
boolean mergePhaseFinished = false;
if (inMemoryMapOutputs.size() > 0) {
TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID();
inMemToDiskBytes = createInMemorySegments(inMemoryMapOutputs,
memDiskSegments,
maxInMemReduce);
final int numMemDiskSegments = memDiskSegments.size();
if (numMemDiskSegments > 0 &&
ioSortFactor > onDiskMapOutputs.size()) {
// If we reach here, it implies that we have less than io.sort.factor
// disk segments and this will be incremented by 1 (result of the
// memory segments merge). Since this total would still be
// <= io.sort.factor, we will not do any more intermediate merges,
// the merge of all these disk segments would be directly fed to the
// reduce method
mergePhaseFinished = true;
// must spill to disk, but can't retain in-mem for intermediate merge
final Path outputPath =
mapOutputFile.getInputFileForWrite(mapId,
inMemToDiskBytes).suffix(
Task.MERGED_OUTPUT_PREFIX);
final RawKeyValueIterator rIter = Merger.merge(job, fs,
keyClass, valueClass, memDiskSegments, numMemDiskSegments,
tmpDir, comparator, reporter, spilledRecordsCounter, null,
mergePhase);
FSDataOutputStream out = CryptoUtils.wrapIfNecessary(job, fs.create(outputPath));
Writer<K, V> writer = new Writer<K, V>(job, out, keyClass, valueClass,
codec, null, true);
try {
Merger.writeFile(rIter, writer, reporter, job);
writer.close();
onDiskMapOutputs.add(new CompressAwarePath(outputPath,
writer.getRawLength(), writer.getCompressedLength()));
writer = null;
// add to list of final disk outputs.
} catch (IOException e) {
if (null != outputPath) {
try {
fs.delete(outputPath, true);
} catch (IOException ie) {
// NOTHING
}
}
throw e;
} finally {
if (null != writer) {
writer.close();
}
}
LOG.info("Merged " + numMemDiskSegments + " segments, " +
inMemToDiskBytes + " bytes to disk to satisfy " +
"reduce memory limit");
inMemToDiskBytes = 0;
memDiskSegments.clear();
} else if (inMemToDiskBytes != 0) {
LOG.info("Keeping " + numMemDiskSegments + " segments, " +
inMemToDiskBytes + " bytes in memory for " +
"intermediate, on-disk merge");
}
}
// segments on disk
List<Segment<K,V>> diskSegments = new ArrayList<Segment<K,V>>();
long onDiskBytes = inMemToDiskBytes;
long rawBytes = inMemToDiskBytes;
CompressAwarePath[] onDisk = onDiskMapOutputs.toArray(
new CompressAwarePath[onDiskMapOutputs.size()]);
for (CompressAwarePath file : onDisk) {
long fileLength = fs.getFileStatus(file).getLen();
onDiskBytes += fileLength;
rawBytes += (file.getRawDataLength() > 0) ? file.getRawDataLength() : fileLength;
LOG.debug("Disk file: " + file + " Length is " + fileLength);
diskSegments.add(new Segment<K, V>(job, fs, file, codec, keepInputs,
(file.toString().endsWith(
Task.MERGED_OUTPUT_PREFIX) ?
null : mergedMapOutputsCounter), file.getRawDataLength()
));
}
LOG.info("Merging " + onDisk.length + " files, " +
onDiskBytes + " bytes from disk");
Collections.sort(diskSegments, new Comparator<Segment<K,V>>() {
public int compare(Segment<K, V> o1, Segment<K, V> o2) {
if (o1.getLength() == o2.getLength()) {
return 0;
}
return o1.getLength() < o2.getLength() ? -1 : 1;
}
});
    // build the final list of segments from the merged segments backed by
    // disk + in-mem
List<Segment<K,V>> finalSegments = new ArrayList<Segment<K,V>>();
long inMemBytes = createInMemorySegments(inMemoryMapOutputs,
finalSegments, 0);
LOG.info("Merging " + finalSegments.size() + " segments, " +
inMemBytes + " bytes from memory into reduce");
if (0 != onDiskBytes) {
final int numInMemSegments = memDiskSegments.size();
diskSegments.addAll(0, memDiskSegments);
memDiskSegments.clear();
// Pass mergePhase only if there is a going to be intermediate
// merges. See comment where mergePhaseFinished is being set
Progress thisPhase = (mergePhaseFinished) ? null : mergePhase;
RawKeyValueIterator diskMerge = Merger.merge(
job, fs, keyClass, valueClass, codec, diskSegments,
ioSortFactor, numInMemSegments, tmpDir, comparator,
reporter, false, spilledRecordsCounter, null, thisPhase);
diskSegments.clear();
if (0 == finalSegments.size()) {
return diskMerge;
}
finalSegments.add(new Segment<K,V>(
new RawKVIteratorReader(diskMerge, onDiskBytes), true, rawBytes));
}
return Merger.merge(job, fs, keyClass, valueClass,
finalSegments, finalSegments.size(), tmpDir,
comparator, reporter, spilledRecordsCounter, null,
null);
}
static class CompressAwarePath extends Path {
private long rawDataLength;
private long compressedSize;
public CompressAwarePath(Path path, long rawDataLength, long compressSize) {
super(path.toUri());
this.rawDataLength = rawDataLength;
this.compressedSize = compressSize;
}
public long getRawDataLength() {
return rawDataLength;
}
public long getCompressedSize() {
return compressedSize;
}
@Override
public boolean equals(Object other) {
return super.equals(other);
}
@Override
public int hashCode() {
return super.hashCode();
}
@Override
public int compareTo(Object obj) {
if(obj instanceof CompressAwarePath) {
CompressAwarePath compPath = (CompressAwarePath) obj;
if(this.compressedSize < compPath.getCompressedSize()) {
return -1;
} else if (this.getCompressedSize() > compPath.getCompressedSize()) {
return 1;
}
// Not returning 0 here so that objects with the same size (but
// different paths) are still added to the TreeSet.
}
return super.compareTo(obj);
}
}
}
| 34,225 | 38.25 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.InputStream;
import java.io.IOException;
import java.util.Comparator;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
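/**
 * A single map output being shuffled to a reduce, either into memory or
 * onto disk; concrete subclasses implement the actual transfer, commit and
 * abort logic.
 */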
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public abstract class MapOutput<K, V> {
private static AtomicInteger ID = new AtomicInteger(0);
private final int id;
private final TaskAttemptID mapId;
private final long size;
private final boolean primaryMapOutput;
public MapOutput(TaskAttemptID mapId, long size, boolean primaryMapOutput) {
this.id = ID.incrementAndGet();
this.mapId = mapId;
this.size = size;
this.primaryMapOutput = primaryMapOutput;
}
public boolean isPrimaryMapOutput() {
return primaryMapOutput;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof MapOutput) {
return id == ((MapOutput)obj).id;
}
return false;
}
@Override
public int hashCode() {
return id;
}
public TaskAttemptID getMapId() {
return mapId;
}
public long getSize() {
return size;
}
public abstract void shuffle(MapHost host, InputStream input,
long compressedLength,
long decompressedLength,
ShuffleClientMetrics metrics,
Reporter reporter) throws IOException;
public abstract void commit() throws IOException;
public abstract void abort();
public abstract String getDescription();
public String toString() {
return "MapOutput(" + mapId + ", " + getDescription() + ")";
}
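  /**
   * Orders map outputs by size, smallest first, breaking ties by the unique
   * id so that distinct outputs of equal size never compare as equal.
   */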
public static class MapOutputComparator<K, V>
implements Comparator<MapOutput<K, V>> {
public int compare(MapOutput<K, V> o1, MapOutput<K, V> o2) {
if (o1.id == o2.id) {
return 0;
}
if (o1.size < o2.size) {
return -1;
} else if (o1.size > o2.size) {
return 1;
}
if (o1.id < o2.id) {
return -1;
} else {
return 1;
}
}
}
}
| 3,116 | 26.342105 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.crypto.SecretKey;
import javax.net.ssl.HttpsURLConnection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import com.google.common.annotations.VisibleForTesting;
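/**
 * Shuffle fetcher thread: repeatedly picks a {@link MapHost} from the
 * scheduler, pulls its available map outputs over HTTP(S), and hands them
 * off to the {@link MergeManager}.
 */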
class Fetcher<K,V> extends Thread {
private static final Log LOG = LogFactory.getLog(Fetcher.class);
/** Number of ms before timing out a copy */
private static final int DEFAULT_STALLED_COPY_TIMEOUT = 3 * 60 * 1000;
/** Basic/unit connection timeout (in milliseconds) */
private final static int UNIT_CONNECT_TIMEOUT = 60 * 1000;
/* Default read timeout (in milliseconds) */
private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000;
protected final Reporter reporter;
private static enum ShuffleErrors{IO_ERROR, WRONG_LENGTH, BAD_ID, WRONG_MAP,
CONNECTION, WRONG_REDUCE}
private final static String SHUFFLE_ERR_GRP_NAME = "Shuffle Errors";
private final JobConf jobConf;
private final Counters.Counter connectionErrs;
private final Counters.Counter ioErrs;
private final Counters.Counter wrongLengthErrs;
private final Counters.Counter badIdErrs;
private final Counters.Counter wrongMapErrs;
private final Counters.Counter wrongReduceErrs;
protected final MergeManager<K,V> merger;
protected final ShuffleSchedulerImpl<K,V> scheduler;
protected final ShuffleClientMetrics metrics;
protected final ExceptionReporter exceptionReporter;
protected final int id;
private static int nextId = 0;
protected final int reduce;
private final int connectionTimeout;
private final int readTimeout;
private final int fetchRetryTimeout;
private final int fetchRetryInterval;
private final boolean fetchRetryEnabled;
private final SecretKey shuffleSecretKey;
protected HttpURLConnection connection;
private volatile boolean stopped = false;
  // Initial value is 0, which means no retry has happened yet.
private long retryStartTime = 0;
private static boolean sslShuffle;
private static SSLFactory sslFactory;
public Fetcher(JobConf job, TaskAttemptID reduceId,
ShuffleSchedulerImpl<K,V> scheduler, MergeManager<K,V> merger,
Reporter reporter, ShuffleClientMetrics metrics,
ExceptionReporter exceptionReporter, SecretKey shuffleKey) {
this(job, reduceId, scheduler, merger, reporter, metrics,
exceptionReporter, shuffleKey, ++nextId);
}
@VisibleForTesting
Fetcher(JobConf job, TaskAttemptID reduceId,
ShuffleSchedulerImpl<K,V> scheduler, MergeManager<K,V> merger,
Reporter reporter, ShuffleClientMetrics metrics,
ExceptionReporter exceptionReporter, SecretKey shuffleKey,
int id) {
this.jobConf = job;
this.reporter = reporter;
this.scheduler = scheduler;
this.merger = merger;
this.metrics = metrics;
this.exceptionReporter = exceptionReporter;
this.id = id;
this.reduce = reduceId.getTaskID().getId();
this.shuffleSecretKey = shuffleKey;
ioErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME,
ShuffleErrors.IO_ERROR.toString());
wrongLengthErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME,
ShuffleErrors.WRONG_LENGTH.toString());
badIdErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME,
ShuffleErrors.BAD_ID.toString());
wrongMapErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME,
ShuffleErrors.WRONG_MAP.toString());
connectionErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME,
ShuffleErrors.CONNECTION.toString());
wrongReduceErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME,
ShuffleErrors.WRONG_REDUCE.toString());
this.connectionTimeout =
job.getInt(MRJobConfig.SHUFFLE_CONNECT_TIMEOUT,
DEFAULT_STALLED_COPY_TIMEOUT);
this.readTimeout =
job.getInt(MRJobConfig.SHUFFLE_READ_TIMEOUT, DEFAULT_READ_TIMEOUT);
this.fetchRetryInterval = job.getInt(MRJobConfig.SHUFFLE_FETCH_RETRY_INTERVAL_MS,
MRJobConfig.DEFAULT_SHUFFLE_FETCH_RETRY_INTERVAL_MS);
this.fetchRetryTimeout = job.getInt(MRJobConfig.SHUFFLE_FETCH_RETRY_TIMEOUT_MS,
DEFAULT_STALLED_COPY_TIMEOUT);
boolean shuffleFetchEnabledDefault = job.getBoolean(
YarnConfiguration.NM_RECOVERY_ENABLED,
YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED);
this.fetchRetryEnabled = job.getBoolean(
MRJobConfig.SHUFFLE_FETCH_RETRY_ENABLED,
shuffleFetchEnabledDefault);
setName("fetcher#" + id);
setDaemon(true);
synchronized (Fetcher.class) {
sslShuffle = job.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);
if (sslShuffle && sslFactory == null) {
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, job);
try {
sslFactory.init();
} catch (Exception ex) {
sslFactory.destroy();
throw new RuntimeException(ex);
}
}
}
}
public void run() {
try {
while (!stopped && !Thread.currentThread().isInterrupted()) {
MapHost host = null;
try {
// If merge is on, block
merger.waitForResource();
// Get a host to shuffle from
host = scheduler.getHost();
metrics.threadBusy();
// Shuffle
copyFromHost(host);
} finally {
if (host != null) {
scheduler.freeHost(host);
metrics.threadFree();
}
}
}
} catch (InterruptedException ie) {
return;
} catch (Throwable t) {
exceptionReporter.reportException(t);
}
}
@Override
public void interrupt() {
try {
closeConnection();
} finally {
super.interrupt();
}
}
public void shutDown() throws InterruptedException {
this.stopped = true;
interrupt();
try {
join(5000);
} catch (InterruptedException ie) {
LOG.warn("Got interrupt while joining " + getName(), ie);
}
if (sslFactory != null) {
sslFactory.destroy();
}
}
@VisibleForTesting
protected synchronized void openConnection(URL url)
throws IOException {
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
if (sslShuffle) {
HttpsURLConnection httpsConn = (HttpsURLConnection) conn;
try {
httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory());
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier());
}
connection = conn;
}
protected synchronized void closeConnection() {
// Note that HttpURLConnection::disconnect() doesn't trash the object.
// connect() attempts to reconnect in a loop, possibly reversing this
if (connection != null) {
connection.disconnect();
}
}
private void abortConnect(MapHost host, Set<TaskAttemptID> remaining) {
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
closeConnection();
}
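  /**
   * Connect to the given shuffle URL and open its input stream. On failure,
   * the remaining maps are marked failed and put back on the scheduler, and
   * null is returned.
   */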
private DataInputStream openShuffleUrl(MapHost host,
Set<TaskAttemptID> remaining, URL url) {
DataInputStream input = null;
try {
setupConnectionsWithRetry(host, remaining, url);
if (stopped) {
abortConnect(host, remaining);
} else {
input = new DataInputStream(connection.getInputStream());
}
} catch (IOException ie) {
boolean connectExcpt = ie instanceof ConnectException;
ioErrs.increment(1);
LOG.warn("Failed to connect to " + host + " with " + remaining.size() +
" map outputs", ie);
// If connect did not succeed, just mark all the maps as failed,
// indirectly penalizing the host
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: remaining) {
scheduler.copyFailed(left, host, false, connectExcpt);
}
// Add back all the remaining maps, WITHOUT marking them as failed
for(TaskAttemptID left: remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
return input;
}
/**
* The crux of the matter...
*
* @param host {@link MapHost} from which we need to
* shuffle available map-outputs.
*/
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
// reset retryStartTime for a new host
retryStartTime = 0;
// Get completed maps on 'host'
List<TaskAttemptID> maps = scheduler.getMapsForHost(host);
// Sanity check to catch hosts with only 'OBSOLETE' maps,
// especially at the tail of large jobs
if (maps.size() == 0) {
return;
}
if(LOG.isDebugEnabled()) {
LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: "
+ maps);
}
// List of maps to be fetched yet
Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
// Construct the url and connect
URL url = getMapOutputURL(host, maps);
DataInputStream input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
try {
// Loop through available map-outputs and fetch them
      // On any error, failedTasks is not null and we exit
// after putting back the remaining maps to the
// yet_to_be_fetched list and marking the failed tasks.
TaskAttemptID[] failedTasks = null;
while (!remaining.isEmpty() && failedTasks == null) {
try {
failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
} catch (IOException e) {
IOUtils.cleanup(LOG, input);
//
// Setup connection again if disconnected by NM
connection.disconnect();
// Get map output from remaining tasks only.
url = getMapOutputURL(host, remaining);
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
}
}
if(failedTasks != null && failedTasks.length > 0) {
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: failedTasks) {
scheduler.copyFailed(left, host, true, false);
}
}
// Sanity check
if (failedTasks == null && !remaining.isEmpty()) {
throw new IOException("server didn't return all expected map outputs: "
+ remaining.size() + " left.");
}
input.close();
input = null;
} finally {
if (input != null) {
IOUtils.cleanup(LOG, input);
input = null;
}
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
}
private void setupConnectionsWithRetry(MapHost host,
Set<TaskAttemptID> remaining, URL url) throws IOException {
openConnectionWithRetry(host, remaining, url);
if (stopped) {
return;
}
// generate hash of the url
String msgToEncode = SecureShuffleUtils.buildMsgFrom(url);
String encHash = SecureShuffleUtils.hashFromString(msgToEncode,
shuffleSecretKey);
setupShuffleConnection(encHash);
connect(connection, connectionTimeout);
// verify that the thread wasn't stopped during calls to connect
if (stopped) {
return;
}
verifyConnection(url, msgToEncode, encHash);
}
private void openConnectionWithRetry(MapHost host,
Set<TaskAttemptID> remaining, URL url) throws IOException {
long startTime = Time.monotonicNow();
boolean shouldWait = true;
while (shouldWait) {
try {
openConnection(url);
shouldWait = false;
} catch (IOException e) {
if (!fetchRetryEnabled) {
// throw exception directly if fetch's retry is not enabled
throw e;
}
if ((Time.monotonicNow() - startTime) >= this.fetchRetryTimeout) {
LOG.warn("Failed to connect to host: " + url + "after "
+ fetchRetryTimeout + " milliseconds.");
throw e;
}
try {
Thread.sleep(this.fetchRetryInterval);
} catch (InterruptedException e1) {
if (stopped) {
return;
}
}
}
}
}
private void verifyConnection(URL url, String msgToEncode, String encHash)
throws IOException {
// Validate response code
int rc = connection.getResponseCode();
if (rc != HttpURLConnection.HTTP_OK) {
throw new IOException(
"Got invalid response code " + rc + " from " + url +
": " + connection.getResponseMessage());
}
// get the shuffle version
if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
|| !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))) {
throw new IOException("Incompatible shuffle response version");
}
// get the replyHash which is HMac of the encHash we sent to the server
String replyHash = connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH);
if(replyHash==null) {
throw new IOException("security validation of TT Map output failed");
}
LOG.debug("url="+msgToEncode+";encHash="+encHash+";replyHash="+replyHash);
// verify that replyHash is HMac of encHash
SecureShuffleUtils.verifyReply(replyHash, encHash, shuffleSecretKey);
LOG.debug("for url="+msgToEncode+" sent hash and received reply");
}
private void setupShuffleConnection(String encHash) {
// put url hash into http header
connection.addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
// set the read timeout
connection.setReadTimeout(readTimeout);
// put shuffle version into http header
connection.addRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
connection.addRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
}
private static TaskAttemptID[] EMPTY_ATTEMPT_ID_ARRAY = new TaskAttemptID[0];
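  /**
   * Copy one map output from the open shuffle stream into the location
   * reserved by the merge manager.
   * @return null if the output was shuffled successfully, an empty array if
   *         the copy should simply be retried later, or the attempt ids that
   *         must be marked as failed
   */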
private TaskAttemptID[] copyMapOutput(MapHost host,
DataInputStream input,
Set<TaskAttemptID> remaining,
boolean canRetry) throws IOException {
MapOutput<K,V> mapOutput = null;
TaskAttemptID mapId = null;
long decompressedLength = -1;
long compressedLength = -1;
try {
long startTime = Time.monotonicNow();
int forReduce = -1;
//Read the shuffle header
try {
ShuffleHeader header = new ShuffleHeader();
header.readFields(input);
mapId = TaskAttemptID.forName(header.mapId);
compressedLength = header.compressedLength;
decompressedLength = header.uncompressedLength;
forReduce = header.forReduce;
} catch (IllegalArgumentException e) {
badIdErrs.increment(1);
LOG.warn("Invalid map id ", e);
//Don't know which one was bad, so consider all of them as bad
return remaining.toArray(new TaskAttemptID[remaining.size()]);
}
InputStream is = input;
is = CryptoUtils.wrapIfNecessary(jobConf, is, compressedLength);
compressedLength -= CryptoUtils.cryptoPadding(jobConf);
decompressedLength -= CryptoUtils.cryptoPadding(jobConf);
// Do some basic sanity verification
if (!verifySanity(compressedLength, decompressedLength, forReduce,
remaining, mapId)) {
return new TaskAttemptID[] {mapId};
}
      if (LOG.isDebugEnabled()) {
LOG.debug("header: " + mapId + ", len: " + compressedLength +
", decomp len: " + decompressedLength);
}
// Get the location for the map output - either in-memory or on-disk
try {
mapOutput = merger.reserve(mapId, decompressedLength, id);
} catch (IOException ioe) {
// kill this reduce attempt
ioErrs.increment(1);
scheduler.reportLocalError(ioe);
return EMPTY_ATTEMPT_ID_ARRAY;
}
// Check if we can shuffle *now* ...
if (mapOutput == null) {
LOG.info("fetcher#" + id + " - MergeManager returned status WAIT ...");
//Not an error but wait to process data.
return EMPTY_ATTEMPT_ID_ARRAY;
}
      // The codecs for lzo, lz4, snappy, bz2, etc. throw java.lang.InternalError
// on decompression failures. Catching and re-throwing as IOException
// to allow fetch failure logic to be processed
try {
// Go!
LOG.info("fetcher#" + id + " about to shuffle output of map "
+ mapOutput.getMapId() + " decomp: " + decompressedLength
+ " len: " + compressedLength + " to " + mapOutput.getDescription());
mapOutput.shuffle(host, is, compressedLength, decompressedLength,
metrics, reporter);
} catch (java.lang.InternalError e) {
LOG.warn("Failed to shuffle for fetcher#"+id, e);
throw new IOException(e);
}
// Inform the shuffle scheduler
long endTime = Time.monotonicNow();
      // Reset retryStartTime since the map task made progress, in case it
      // was retried before.
retryStartTime = 0;
scheduler.copySucceeded(mapId, host, compressedLength,
startTime, endTime, mapOutput);
// Note successful shuffle
remaining.remove(mapId);
metrics.successFetch();
return null;
} catch (IOException ioe) {
if (mapOutput != null) {
mapOutput.abort();
}
if (canRetry) {
checkTimeoutOrRetry(host, ioe);
}
ioErrs.increment(1);
if (mapId == null || mapOutput == null) {
LOG.warn("fetcher#" + id + " failed to read map header" +
mapId + " decomp: " +
decompressedLength + ", " + compressedLength, ioe);
if(mapId == null) {
return remaining.toArray(new TaskAttemptID[remaining.size()]);
} else {
return new TaskAttemptID[] {mapId};
}
}
LOG.warn("Failed to shuffle output of " + mapId +
" from " + host.getHostName(), ioe);
// Inform the shuffle-scheduler
metrics.failedFetch();
return new TaskAttemptID[] {mapId};
}
}
  /**
   * Check whether the retry window has timed out. If it has not, rethrow the
   * exception so the caller starts a new round of retry; otherwise log the
   * timeout and let the fetch be reported as failed.
   */
private void checkTimeoutOrRetry(MapHost host, IOException ioe)
throws IOException {
// First time to retry.
long currentTime = Time.monotonicNow();
if (retryStartTime == 0) {
retryStartTime = currentTime;
}
    // The retry window has not timed out yet; rethrow to trigger another retry.
if (currentTime - retryStartTime < this.fetchRetryTimeout) {
LOG.warn("Shuffle output from " + host.getHostName() +
" failed, retry it.", ioe);
throw ioe;
} else {
      // Timed out; fall through so this fetch is reported as failed.
      LOG.warn("Timeout for copying MapOutput with retry on host " + host
          + " after " + fetchRetryTimeout + " milliseconds.");
}
}
  /**
   * Do some basic verification on the input received -- being defensive.
   * @param compressedLength the compressed length from the shuffle header
   * @param decompressedLength the decompressed length from the shuffle header
   * @param forReduce the reduce partition this output is destined for
   * @param remaining the set of map attempts still to be fetched
   * @param mapId the map attempt whose output is being verified
   * @return true if the verification succeeded, false otherwise
   */
private boolean verifySanity(long compressedLength, long decompressedLength,
int forReduce, Set<TaskAttemptID> remaining, TaskAttemptID mapId) {
if (compressedLength < 0 || decompressedLength < 0) {
wrongLengthErrs.increment(1);
LOG.warn(getName() + " invalid lengths in map output header: id: " +
mapId + " len: " + compressedLength + ", decomp len: " +
decompressedLength);
return false;
}
if (forReduce != reduce) {
wrongReduceErrs.increment(1);
LOG.warn(getName() + " data for the wrong reduce map: " +
mapId + " len: " + compressedLength + " decomp len: " +
decompressedLength + " for reduce " + forReduce);
return false;
}
// Sanity check
if (!remaining.contains(mapId)) {
wrongMapErrs.increment(1);
LOG.warn("Invalid map-output! Received output for " + mapId);
return false;
}
return true;
}
  /**
   * Create the map-output URL. It will contain all the map ids
   * separated by commas.
   * @param host the host to fetch from
   * @param maps the map attempts whose output is wanted
   * @return the URL to fetch the map outputs from
   * @throws MalformedURLException
   */
private URL getMapOutputURL(MapHost host, Collection<TaskAttemptID> maps
) throws MalformedURLException {
// Get the base url
    StringBuilder url = new StringBuilder(host.getBaseUrl());
boolean first = true;
for (TaskAttemptID mapId : maps) {
if (!first) {
url.append(",");
}
url.append(mapId);
first = false;
}
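    // Illustrative shape of the result (the exact query form depends on the
    // base URL supplied by the MapHost), e.g.:
    //   http://<host>:<port>/mapOutput?job=<jobid>&reduce=<r>&map=attempt_1,attempt_2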
LOG.debug("MapOutput URL for " + host + " -> " + url.toString());
return new URL(url.toString());
}
  /**
   * The connection establishment is attempted multiple times and is given up
   * only on the last failure. Instead of connecting once with a timeout of
   * X, we try connecting with a smaller timeout x < X multiple times until
   * the total time X has elapsed.
   */
private void connect(URLConnection connection, int connectionTimeout)
throws IOException {
int unit = 0;
if (connectionTimeout < 0) {
throw new IOException("Invalid timeout "
+ "[timeout = " + connectionTimeout + " ms]");
} else if (connectionTimeout > 0) {
unit = Math.min(UNIT_CONNECT_TIMEOUT, connectionTimeout);
}
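    // Worked example (illustrative values): with connectionTimeout = 180000
    // ms and UNIT_CONNECT_TIMEOUT = 60000 ms, unit is 60000 ms, so up to
    // three 60-second connect attempts are made before the overall timeout
    // is exceeded.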
long startTime = Time.monotonicNow();
long lastTime = startTime;
int attempts = 0;
// set the connect timeout to the unit-connect-timeout
connection.setConnectTimeout(unit);
while (true) {
try {
attempts++;
connection.connect();
break;
} catch (IOException ioe) {
long currentTime = Time.monotonicNow();
long retryTime = currentTime - startTime;
long leftTime = connectionTimeout - retryTime;
long timeSinceLastIteration = currentTime - lastTime;
// throw an exception if we have waited for timeout amount of time
        // note that the updated value of the timeout is used here
if (leftTime <= 0) {
          int retryTimeInSeconds = (int) (retryTime / 1000);
LOG.error("Connection retry failed with " + attempts +
" attempts in " + retryTimeInSeconds + " seconds");
throw ioe;
}
// reset the connect timeout for the last try
if (leftTime < unit) {
unit = (int)leftTime;
// reset the connect time out for the final connect
connection.setConnectTimeout(unit);
}
if (timeSinceLastIteration < unit) {
try {
            // sleep for the remaining portion of the unit
sleep(unit - timeSinceLastIteration);
} catch (InterruptedException e) {
LOG.warn("Sleep in connection retry get interrupted.");
if (stopped) {
return;
}
}
}
// update the total remaining connect-timeout
lastTime = Time.monotonicNow();
}
}
}
}
| 25,472 | 33.70436 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
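/**
 * A producer/consumer thread that batches shuffle inputs handed to it via
 * startMerge() and merges each batch on its own thread, reporting any
 * failure through the ExceptionReporter.
 */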
abstract class MergeThread<T,K,V> extends Thread {
private static final Log LOG = LogFactory.getLog(MergeThread.class);
  private final AtomicInteger numPending = new AtomicInteger(0);
private LinkedList<List<T>> pendingToBeMerged;
protected final MergeManagerImpl<K,V> manager;
private final ExceptionReporter reporter;
private boolean closed = false;
private final int mergeFactor;
public MergeThread(MergeManagerImpl<K,V> manager, int mergeFactor,
ExceptionReporter reporter) {
this.pendingToBeMerged = new LinkedList<List<T>>();
this.manager = manager;
this.mergeFactor = mergeFactor;
this.reporter = reporter;
}
public synchronized void close() throws InterruptedException {
closed = true;
waitForMerge();
interrupt();
}
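  // Takes up to mergeFactor inputs from the caller's set (removing them)
  // and queues them as a single pending merge; any leftover inputs stay
  // in the set for a later merge.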
public void startMerge(Set<T> inputs) {
if (!closed) {
numPending.incrementAndGet();
List<T> toMergeInputs = new ArrayList<T>();
      Iterator<T> iter = inputs.iterator();
for (int ctr = 0; iter.hasNext() && ctr < mergeFactor; ++ctr) {
toMergeInputs.add(iter.next());
iter.remove();
}
LOG.info(getName() + ": Starting merge with " + toMergeInputs.size() +
" segments, while ignoring " + inputs.size() + " segments");
synchronized(pendingToBeMerged) {
pendingToBeMerged.addLast(toMergeInputs);
pendingToBeMerged.notifyAll();
}
}
}
public synchronized void waitForMerge() throws InterruptedException {
while (numPending.get() > 0) {
wait();
}
}
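  // Consumer loop: blocks until startMerge() enqueues a batch, merges it,
  // then decrements numPending and wakes any thread blocked in
  // waitForMerge().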
public void run() {
while (true) {
List<T> inputs = null;
try {
// Wait for notification to start the merge...
synchronized (pendingToBeMerged) {
          while (pendingToBeMerged.isEmpty()) {
pendingToBeMerged.wait();
}
// Pickup the inputs to merge.
inputs = pendingToBeMerged.removeFirst();
}
// Merge
merge(inputs);
} catch (InterruptedException ie) {
numPending.set(0);
return;
} catch(Throwable t) {
numPending.set(0);
reporter.reportException(t);
return;
} finally {
synchronized (this) {
numPending.decrementAndGet();
notifyAll();
}
}
}
}
public abstract void merge(List<T> inputs) throws IOException;
}
| 3,539 | 30.327434 | 77 |
java
|