repo (stringlengths 1-191, ⌀) | file (stringlengths 23-351) | code (stringlengths 0-5.32M) | file_length (int64 0-5.32M) | avg_line_length (float64 0-2.9k) | max_line_length (int64 0-288k) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.tools.CopyListing.*;
import org.apache.hadoop.tools.mapred.CopyMapper;
import org.apache.hadoop.tools.mapred.CopyOutputFormat;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.annotations.VisibleForTesting;
/**
* DistCp is the main driver-class for DistCpV2.
* For command-line use, DistCp::main() orchestrates the parsing of command-line
* parameters and the launch of the DistCp job.
* For programmatic use, a DistCp object can be constructed by specifying
* options (in a DistCpOptions object), and DistCp::execute() may be used to
* launch the copy-job. DistCp may alternatively be sub-classed to fine-tune
* behaviour.
*/
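/*
 * (Illustrative sketch, not part of the original source.) A minimal programmatic
 * invocation along the lines described above; the configuration and paths are
 * hypothetical:
 *
 *   Configuration conf = new Configuration();
 *   DistCpOptions options = new DistCpOptions(
 *       Arrays.asList(new Path("hdfs://nn1/src")),   // hypothetical source path(s)
 *       new Path("hdfs://nn2/dst"));                 // hypothetical target location
 *   Job job = new DistCp(conf, options).execute();   // launches the copy job and,
 *                                                    // by default, waits for completion
 */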
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DistCp extends Configured implements Tool {
/**
* Priority of the shutdown hook.
*/
static final int SHUTDOWN_HOOK_PRIORITY = 30;
static final Log LOG = LogFactory.getLog(DistCp.class);
private DistCpOptions inputOptions;
private Path metaFolder;
private static final String PREFIX = "_distcp";
private static final String WIP_PREFIX = "._WIP_";
private static final String DISTCP_DEFAULT_XML = "distcp-default.xml";
static final Random rand = new Random();
private boolean submitted;
private FileSystem jobFS;
/**
* Public Constructor. Creates DistCp object with specified input-parameters.
* (E.g. source-paths, target-location, etc.)
* @param inputOptions Options (indicating source-paths, target-location.)
* @param configuration The Hadoop configuration against which the Copy-mapper must run.
* @throws Exception
*/
public DistCp(Configuration configuration, DistCpOptions inputOptions) throws Exception {
Configuration config = new Configuration(configuration);
config.addResource(DISTCP_DEFAULT_XML);
setConf(config);
this.inputOptions = inputOptions;
this.metaFolder = createMetaFolderPath();
}
/**
* To be used with the ToolRunner. Not for public consumption.
*/
@VisibleForTesting
DistCp() {}
/**
* Implementation of Tool::run(). Orchestrates the copy of source file(s)
* to target location, by:
* 1. Creating a list of files to be copied to target.
* 2. Launching a Map-only job to copy the files. (Delegates to execute().)
* @param argv List of arguments passed to DistCp, from the ToolRunner.
* @return On success, DistCpConstants.SUCCESS (0). On failure, the corresponding DistCpConstants error code.
*/
@Override
public int run(String[] argv) {
if (argv.length < 1) {
OptionsParser.usage();
return DistCpConstants.INVALID_ARGUMENT;
}
try {
inputOptions = (OptionsParser.parse(argv));
setTargetPathExists();
LOG.info("Input Options: " + inputOptions);
} catch (Throwable e) {
LOG.error("Invalid arguments: ", e);
System.err.println("Invalid arguments: " + e.getMessage());
OptionsParser.usage();
return DistCpConstants.INVALID_ARGUMENT;
}
try {
execute();
} catch (InvalidInputException e) {
LOG.error("Invalid input: ", e);
return DistCpConstants.INVALID_ARGUMENT;
} catch (DuplicateFileException e) {
LOG.error("Duplicate files in input path: ", e);
return DistCpConstants.DUPLICATE_INPUT;
} catch (AclsNotSupportedException e) {
LOG.error("ACLs not supported on at least one file system: ", e);
return DistCpConstants.ACLS_NOT_SUPPORTED;
} catch (XAttrsNotSupportedException e) {
LOG.error("XAttrs not supported on at least one file system: ", e);
return DistCpConstants.XATTRS_NOT_SUPPORTED;
} catch (Exception e) {
LOG.error("Exception encountered ", e);
return DistCpConstants.UNKNOWN_ERROR;
}
return DistCpConstants.SUCCESS;
}
/**
* Implements the core-execution. Creates the file-list for copy,
* and launches the Hadoop-job, to do the copy.
* @return Job handle
* @throws Exception
*/
public Job execute() throws Exception {
Job job = createAndSubmitJob();
if (inputOptions.shouldBlock()) {
waitForJobCompletion(job);
}
return job;
}
/**
* Create and submit the mapreduce job.
* @return The mapreduce job object that has been submitted
*/
public Job createAndSubmitJob() throws Exception {
assert inputOptions != null;
assert getConf() != null;
Job job = null;
try {
synchronized(this) {
//Don't cleanup while we are setting up.
metaFolder = createMetaFolderPath();
jobFS = metaFolder.getFileSystem(getConf());
job = createJob();
}
if (inputOptions.shouldUseDiff()) {
if (!DistCpSync.sync(inputOptions, getConf())) {
inputOptions.disableUsingDiff();
}
}
createInputFileListing(job);
job.submit();
submitted = true;
} finally {
if (!submitted) {
cleanup();
}
}
String jobID = job.getJobID().toString();
job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
LOG.info("DistCp job-id: " + jobID);
return job;
}
/**
* Wait for the given job to complete.
* @param job the given mapreduce job that has already been submitted
*/
public void waitForJobCompletion(Job job) throws Exception {
assert job != null;
if (!job.waitForCompletion(true)) {
throw new IOException("DistCp failure: Job " + job.getJobID()
+ " has failed: " + job.getStatus().getFailureInfo());
}
}
/**
* Set targetPathExists in both inputOptions and job config,
* for the benefit of CopyCommitter
*/
private void setTargetPathExists() throws IOException {
Path target = inputOptions.getTargetPath();
FileSystem targetFS = target.getFileSystem(getConf());
boolean targetExists = targetFS.exists(target);
inputOptions.setTargetPathExists(targetExists);
getConf().setBoolean(DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS,
targetExists);
}
/**
* Create Job object for submitting it, with all the configuration
*
* @return Reference to job object.
* @throws IOException - Exception if any
*/
private Job createJob() throws IOException {
String jobName = "distcp";
String userChosenName = getConf().get(JobContext.JOB_NAME);
if (userChosenName != null)
jobName += ": " + userChosenName;
Job job = Job.getInstance(getConf());
job.setJobName(jobName);
job.setInputFormatClass(DistCpUtils.getStrategy(getConf(), inputOptions));
job.setJarByClass(CopyMapper.class);
configureOutputFormat(job);
job.setMapperClass(CopyMapper.class);
job.setNumReduceTasks(0);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputFormatClass(CopyOutputFormat.class);
job.getConfiguration().set(JobContext.MAP_SPECULATIVE, "false");
job.getConfiguration().set(JobContext.NUM_MAPS,
String.valueOf(inputOptions.getMaxMaps()));
if (inputOptions.getSslConfigurationFile() != null) {
setupSSLConfig(job);
}
inputOptions.appendToConf(job.getConfiguration());
return job;
}
/**
* Setup ssl configuration on the job configuration to enable hsftp access
* from map job. Also copy the ssl configuration file to Distributed cache
*
* @param job - Reference to job's handle
* @throws java.io.IOException - Exception if unable to locate ssl config file
*/
private void setupSSLConfig(Job job) throws IOException {
Configuration configuration = job.getConfiguration();
Path sslConfigPath = new Path(configuration.
getResource(inputOptions.getSslConfigurationFile()).toString());
addSSLFilesToDistCache(job, sslConfigPath);
configuration.set(DistCpConstants.CONF_LABEL_SSL_CONF, sslConfigPath.getName());
configuration.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfigPath.getName());
}
/**
* Add SSL files to distributed cache. Trust store, key store and ssl config xml
*
* @param job - Job handle
* @param sslConfigPath - ssl Configuration file specified through options
* @throws IOException - If any
*/
private void addSSLFilesToDistCache(Job job,
Path sslConfigPath) throws IOException {
Configuration configuration = job.getConfiguration();
FileSystem localFS = FileSystem.getLocal(configuration);
Configuration sslConf = new Configuration(false);
sslConf.addResource(sslConfigPath);
Path localStorePath = getLocalStorePath(sslConf,
DistCpConstants.CONF_LABEL_SSL_TRUST_STORE_LOCATION);
job.addCacheFile(localStorePath.makeQualified(localFS.getUri(),
localFS.getWorkingDirectory()).toUri());
configuration.set(DistCpConstants.CONF_LABEL_SSL_TRUST_STORE_LOCATION,
localStorePath.getName());
localStorePath = getLocalStorePath(sslConf,
DistCpConstants.CONF_LABEL_SSL_KEY_STORE_LOCATION);
job.addCacheFile(localStorePath.makeQualified(localFS.getUri(),
localFS.getWorkingDirectory()).toUri());
configuration.set(DistCpConstants.CONF_LABEL_SSL_KEY_STORE_LOCATION,
localStorePath.getName());
job.addCacheFile(sslConfigPath.makeQualified(localFS.getUri(),
localFS.getWorkingDirectory()).toUri());
}
/**
* Get Local Trust store/key store path
*
* @param sslConf - Config from SSL Client xml
* @param storeKey - Key for either trust store or key store
* @return - Path where the store is present
* @throws IOException -If any
*/
private Path getLocalStorePath(Configuration sslConf, String storeKey) throws IOException {
if (sslConf.get(storeKey) != null) {
return new Path(sslConf.get(storeKey));
} else {
throw new IOException("Store for " + storeKey + " is not set in " +
inputOptions.getSslConfigurationFile());
}
}
/**
* Setup output format appropriately
*
* @param job - Job handle
* @throws IOException - Exception if any
*/
private void configureOutputFormat(Job job) throws IOException {
final Configuration configuration = job.getConfiguration();
Path targetPath = inputOptions.getTargetPath();
FileSystem targetFS = targetPath.getFileSystem(configuration);
targetPath = targetPath.makeQualified(targetFS.getUri(),
targetFS.getWorkingDirectory());
if (inputOptions.shouldPreserve(DistCpOptions.FileAttribute.ACL)) {
DistCpUtils.checkFileSystemAclSupport(targetFS);
}
if (inputOptions.shouldPreserve(DistCpOptions.FileAttribute.XATTR)) {
DistCpUtils.checkFileSystemXAttrSupport(targetFS);
}
if (inputOptions.shouldAtomicCommit()) {
Path workDir = inputOptions.getAtomicWorkPath();
if (workDir == null) {
workDir = targetPath.getParent();
}
workDir = new Path(workDir, WIP_PREFIX + targetPath.getName()
+ rand.nextInt());
FileSystem workFS = workDir.getFileSystem(configuration);
if (!FileUtil.compareFs(targetFS, workFS)) {
throw new IllegalArgumentException("Work path " + workDir +
" and target path " + targetPath + " are in different file system");
}
CopyOutputFormat.setWorkingDirectory(job, workDir);
} else {
CopyOutputFormat.setWorkingDirectory(job, targetPath);
}
CopyOutputFormat.setCommitDirectory(job, targetPath);
Path logPath = inputOptions.getLogPath();
if (logPath == null) {
logPath = new Path(metaFolder, "_logs");
} else {
LOG.info("DistCp job log path: " + logPath);
}
CopyOutputFormat.setOutputPath(job, logPath);
}
/**
* Create input listing by invoking an appropriate copy listing
* implementation. Also add delegation tokens for each path
* to job's credential store
*
* @param job - Handle to job
* @return Returns the path where the copy listing is created
* @throws IOException - If any
*/
protected Path createInputFileListing(Job job) throws IOException {
Path fileListingPath = getFileListingPath();
CopyListing copyListing = CopyListing.getCopyListing(job.getConfiguration(),
job.getCredentials(), inputOptions);
copyListing.buildListing(fileListingPath, inputOptions);
return fileListingPath;
}
/**
* Get default name of the copy listing file. Use the meta folder
* to create the copy listing file
*
* @return - Path where the copy listing file has to be saved
* @throws IOException - Exception if any
*/
protected Path getFileListingPath() throws IOException {
String fileListPathStr = metaFolder + "/fileList.seq";
Path path = new Path(fileListPathStr);
return new Path(path.toUri().normalize().toString());
}
/**
* Create a default working folder for the job, under the
* job staging directory
*
* @return Returns the working folder information
* @throws Exception - Exception if any
*/
private Path createMetaFolderPath() throws Exception {
Configuration configuration = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(
new Cluster(configuration), configuration);
Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
if (LOG.isDebugEnabled())
LOG.debug("Meta folder location: " + metaFolderPath);
configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
return metaFolderPath;
}
/**
* Main function of the DistCp program. Parses the input arguments (via OptionsParser),
* and invokes the DistCp::run() method, via the ToolRunner.
* @param argv Command-line arguments sent to DistCp.
*/
public static void main(String argv[]) {
int exitCode;
try {
DistCp distCp = new DistCp();
Cleanup CLEANUP = new Cleanup(distCp);
ShutdownHookManager.get().addShutdownHook(CLEANUP,
SHUTDOWN_HOOK_PRIORITY);
exitCode = ToolRunner.run(getDefaultConf(), distCp, argv);
}
catch (Exception e) {
LOG.error("Couldn't complete DistCp operation: ", e);
exitCode = DistCpConstants.UNKNOWN_ERROR;
}
System.exit(exitCode);
}
/**
* Loads properties from distcp-default.xml into configuration
* object
* @return Configuration which includes properties from distcp-default.xml
*/
private static Configuration getDefaultConf() {
Configuration config = new Configuration();
config.addResource(DISTCP_DEFAULT_XML);
return config;
}
private synchronized void cleanup() {
try {
if (metaFolder == null) return;
jobFS.delete(metaFolder, true);
metaFolder = null;
} catch (IOException e) {
LOG.error("Unable to cleanup meta folder: " + metaFolder, e);
}
}
private boolean isSubmitted() {
return submitted;
}
private static class Cleanup implements Runnable {
private final DistCp distCp;
Cleanup(DistCp distCp) {
this.distCp = distCp;
}
@Override
public void run() {
if (distCp.isSubmitted()) return;
distCp.cleanup();
}
}
}
| 16,921 | 34.180873 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.Credentials;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
/**
* FileBasedCopyListing implements the CopyListing interface,
* to create the copy-listing for DistCp,
* by iterating over all source paths mentioned in a specified input-file.
*/
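/*
 * (Illustrative note, not part of the original source.) The input-file consumed
 * by fetchFileList() below is plain UTF-8 text with one source URI per line,
 * for example (hypothetical paths):
 *
 *   hdfs://nn1/user/alice/data1
 *   hdfs://nn1/user/alice/data2
 *
 * Such a listing is typically wired in through the DistCpOptions constructor
 * that takes a source-file-listing Path, e.g.
 *   new DistCpOptions(new Path("/tmp/srcList.txt"), new Path("hdfs://nn2/dst"));
 */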
public class FileBasedCopyListing extends CopyListing {
private final CopyListing globbedListing;
/**
* Constructor, to initialize base-class.
* @param configuration The input Configuration object.
* @param credentials - Credentials object on which the FS delegation tokens are cached. If null,
* delegation token caching is skipped.
*/
public FileBasedCopyListing(Configuration configuration, Credentials credentials) {
super(configuration, credentials);
globbedListing = new GlobbedCopyListing(getConf(), credentials);
}
/** {@inheritDoc} */
@Override
protected void validatePaths(DistCpOptions options)
throws IOException, InvalidInputException {
}
/**
* Implementation of CopyListing::buildListing().
* Iterates over all source paths mentioned in the input-file.
* @param pathToListFile Path on HDFS where the listing file is written.
* @param options Input Options for DistCp (indicating source/target paths.)
* @throws IOException
*/
@Override
public void doBuildListing(Path pathToListFile, DistCpOptions options) throws IOException {
DistCpOptions newOption = new DistCpOptions(options);
newOption.setSourcePaths(fetchFileList(options.getSourceFileListing()));
globbedListing.buildListing(pathToListFile, newOption);
}
private List<Path> fetchFileList(Path sourceListing) throws IOException {
List<Path> result = new ArrayList<Path>();
FileSystem fs = sourceListing.getFileSystem(getConf());
BufferedReader input = null;
try {
input = new BufferedReader(new InputStreamReader(fs.open(sourceListing),
Charset.forName("UTF-8")));
String line = input.readLine();
while (line != null) {
result.add(new Path(line));
line = input.readLine();
}
} finally {
IOUtils.closeStream(input);
}
return result;
}
/** {@inheritDoc} */
@Override
protected long getBytesToCopy() {
return globbedListing.getBytesToCopy();
}
/** {@inheritDoc} */
@Override
protected long getNumberOfPaths() {
return globbedListing.getNumberOfPaths();
}
}
| 3,537 | 33.349515 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/TrueCopyFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.fs.Path;
/**
* A CopyFilter which always returns true.
*
*/
public class TrueCopyFilter extends CopyFilter {
@Override
public boolean shouldCopy(Path path) {
return true;
}
}
| 1,057 | 30.117647 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
/**
* Information presenting a rename/delete op derived from a snapshot diff entry.
* This includes the source file/dir of the rename/delete op, and the target
* file/dir of a rename op.
*/
class DiffInfo {
static final Comparator<DiffInfo> sourceComparator = new Comparator<DiffInfo>() {
@Override
public int compare(DiffInfo d1, DiffInfo d2) {
return d2.source.compareTo(d1.source);
}
};
static final Comparator<DiffInfo> targetComparator = new Comparator<DiffInfo>() {
@Override
public int compare(DiffInfo d1, DiffInfo d2) {
return d1.target == null ? -1 :
(d2.target == null ? 1 : d1.target.compareTo(d2.target));
}
};
/** The source file/dir of the rename or deletion op */
final Path source;
/**
* The intermediate file/dir for the op. For a rename or a delete op,
* we first rename the source to this tmp file/dir.
*/
private Path tmp;
/** The target file/dir of the rename op. Null means the op is deletion. */
final Path target;
DiffInfo(Path source, Path target) {
assert source != null;
this.source = source;
this.target = target;
}
void setTmp(Path tmp) {
this.tmp = tmp;
}
Path getTmp() {
return tmp;
}
static DiffInfo[] getDiffs(SnapshotDiffReport report, Path targetDir) {
List<DiffInfo> diffs = new ArrayList<>();
for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) {
if (entry.getType() == SnapshotDiffReport.DiffType.DELETE) {
final Path source = new Path(targetDir,
DFSUtil.bytes2String(entry.getSourcePath()));
diffs.add(new DiffInfo(source, null));
} else if (entry.getType() == SnapshotDiffReport.DiffType.RENAME) {
final Path source = new Path(targetDir,
DFSUtil.bytes2String(entry.getSourcePath()));
final Path target = new Path(targetDir,
DFSUtil.bytes2String(entry.getTargetPath()));
diffs.add(new DiffInfo(source, target));
}
}
return diffs.toArray(new DiffInfo[diffs.size()]);
}
}
| 3,114 | 33.230769 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import com.google.common.base.Preconditions;
/**
* The OptionsParser parses out the command-line options passed to DistCp,
* and interprets those specific to DistCp, to create an Options object.
*/
public class OptionsParser {
private static final Log LOG = LogFactory.getLog(OptionsParser.class);
private static final Options cliOptions = new Options();
static {
for (DistCpOptionSwitch option : DistCpOptionSwitch.values()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding option " + option.getOption());
}
cliOptions.addOption(option.getOption());
}
}
private static class CustomParser extends GnuParser {
@Override
protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) {
for (int index = 0; index < arguments.length; index++) {
if (arguments[index].equals("-" + DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
arguments[index] = DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT;
}
}
return super.flatten(options, arguments, stopAtNonOption);
}
}
/**
* The parse method parses the command-line options, and creates
* a corresponding Options object.
* @param args Command-line arguments (excluding the options consumed
* by the GenericOptionsParser).
* @return The Options object, corresponding to the specified command-line.
* @throws IllegalArgumentException Thrown if the parse fails.
*/
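/*
 * (Illustrative sketch, not part of the original source.) A typical programmatic
 * call, mirroring a command line such as "distcp -update <src> <dst>"; the URIs
 * are hypothetical:
 *
 *   DistCpOptions options = OptionsParser.parse(new String[] {
 *       "-update", "hdfs://nn1/src", "hdfs://nn2/dst" });
 */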
public static DistCpOptions parse(String args[]) throws IllegalArgumentException {
CommandLineParser parser = new CustomParser();
CommandLine command;
try {
command = parser.parse(cliOptions, args, true);
} catch (ParseException e) {
throw new IllegalArgumentException("Unable to parse arguments. " +
Arrays.toString(args), e);
}
DistCpOptions option = parseSourceAndTargetPaths(command);
//Process all the other option switches and set options appropriately
if (command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch())) {
option.setIgnoreFailures(true);
}
if (command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch())) {
option.setAtomicCommit(true);
}
if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch()) &&
option.shouldAtomicCommit()) {
String workPath = getVal(command, DistCpOptionSwitch.WORK_PATH.getSwitch());
if (workPath != null && !workPath.isEmpty()) {
option.setAtomicWorkPath(new Path(workPath));
}
} else if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
throw new IllegalArgumentException("-tmp work-path can only be specified along with -atomic");
}
if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
option.setLogPath(new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
}
if (command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch())) {
option.setSyncFolder(true);
}
if (command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch())) {
option.setOverwrite(true);
}
if (command.hasOption(DistCpOptionSwitch.APPEND.getSwitch())) {
option.setAppend(true);
}
if (command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch())) {
option.setDeleteMissing(true);
}
if (command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch())) {
option.setSkipCRC(true);
}
if (command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch())) {
option.setBlocking(false);
}
parseBandwidth(command, option);
if (command.hasOption(DistCpOptionSwitch.SSL_CONF.getSwitch())) {
option.setSslConfigurationFile(command.
getOptionValue(DistCpOptionSwitch.SSL_CONF.getSwitch()));
}
parseNumListStatusThreads(command, option);
parseMaxMaps(command, option);
if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
option.setCopyStrategy(
getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
}
parsePreserveStatus(command, option);
if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
String[] snapshots = getVals(command, DistCpOptionSwitch.DIFF.getSwitch());
Preconditions.checkArgument(snapshots != null && snapshots.length == 2,
"Must provide both the starting and ending snapshot names");
option.setUseDiff(true, snapshots[0], snapshots[1]);
}
parseFileLimit(command);
parseSizeLimit(command);
if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
option.setFiltersFile(getVal(command,
DistCpOptionSwitch.FILTERS.getSwitch()));
}
return option;
}
/**
* parseSizeLimit is a helper method for parsing the deprecated
* argument SIZE_LIMIT.
*
* @param command command line arguments
*/
private static void parseSizeLimit(CommandLine command) {
if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
String sizeLimitString = getVal(command,
DistCpOptionSwitch.SIZE_LIMIT.getSwitch().trim());
try {
Long.parseLong(sizeLimitString);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Size-limit is invalid: "
+ sizeLimitString, e);
}
LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
" option. Ignoring.");
}
}
/**
* parseFileLimit is a helper method for parsing the deprecated
* argument FILE_LIMIT.
*
* @param command command line arguments
*/
private static void parseFileLimit(CommandLine command) {
if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
String fileLimitString = getVal(command,
DistCpOptionSwitch.FILE_LIMIT.getSwitch().trim());
try {
Integer.parseInt(fileLimitString);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("File-limit is invalid: "
+ fileLimitString, e);
}
LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
" option. Ignoring.");
}
}
/**
* parsePreserveStatus is a helper method for parsing PRESERVE_STATUS.
*
* @param command command line arguments
* @param option parsed distcp options
*/
private static void parsePreserveStatus(CommandLine command,
DistCpOptions option) {
if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
String attributes =
getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch());
if (attributes == null || attributes.isEmpty()) {
for (FileAttribute attribute : FileAttribute.values()) {
option.preserve(attribute);
}
} else {
for (int index = 0; index < attributes.length(); index++) {
option.preserve(FileAttribute.
getAttribute(attributes.charAt(index)));
}
}
}
}
/**
* parseMaxMaps is a helper method for parsing MAX_MAPS.
*
* @param command command line arguments
* @param option parsed distcp options
*/
private static void parseMaxMaps(CommandLine command,
DistCpOptions option) {
if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
try {
Integer maps = Integer.parseInt(
getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()).trim());
option.setMaxMaps(maps);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Number of maps is invalid: " +
getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
}
}
}
/**
* parseNumListStatusThreads is a helper method for parsing
* NUM_LISTSTATUS_THREADS.
*
* @param command command line arguments
* @param option parsed distcp options
*/
private static void parseNumListStatusThreads(CommandLine command,
DistCpOptions option) {
if (command.hasOption(
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
try {
Integer numThreads = Integer.parseInt(getVal(command,
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()).trim());
option.setNumListstatusThreads(numThreads);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Number of liststatus threads is invalid: " + getVal(command,
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
}
}
}
/**
* parseBandwidth is a helper method for parsing BANDWIDTH.
*
* @param command command line arguments
* @param option parsed distcp options
*/
private static void parseBandwidth(CommandLine command,
DistCpOptions option) {
if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
try {
Integer mapBandwidth = Integer.parseInt(
getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()).trim());
if (mapBandwidth <= 0) {
throw new IllegalArgumentException("Bandwidth specified is not " +
"positive: " + mapBandwidth);
}
option.setMapBandwidth(mapBandwidth);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Bandwidth specified is invalid: " +
getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
}
}
}
/**
* parseSourceAndTargetPaths is a helper method for parsing the source
* and target paths.
*
* @param command command line arguments
* @return DistCpOptions
*/
private static DistCpOptions parseSourceAndTargetPaths(
CommandLine command) {
DistCpOptions option;
Path targetPath;
List<Path> sourcePaths = new ArrayList<Path>();
String[] leftOverArgs = command.getArgs();
if (leftOverArgs == null || leftOverArgs.length < 1) {
throw new IllegalArgumentException("Target path not specified");
}
//Last Argument is the target path
targetPath = new Path(leftOverArgs[leftOverArgs.length - 1].trim());
//Copy any source paths in the arguments to the list
for (int index = 0; index < leftOverArgs.length - 1; index++) {
sourcePaths.add(new Path(leftOverArgs[index].trim()));
}
/* If the command has a source file listing, use it; else fall back on the
source paths in args. If both are present, throw an exception and bail. */
if (command.hasOption(
DistCpOptionSwitch.SOURCE_FILE_LISTING.getSwitch())) {
if (!sourcePaths.isEmpty()) {
throw new IllegalArgumentException("Both source file listing and " +
"source paths present");
}
option = new DistCpOptions(new Path(getVal(command, DistCpOptionSwitch.
SOURCE_FILE_LISTING.getSwitch())), targetPath);
} else {
if (sourcePaths.isEmpty()) {
throw new IllegalArgumentException("Neither source file listing nor " +
"source paths present");
}
option = new DistCpOptions(sourcePaths, targetPath);
}
return option;
}
private static String getVal(CommandLine command, String swtch) {
String optionValue = command.getOptionValue(swtch);
if (optionValue == null) {
return null;
} else {
return optionValue.trim();
}
}
private static String[] getVals(CommandLine command, String option) {
return command.getOptionValues(option);
}
public static void usage() {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("distcp OPTIONS [source_path...] <target_path>\n\nOPTIONS", cliOptions);
}
}
| 13,238 | 34.398396 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Interface for excluding files from DistCp.
*
*/
public abstract class CopyFilter {
/**
* Default initialize method does nothing.
*/
public void initialize() {}
/**
* Predicate to determine if a file can be excluded from copy.
*
* @param path a Path to be considered for copying
* @return boolean, true to copy, false to exclude
*/
public abstract boolean shouldCopy(Path path);
/**
* Public factory method which returns the appropriate implementation of
* CopyFilter.
*
* @param conf DistCp configuration
* @return An instance of the appropriate CopyFilter
*/
public static CopyFilter getCopyFilter(Configuration conf) {
String filtersFilename = conf.get(DistCpConstants.CONF_LABEL_FILTERS_FILE);
if (filtersFilename == null) {
return new TrueCopyFilter();
} else {
String filterFilename = conf.get(
DistCpConstants.CONF_LABEL_FILTERS_FILE);
return new RegexCopyFilter(filterFilename);
}
}
}
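/*
 * (Illustrative sketch, not part of the original source.) A hypothetical
 * CopyFilter subclass that excludes temporary files, showing the extension
 * point; RegexCopyFilter is what getCopyFilter() actually returns when a
 * filters file is configured.
 *
 *   public class NoTmpCopyFilter extends CopyFilter {
 *     @Override
 *     public boolean shouldCopy(Path path) {
 *       // Returning false excludes the path from the copy.
 *       return !path.getName().endsWith(".tmp");
 *     }
 *   }
 */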
| 1,923 | 30.540984 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
/**
* WorkReport<T> is a simple container for an item of class T and its
* corresponding retry counter, which indicates how many times processing
* of this item has previously been attempted.
*/
public class WorkReport<T> {
private T item;
private final boolean success;
private final int retry;
private final Exception exception;
/**
* @param item Object representing work report.
* @param retry Number of unsuccessful attempts to process work.
* @param success Indicates whether work was successfully completed.
*/
public WorkReport(T item, int retry, boolean success) {
this(item, retry, success, null);
}
/**
* @param item Object representing work report.
* @param retry Number of unsuccessful attempts to process work.
* @param success Indicates whether work was successfully completed.
* @param exception Exception thrown while processing work.
*/
public WorkReport(T item, int retry, boolean success, Exception exception) {
this.item = item;
this.retry = retry;
this.success = success;
this.exception = exception;
}
public T getItem() {
return item;
}
/**
* @return True if the work was processed successfully.
*/
public boolean getSuccess() {
return success;
}
/**
* @return Number of unsuccessful attempts to process work.
*/
public int getRetry() {
return retry;
}
/**
* @return Exception thrown while processing work.
*/
public Exception getException() {
return exception;
}
}
| 2,412 | 29.544304 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.tools.util.WorkReport;
import org.apache.hadoop.tools.util.WorkRequest;
import org.apache.hadoop.tools.util.WorkRequestProcessor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.ArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
/**
* ProducerConsumer class encapsulates input and output queues and a
* thread-pool of Workers that loop on the WorkRequest<T> inputQueue; for each
* consumed WorkRequest, a Worker invokes WorkRequestProcessor.processItem()
* and outputs the resulting WorkReport<R> to the outputQueue.
*/
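/*
 * (Illustrative usage sketch, not part of the original source.) Assuming a
 * hypothetical WorkRequestProcessor<Path, Long> implementation named
 * SizeProcessor, and assuming WorkRequest offers a single-argument constructor
 * wrapping the item, the typical flow is:
 *
 *   ProducerConsumer<Path, Long> pc = new ProducerConsumer<>(4);
 *   pc.addWorker(new SizeProcessor());
 *   pc.put(new WorkRequest<Path>(new Path("/some/file")));
 *   while (pc.hasWork()) {
 *     WorkReport<Long> report = pc.blockingTake();
 *     // inspect report.getSuccess() / report.getItem() here
 *   }
 *   pc.shutdown();
 */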
public class ProducerConsumer<T, R> {
private Log LOG = LogFactory.getLog(ProducerConsumer.class);
private LinkedBlockingQueue<WorkRequest<T>> inputQueue;
private LinkedBlockingQueue<WorkReport<R>> outputQueue;
private ExecutorService executor;
private AtomicInteger workCnt;
/**
* ProducerConsumer maintains input and output queues and a thread-pool of
* workers.
*
* @param numThreads Size of thread-pool to execute Workers.
*/
public ProducerConsumer(int numThreads) {
this.inputQueue = new LinkedBlockingQueue<WorkRequest<T>>();
this.outputQueue = new LinkedBlockingQueue<WorkReport<R>>();
executor = Executors.newFixedThreadPool(numThreads);
workCnt = new AtomicInteger(0);
}
/**
* Add another worker that will consume WorkRequest<T> items from input
* queue, process each item using supplied processor, and for every
* processed item output WorkReport<R> to output queue.
*
* @param processor Processor implementing WorkRequestProcessor interface.
*
*/
public void addWorker(WorkRequestProcessor<T, R> processor) {
executor.execute(new Worker(processor));
}
/**
* Shutdown ProducerConsumer worker thread-pool without waiting for
* completion of any pending work.
*/
public void shutdown() {
executor.shutdown();
}
/**
* Returns the number of pending ProducerConsumer items (submitted to the input
* queue for processing via the put() method but not yet consumed by take()
* or blockingTake()).
*
* @return Number of items in ProducerConsumer (either pending for
* processing or waiting to be consumed).
*/
public int getWorkCnt() {
return workCnt.get();
}
/**
* Returns true if there are items in ProducerConsumer that are either
* pending for processing or waiting to be consumed.
*
* @return True if there were more items put() to ProducerConsumer than
* consumed by take() or blockingTake().
*/
public boolean hasWork() {
return workCnt.get() > 0;
}
/**
* Blocking put workRequest to ProducerConsumer input queue.
*
* @param workRequest WorkRequest<T> item to be processed.
*/
public void put(WorkRequest<T> workRequest) {
boolean isDone = false;
while (!isDone) {
try {
inputQueue.put(workRequest);
workCnt.incrementAndGet();
isDone = true;
} catch (InterruptedException ie) {
LOG.error("Could not put workRequest into inputQueue. Retrying...");
}
}
}
/**
* Blocking take from ProducerConsumer output queue that can be interrupted.
*
* @return WorkReport<R> item returned by processor's processItem().
*/
public WorkReport<R> take() throws InterruptedException {
WorkReport<R> report = outputQueue.take();
workCnt.decrementAndGet();
return report;
}
/**
* Blocking take from ProducerConsumer output queue (catches exceptions and
* retries forever).
*
* @return WorkReport<R> item returned by processor's processItem().
*/
public WorkReport<R> blockingTake() {
while (true) {
try {
WorkReport<R> report = outputQueue.take();
workCnt.decrementAndGet();
return report;
} catch (InterruptedException ie) {
LOG.debug("Retrying in blockingTake...");
}
}
}
private class Worker implements Runnable {
private WorkRequestProcessor<T, R> processor;
public Worker(WorkRequestProcessor<T, R> processor) {
this.processor = processor;
}
public void run() {
while (true) {
try {
WorkRequest<T> work = inputQueue.take();
WorkReport<R> result = processor.processItem(work);
boolean isDone = false;
while (!isDone) {
try {
outputQueue.put(result);
isDone = true;
} catch (InterruptedException ie) {
LOG.debug("Could not put report into outputQueue. Retrying...");
}
}
} catch (InterruptedException ie) {
LOG.debug("Interrupted while waiting for request from inputQueue.");
}
}
}
}
}
| 5,749 | 31.303371 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.tools.CopyListing.AclsNotSupportedException;
import org.apache.hadoop.tools.CopyListing.XAttrsNotSupportedException;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.mapred.UniformSizeInputFormat;
import com.google.common.collect.Maps;
import org.apache.hadoop.util.StringUtils;
/**
* Utility functions used in DistCp.
*/
public class DistCpUtils {
private static final Log LOG = LogFactory.getLog(DistCpUtils.class);
/**
* Retrieves size of the file at the specified path.
* @param path The path of the file whose size is sought.
* @param configuration Configuration, to retrieve the appropriate FileSystem.
* @return The file-size, in number of bytes.
* @throws IOException
*/
public static long getFileSize(Path path, Configuration configuration)
throws IOException {
if (LOG.isDebugEnabled())
LOG.debug("Retrieving file size for: " + path);
return path.getFileSystem(configuration).getFileStatus(path).getLen();
}
/**
* Utility to publish a value to a configuration.
* @param configuration The Configuration to which the value must be written.
* @param label The label for the value being published.
* @param value The value being published.
* @param <T> The type of the value.
*/
public static <T> void publish(Configuration configuration,
String label, T value) {
configuration.set(label, String.valueOf(value));
}
/**
* Utility to retrieve a specified key from a Configuration. Throw exception
* if not found.
* @param configuration The Configuration in which the key is sought.
* @param label The key being sought.
* @return Integer value of the key.
*/
public static int getInt(Configuration configuration, String label) {
int value = configuration.getInt(label, -1);
assert value >= 0 : "Couldn't find " + label;
return value;
}
/**
* Utility to retrieve a specified key from a Configuration. Throw exception
* if not found.
* @param configuration The Configuration in which the key is sought.
* @param label The key being sought.
* @return Long value of the key.
*/
public static long getLong(Configuration configuration, String label) {
long value = configuration.getLong(label, -1);
assert value >= 0 : "Couldn't find " + label;
return value;
}
/**
* Returns the class that implements a copy strategy. Looks up the implementation for
* a particular strategy from distcp-default.xml
*
* @param conf - Configuration object
* @param options - Handle to input options
* @return Class implementing the strategy specified in options.
*/
public static Class<? extends InputFormat> getStrategy(Configuration conf,
DistCpOptions options) {
String confLabel = "distcp."
+ StringUtils.toLowerCase(options.getCopyStrategy())
+ ".strategy" + ".impl";
return conf.getClass(confLabel, UniformSizeInputFormat.class, InputFormat.class);
}
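/*
 * (Illustrative note, not part of the original source.) For example, with
 * "-strategy dynamic" the label above becomes "distcp.dynamic.strategy.impl",
 * which distcp-default.xml is expected to map to the dynamic InputFormat;
 * when no class is bound to the label, UniformSizeInputFormat is used.
 */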
/**
* Gets relative path of child path with respect to a root path
* For ex. If childPath = /tmp/abc/xyz/file and
* sourceRootPath = /tmp/abc
* Relative path would be /xyz/file
* If childPath = /file and
* sourceRootPath = /
* Relative path would be /file
* @param sourceRootPath - Source root path
* @param childPath - Path for which relative path is required
* @return - Relative portion of the child path (always prefixed with /
* unless it is empty)
*/
public static String getRelativePath(Path sourceRootPath, Path childPath) {
String childPathString = childPath.toUri().getPath();
String sourceRootPathString = sourceRootPath.toUri().getPath();
return sourceRootPathString.equals("/") ? childPathString :
childPathString.substring(sourceRootPathString.length());
}
/**
* Pack file preservation attributes into a string, containing
* just the first character of each preservation attribute
* @param attributes - Attribute set to preserve
* @return - String containing first letters of each attribute to preserve
*/
public static String packAttributes(EnumSet<FileAttribute> attributes) {
StringBuffer buffer = new StringBuffer(FileAttribute.values().length);
int len = 0;
for (FileAttribute attribute : attributes) {
buffer.append(attribute.name().charAt(0));
len++;
}
return buffer.substring(0, len);
}
/**
* Unpacks preservation attribute string containing the first character of
* each preservation attribute back to a set of attributes to preserve
* @param attributes - Attribute string
* @return - Attribute set
*/
public static EnumSet<FileAttribute> unpackAttributes(String attributes) {
EnumSet<FileAttribute> retValue = EnumSet.noneOf(FileAttribute.class);
if (attributes != null) {
for (int index = 0; index < attributes.length(); index++) {
retValue.add(FileAttribute.getAttribute(attributes.charAt(index)));
}
}
return retValue;
}
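/*
 * (Illustrative note, not part of the original source.) For example, an
 * attribute set containing USER and GROUP packs to "UG" (one leading character
 * per attribute, in enum declaration order, which is assumed here), and
 * unpackAttributes("UG") restores the equivalent EnumSet.
 */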
/**
* Preserve attribute on file matching that of the file status being sent
* as argument. Barring the block size, all the other attributes are preserved
* by this function
*
* @param targetFS - File system
* @param path - Path that needs to preserve original file status
* @param srcFileStatus - Original file status
* @param attributes - Attribute set that needs to be preserved
* @param preserveRawXattrs if true, raw.* xattrs should be preserved
* @throws IOException - Exception if any (particularly relating to group/owner
* change or any transient error)
*/
public static void preserve(FileSystem targetFS, Path path,
CopyListingFileStatus srcFileStatus,
EnumSet<FileAttribute> attributes,
boolean preserveRawXattrs) throws IOException {
FileStatus targetFileStatus = targetFS.getFileStatus(path);
String group = targetFileStatus.getGroup();
String user = targetFileStatus.getOwner();
boolean chown = false;
if (attributes.contains(FileAttribute.ACL)) {
List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
if (!srcAcl.equals(targetAcl)) {
targetFS.setAcl(path, srcAcl);
}
// setAcl doesn't preserve sticky bit, so also call setPermission if needed.
if (srcFileStatus.getPermission().getStickyBit() !=
targetFileStatus.getPermission().getStickyBit()) {
targetFS.setPermission(path, srcFileStatus.getPermission());
}
} else if (attributes.contains(FileAttribute.PERMISSION) &&
!srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
targetFS.setPermission(path, srcFileStatus.getPermission());
}
final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
if (preserveXAttrs || preserveRawXattrs) {
final String rawNS =
StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
String xattrName = entry.getKey();
if (xattrName.startsWith(rawNS) || preserveXAttrs) {
targetFS.setXAttr(path, xattrName, entry.getValue());
}
}
}
}
if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDirectory() &&
(srcFileStatus.getReplication() != targetFileStatus.getReplication())) {
targetFS.setReplication(path, srcFileStatus.getReplication());
}
if (attributes.contains(FileAttribute.GROUP) &&
!group.equals(srcFileStatus.getGroup())) {
group = srcFileStatus.getGroup();
chown = true;
}
if (attributes.contains(FileAttribute.USER) &&
!user.equals(srcFileStatus.getOwner())) {
user = srcFileStatus.getOwner();
chown = true;
}
if (chown) {
targetFS.setOwner(path, user, group);
}
if (attributes.contains(FileAttribute.TIMES)) {
targetFS.setTimes(path,
srcFileStatus.getModificationTime(),
srcFileStatus.getAccessTime());
}
}
/**
* Returns a file's full logical ACL.
*
* @param fileSystem FileSystem containing the file
* @param fileStatus FileStatus of file
* @return List containing full logical ACL
* @throws IOException if there is an I/O error
*/
public static List<AclEntry> getAcl(FileSystem fileSystem,
FileStatus fileStatus) throws IOException {
List<AclEntry> entries = fileSystem.getAclStatus(fileStatus.getPath())
.getEntries();
return AclUtil.getAclFromPermAndEntries(fileStatus.getPermission(), entries);
}
/**
* Returns a file's all xAttrs.
*
* @param fileSystem FileSystem containing the file
* @param path file path
* @return Map containing all xAttrs
* @throws IOException if there is an I/O error
*/
public static Map<String, byte[]> getXAttrs(FileSystem fileSystem,
Path path) throws IOException {
return fileSystem.getXAttrs(path);
}
/**
* Converts a FileStatus to a CopyListingFileStatus. If preserving ACLs,
* populates the CopyListingFileStatus with the ACLs. If preserving XAttrs,
* populates the CopyListingFileStatus with the XAttrs.
*
* @param fileSystem FileSystem containing the file
* @param fileStatus FileStatus of file
* @param preserveAcls boolean true if preserving ACLs
* @param preserveXAttrs boolean true if preserving XAttrs
* @param preserveRawXAttrs boolean true if preserving raw.* XAttrs
* @throws IOException if there is an I/O error
*/
public static CopyListingFileStatus toCopyListingFileStatus(
FileSystem fileSystem, FileStatus fileStatus, boolean preserveAcls,
boolean preserveXAttrs, boolean preserveRawXAttrs) throws IOException {
CopyListingFileStatus copyListingFileStatus =
new CopyListingFileStatus(fileStatus);
if (preserveAcls) {
FsPermission perm = fileStatus.getPermission();
if (perm.getAclBit()) {
List<AclEntry> aclEntries = fileSystem.getAclStatus(
fileStatus.getPath()).getEntries();
copyListingFileStatus.setAclEntries(aclEntries);
}
}
if (preserveXAttrs || preserveRawXAttrs) {
Map<String, byte[]> srcXAttrs = fileSystem.getXAttrs(fileStatus.getPath());
if (preserveXAttrs && preserveRawXAttrs) {
copyListingFileStatus.setXAttrs(srcXAttrs);
} else {
Map<String, byte[]> trgXAttrs = Maps.newHashMap();
final String rawNS =
StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
for (Map.Entry<String, byte[]> ent : srcXAttrs.entrySet()) {
final String xattrName = ent.getKey();
if (xattrName.startsWith(rawNS)) {
if (preserveRawXAttrs) {
trgXAttrs.put(xattrName, ent.getValue());
}
} else if (preserveXAttrs) {
trgXAttrs.put(xattrName, ent.getValue());
}
}
copyListingFileStatus.setXAttrs(trgXAttrs);
}
}
return copyListingFileStatus;
}
/**
* Sort sequence file containing Text and CopyListingFileStatus as key and value respectively
*
* @param fs - File System
* @param conf - Configuration
* @param sourceListing - Source listing file
* @return Path of the sorted file. Is source file with _sorted appended to the name
* @throws IOException - Any exception during sort.
*/
public static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing)
throws IOException {
SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class,
CopyListingFileStatus.class, conf);
Path output = new Path(sourceListing.toString() + "_sorted");
if (fs.exists(output)) {
fs.delete(output, false);
}
sorter.sort(sourceListing, output);
return output;
}
/**
* Determines if a file system supports ACLs by running a canary getAclStatus
* request on the file system root. This method is used before distcp job
* submission to fail fast if the user requested preserving ACLs, but the file
* system cannot support ACLs.
*
* @param fs FileSystem to check
* @throws AclsNotSupportedException if fs does not support ACLs
*/
public static void checkFileSystemAclSupport(FileSystem fs)
throws AclsNotSupportedException {
try {
fs.getAclStatus(new Path(Path.SEPARATOR));
} catch (Exception e) {
throw new AclsNotSupportedException("ACLs not supported for file system: "
+ fs.getUri());
}
}
/**
* Determines if a file system supports XAttrs by running a getXAttrs request
* on the file system root. This method is used before distcp job submission
* to fail fast if the user requested preserving XAttrs, but the file system
* cannot support XAttrs.
*
* @param fs FileSystem to check
* @throws XAttrsNotSupportedException if fs does not support XAttrs
*/
public static void checkFileSystemXAttrSupport(FileSystem fs)
throws XAttrsNotSupportedException {
try {
fs.getXAttrs(new Path(Path.SEPARATOR));
} catch (Exception e) {
throw new XAttrsNotSupportedException("XAttrs not supported for file system: "
+ fs.getUri());
}
}
/**
* String utility to convert a number-of-bytes to human readable format.
*/
private static final ThreadLocal<DecimalFormat> FORMATTER
= new ThreadLocal<DecimalFormat>() {
@Override
protected DecimalFormat initialValue() {
return new DecimalFormat("0.0");
}
};
public static DecimalFormat getFormatter() {
return FORMATTER.get();
}
public static String getStringDescriptionFor(long nBytes) {
char units [] = {'B', 'K', 'M', 'G', 'T', 'P'};
double current = nBytes;
double prev = current;
int index = 0;
while ((current = current/1024) >= 1) {
prev = current;
++index;
}
assert index < units.length : "Too large a number.";
return getFormatter().format(prev) + units[index];
}
/**
* Utility to compare checksums for the paths specified.
*
   * If the checksums can't be retrieved, the comparison is not failed.
   * The only time the comparison fails is when both checksums are
   * available and they don't match.
*
* @param sourceFS FileSystem for the source path.
* @param source The source path.
* @param sourceChecksum The checksum of the source file. If it is null we
* still need to retrieve it through sourceFS.
* @param targetFS FileSystem for the target path.
* @param target The target path.
   * @return If either checksum couldn't be retrieved, the function returns
   * true (a missing checksum does not fail the comparison). If both checksums
   * are retrieved, the function returns true if they match, and false otherwise.
* @throws IOException if there's an exception while retrieving checksums.
*/
public static boolean checksumsAreEqual(FileSystem sourceFS, Path source,
FileChecksum sourceChecksum, FileSystem targetFS, Path target)
throws IOException {
FileChecksum targetChecksum = null;
try {
sourceChecksum = sourceChecksum != null ? sourceChecksum : sourceFS
.getFileChecksum(source);
targetChecksum = targetFS.getFileChecksum(target);
} catch (IOException e) {
LOG.error("Unable to retrieve checksum for " + source + " or " + target, e);
}
return (sourceChecksum == null || targetChecksum == null ||
sourceChecksum.equals(targetChecksum));
}
}
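
// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the original
// DistCpUtils source): shows how a caller might combine checksumsAreEqual()
// and getStringDescriptionFor(). The paths below are hypothetical and error
// handling is omitted for brevity.
// ---------------------------------------------------------------------------
class DistCpUtilsUsageSketch {
  static void verifyCopy(org.apache.hadoop.conf.Configuration conf)
      throws java.io.IOException {
    org.apache.hadoop.fs.Path source =
        new org.apache.hadoop.fs.Path("/data/source/file.bin");   // hypothetical
    org.apache.hadoop.fs.Path target =
        new org.apache.hadoop.fs.Path("/data/target/file.bin");   // hypothetical
    org.apache.hadoop.fs.FileSystem sourceFS = source.getFileSystem(conf);
    org.apache.hadoop.fs.FileSystem targetFS = target.getFileSystem(conf);
    // Passing a null source checksum makes checksumsAreEqual() fetch it from sourceFS.
    boolean same = DistCpUtils.checksumsAreEqual(sourceFS, source, null,
        targetFS, target);
    long len = sourceFS.getFileStatus(source).getLen();
    // getStringDescriptionFor(1536) renders as "1.5K", for example.
    System.out.println("Checksums match: " + same + " for "
        + DistCpUtils.getStringDescriptionFor(len));
  }
}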
| 17,796 | 36.705508 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/RetriableCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.util.ThreadUtil;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
 * This class represents commands that can be retried on failure, in a configurable
* manner.
*/
public abstract class RetriableCommand {
private static Log LOG = LogFactory.getLog(RetriableCommand.class);
private static final long DELAY_MILLISECONDS = 500;
private static final int MAX_RETRIES = 3;
private RetryPolicy retryPolicy = RetryPolicies.
exponentialBackoffRetry(MAX_RETRIES, DELAY_MILLISECONDS, TimeUnit.MILLISECONDS);
protected String description;
/**
* Constructor.
* @param description The human-readable description of the command.
*/
public RetriableCommand(String description) {
this.description = description;
}
/**
* Constructor.
* @param description The human-readable description of the command.
   * @param retryPolicy The RetryPolicy to be used to compute retries.
*/
public RetriableCommand(String description, RetryPolicy retryPolicy) {
this(description);
setRetryPolicy(retryPolicy);
}
/**
   * Implement this interface-method to define the command-logic that will be
   * retried on failure (i.e. when an Exception is thrown).
* @param arguments Argument-list to the command.
* @return Generic "Object".
* @throws Exception Throws Exception on complete failure.
*/
protected abstract Object doExecute(Object... arguments) throws Exception;
/**
* The execute() method invokes doExecute() until either:
* 1. doExecute() succeeds, or
* 2. the command may no longer be retried (e.g. runs out of retry-attempts).
* @param arguments The list of arguments for the command.
* @return Generic "Object" from doExecute(), on success.
* @throws Exception
*/
public Object execute(Object... arguments) throws Exception {
Exception latestException;
int counter = 0;
while (true) {
try {
return doExecute(arguments);
} catch(Exception exception) {
LOG.error("Failure in Retriable command: " + description, exception);
latestException = exception;
}
counter++;
RetryAction action = retryPolicy.shouldRetry(latestException, counter, 0, true);
if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis);
} else {
break;
}
}
throw new IOException("Couldn't run retriable-command: " + description,
latestException);
}
/**
   * Fluent-interface to change the RetryPolicy.
   * @param retryHandler The new RetryPolicy instance to be used.
* @return Self.
*/
public RetriableCommand setRetryPolicy(RetryPolicy retryHandler) {
this.retryPolicy = retryHandler;
return this;
}
}
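
// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the original
// source): a minimal anonymous subclass showing how doExecute() is retried by
// execute(). The flakyOperation() helper is a hypothetical stand-in for work
// that may fail intermittently.
// ---------------------------------------------------------------------------
class RetriableCommandUsageSketch {
  static Object runWithRetries() throws Exception {
    RetriableCommand command = new RetriableCommand("example command") {
      @Override
      protected Object doExecute(Object... arguments) throws Exception {
        // Any exception thrown here is caught by execute(), which consults the
        // RetryPolicy (exponential backoff with bounded retries by default)
        // before either retrying or rethrowing wrapped in an IOException.
        return flakyOperation((String) arguments[0]);
      }
    };
    return command.execute("some-argument");
  }

  private static String flakyOperation(String input) {
    return input; // hypothetical placeholder for intermittently failing work
  }
}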
| 3,910 | 33.008696 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequestProcessor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import org.apache.hadoop.tools.util.WorkReport;
import org.apache.hadoop.tools.util.WorkRequest;
/**
* Interface for ProducerConsumer worker loop.
*
*/
public interface WorkRequestProcessor<T, R> {
/**
   * Process a single work item.
   *
   * @param workRequest Input work item.
   * @return The WorkReport produced by processing the workRequest item.
*
*/
public WorkReport<R> processItem(WorkRequest<T> workRequest);
}
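
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source): a
// trivial processor that upper-cases strings, showing the WorkRequest ->
// WorkReport contract used by the ProducerConsumer worker loop. The WorkReport
// constructor shape (item, retry count, success flag) is assumed here.
// ---------------------------------------------------------------------------
class UpperCaseProcessorSketch implements WorkRequestProcessor<String, String> {
  @Override
  public WorkReport<String> processItem(WorkRequest<String> workRequest) {
    String result = workRequest.getItem().toUpperCase();
    // Assumed WorkReport constructor: (item, retry count, success flag).
    return new WorkReport<String>(result, workRequest.getRetry(), true);
  }
}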
| 1,272 | 31.641026 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.PositionedReadable;
import com.google.common.base.Preconditions;
/**
 * The ThrottledInputStream provides bandwidth throttling on a specified
* InputStream. It is implemented as a wrapper on top of another InputStream
* instance.
* The throttling works by examining the number of bytes read from the underlying
* InputStream from the beginning, and sleep()ing for a time interval if
 * the byte-transfer is found to exceed the specified tolerable maximum.
* (Thus, while the read-rate might exceed the maximum for a given short interval,
* the average tends towards the specified maximum, overall.)
*/
public class ThrottledInputStream extends InputStream {
private final InputStream rawStream;
private final long maxBytesPerSec;
private final long startTime = System.currentTimeMillis();
private long bytesRead = 0;
private long totalSleepTime = 0;
private static final long SLEEP_DURATION_MS = 50;
public ThrottledInputStream(InputStream rawStream) {
this(rawStream, Long.MAX_VALUE);
}
public ThrottledInputStream(InputStream rawStream, long maxBytesPerSec) {
assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid";
this.rawStream = rawStream;
this.maxBytesPerSec = maxBytesPerSec;
}
@Override
public void close() throws IOException {
rawStream.close();
}
/** {@inheritDoc} */
@Override
public int read() throws IOException {
throttle();
int data = rawStream.read();
if (data != -1) {
bytesRead++;
}
return data;
}
/** {@inheritDoc} */
@Override
public int read(byte[] b) throws IOException {
throttle();
int readLen = rawStream.read(b);
if (readLen != -1) {
bytesRead += readLen;
}
return readLen;
}
/** {@inheritDoc} */
@Override
public int read(byte[] b, int off, int len) throws IOException {
throttle();
int readLen = rawStream.read(b, off, len);
if (readLen != -1) {
bytesRead += readLen;
}
return readLen;
}
/**
   * Read bytes starting from the specified position. This requires that
   * rawStream be an instance of {@link PositionedReadable}.
*/
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
if (!(rawStream instanceof PositionedReadable)) {
throw new UnsupportedOperationException(
"positioned read is not supported by the internal stream");
}
throttle();
int readLen = ((PositionedReadable) rawStream).read(position, buffer,
offset, length);
if (readLen != -1) {
bytesRead += readLen;
}
return readLen;
}
private void throttle() throws IOException {
while (getBytesPerSec() > maxBytesPerSec) {
try {
Thread.sleep(SLEEP_DURATION_MS);
totalSleepTime += SLEEP_DURATION_MS;
} catch (InterruptedException e) {
throw new IOException("Thread aborted", e);
}
}
}
/**
* Getter for the number of bytes read from this stream, since creation.
* @return The number of bytes.
*/
public long getTotalBytesRead() {
return bytesRead;
}
/**
* Getter for the read-rate from this stream, since creation.
* Calculated as bytesRead/elapsedTimeSinceStart.
* @return Read rate, in bytes/sec.
*/
public long getBytesPerSec() {
long elapsed = (System.currentTimeMillis() - startTime) / 1000;
if (elapsed == 0) {
return bytesRead;
} else {
return bytesRead / elapsed;
}
}
/**
   * Getter for the total time spent in sleep.
* @return Number of milliseconds spent in sleep.
*/
public long getTotalSleepTime() {
return totalSleepTime;
}
/** {@inheritDoc} */
@Override
public String toString() {
return "ThrottledInputStream{" +
"bytesRead=" + bytesRead +
", maxBytesPerSec=" + maxBytesPerSec +
", bytesPerSec=" + getBytesPerSec() +
", totalSleepTime=" + totalSleepTime +
'}';
}
}
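
// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the original
// source): copies a raw stream through a ThrottledInputStream capped at
// roughly 1 MB/s. The streams are supplied by the caller; the buffer size is
// arbitrary.
// ---------------------------------------------------------------------------
class ThrottledInputStreamUsageSketch {
  static long copyThrottled(java.io.InputStream raw, java.io.OutputStream out)
      throws java.io.IOException {
    ThrottledInputStream in = new ThrottledInputStream(raw, 1024 * 1024);
    byte[] buffer = new byte[4096];
    long copied = 0;
    try {
      int read;
      while ((read = in.read(buffer)) != -1) {
        out.write(buffer, 0, read);
        copied += read;
      }
    } finally {
      // Closing the wrapper closes the underlying raw stream as well.
      in.close();
    }
    return copied;
  }
}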
| 4,927 | 28.159763 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.util;
/**
 * WorkRequest<T> is a simple container for an item of class T and its
 * corresponding retry counter, which indicates how many times this item
 * was previously attempted to be processed.
*/
public class WorkRequest<T> {
private int retry;
private T item;
public WorkRequest(T item) {
this(item, 0);
}
/**
* @param item Object representing WorkRequest input data.
* @param retry Number of previous attempts to process this work request.
*/
public WorkRequest(T item, int retry) {
this.item = item;
this.retry = retry;
}
public T getItem() {
return item;
}
/**
* @return Number of previous attempts to process this work request.
*/
public int getRetry() {
return retry;
}
}
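
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// one way the (item, retry) constructor can be used, re-submitting a failed
// item with an incremented retry count.
// ---------------------------------------------------------------------------
class WorkRequestUsageSketch {
  static WorkRequest<String> resubmit(WorkRequest<String> failed) {
    return new WorkRequest<String>(failed.getItem(), failed.getRetry() + 1);
  }
}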
| 1,597 | 28.592593 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableDirectoryCreateCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.hadoop.tools.util.RetriableCommand;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * This class extends RetriableCommand to implement the creation of directories,
 * with retries on failure.
*/
public class RetriableDirectoryCreateCommand extends RetriableCommand {
/**
* Constructor, taking a description of the action.
* @param description Verbose description of the copy operation.
*/
public RetriableDirectoryCreateCommand(String description) {
super(description);
}
/**
* Implementation of RetriableCommand::doExecute().
* This implements the actual mkdirs() functionality.
* @param arguments Argument-list to the command.
* @return Boolean. True, if the directory could be created successfully.
* @throws Exception IOException, on failure to create the directory.
*/
@Override
protected Object doExecute(Object... arguments) throws Exception {
assert arguments.length == 2 : "Unexpected argument list.";
Path target = (Path)arguments[0];
Mapper.Context context = (Mapper.Context)arguments[1];
FileSystem targetFS = target.getFileSystem(context.getConfiguration());
return targetFS.mkdirs(target);
}
}
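
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// how the command is typically invoked from inside a mapper, mirroring
// CopyMapper.createTargetDirsWithRetry(). The argument order must match what
// doExecute() expects: (target Path, Mapper.Context).
// ---------------------------------------------------------------------------
class RetriableDirectoryCreateSketch {
  static void createWithRetry(Path target, Mapper.Context context)
      throws Exception {
    // execute() re-runs mkdirs() on failure, according to the retry policy.
    new RetriableDirectoryCreateCommand("Creating " + target)
        .execute(target, context);
  }
}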
| 2,119 | 36.192982 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.mapred.CopyMapper.FileAction;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.tools.util.RetriableCommand;
import org.apache.hadoop.tools.util.ThrottledInputStream;
import com.google.common.annotations.VisibleForTesting;
/**
* This class extends RetriableCommand to implement the copy of files,
* with retries on failure.
*/
public class RetriableFileCopyCommand extends RetriableCommand {
private static Log LOG = LogFactory.getLog(RetriableFileCopyCommand.class);
private static int BUFFER_SIZE = 8 * 1024;
private boolean skipCrc = false;
private FileAction action;
/**
* Constructor, taking a description of the action.
* @param description Verbose description of the copy operation.
*/
public RetriableFileCopyCommand(String description, FileAction action) {
super(description);
this.action = action;
}
/**
* Create a RetriableFileCopyCommand.
*
* @param skipCrc Whether to skip the crc check.
* @param description A verbose description of the copy operation.
   * @param action Whether we should overwrite the target file or append new data to it.
*/
public RetriableFileCopyCommand(boolean skipCrc, String description,
FileAction action) {
this(description, action);
this.skipCrc = skipCrc;
}
/**
* Implementation of RetriableCommand::doExecute().
* This is the actual copy-implementation.
* @param arguments Argument-list to the command.
* @return Number of bytes copied.
* @throws Exception
*/
@SuppressWarnings("unchecked")
@Override
protected Object doExecute(Object... arguments) throws Exception {
assert arguments.length == 4 : "Unexpected argument list.";
FileStatus source = (FileStatus)arguments[0];
assert !source.isDirectory() : "Unexpected file-status. Expected file.";
Path target = (Path)arguments[1];
Mapper.Context context = (Mapper.Context)arguments[2];
EnumSet<FileAttribute> fileAttributes
= (EnumSet<FileAttribute>)arguments[3];
return doCopy(source, target, context, fileAttributes);
}
private long doCopy(FileStatus sourceFileStatus, Path target,
Mapper.Context context, EnumSet<FileAttribute> fileAttributes)
throws IOException {
final boolean toAppend = action == FileAction.APPEND;
Path targetPath = toAppend ? target : getTmpFile(target, context);
final Configuration configuration = context.getConfiguration();
FileSystem targetFS = target.getFileSystem(configuration);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Copying " + sourceFileStatus.getPath() + " to " + target);
LOG.debug("Target file path: " + targetPath);
}
final Path sourcePath = sourceFileStatus.getPath();
final FileSystem sourceFS = sourcePath.getFileSystem(configuration);
final FileChecksum sourceChecksum = fileAttributes
.contains(FileAttribute.CHECKSUMTYPE) ? sourceFS
.getFileChecksum(sourcePath) : null;
final long offset = action == FileAction.APPEND ? targetFS.getFileStatus(
target).getLen() : 0;
long bytesRead = copyToFile(targetPath, targetFS, sourceFileStatus,
offset, context, fileAttributes, sourceChecksum);
compareFileLengths(sourceFileStatus, targetPath, configuration, bytesRead
+ offset);
      // At this point, source and target lengths are the same.
      // If length == 0, we skip the checksum check.
if ((bytesRead != 0) && (!skipCrc)) {
compareCheckSums(sourceFS, sourceFileStatus.getPath(), sourceChecksum,
targetFS, targetPath);
}
      // In the non-append case we first wrote to a temporary file; now rename
      // it to the target path.
if (!toAppend) {
promoteTmpToTarget(targetPath, target, targetFS);
}
return bytesRead;
} finally {
// note that for append case, it is possible that we append partial data
// and then fail. In that case, for the next retry, we either reuse the
// partial appended data if it is good or we overwrite the whole file
if (!toAppend && targetFS.exists(targetPath)) {
targetFS.delete(targetPath, false);
}
}
}
/**
* @return the checksum spec of the source checksum if checksum type should be
* preserved
*/
private ChecksumOpt getChecksumOpt(EnumSet<FileAttribute> fileAttributes,
FileChecksum sourceChecksum) {
if (fileAttributes.contains(FileAttribute.CHECKSUMTYPE)
&& sourceChecksum != null) {
return sourceChecksum.getChecksumOpt();
}
return null;
}
private long copyToFile(Path targetPath, FileSystem targetFS,
FileStatus sourceFileStatus, long sourceOffset, Mapper.Context context,
EnumSet<FileAttribute> fileAttributes, final FileChecksum sourceChecksum)
throws IOException {
FsPermission permission = FsPermission.getFileDefault().applyUMask(
FsPermission.getUMask(targetFS.getConf()));
final OutputStream outStream;
if (action == FileAction.OVERWRITE) {
final short repl = getReplicationFactor(fileAttributes, sourceFileStatus,
targetFS, targetPath);
final long blockSize = getBlockSize(fileAttributes, sourceFileStatus,
targetFS, targetPath);
FSDataOutputStream out = targetFS.create(targetPath, permission,
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
BUFFER_SIZE, repl, blockSize, context,
getChecksumOpt(fileAttributes, sourceChecksum));
outStream = new BufferedOutputStream(out);
} else {
outStream = new BufferedOutputStream(targetFS.append(targetPath,
BUFFER_SIZE));
}
return copyBytes(sourceFileStatus, sourceOffset, outStream, BUFFER_SIZE,
context);
}
private void compareFileLengths(FileStatus sourceFileStatus, Path target,
Configuration configuration, long targetLen)
throws IOException {
final Path sourcePath = sourceFileStatus.getPath();
FileSystem fs = sourcePath.getFileSystem(configuration);
if (fs.getFileStatus(sourcePath).getLen() != targetLen)
throw new IOException("Mismatch in length of source:" + sourcePath
+ " and target:" + target);
}
private void compareCheckSums(FileSystem sourceFS, Path source,
FileChecksum sourceChecksum, FileSystem targetFS, Path target)
throws IOException {
if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum,
targetFS, target)) {
StringBuilder errorMessage = new StringBuilder("Check-sum mismatch between ")
.append(source).append(" and ").append(target).append(".");
if (sourceFS.getFileStatus(source).getBlockSize() != targetFS.getFileStatus(target).getBlockSize()) {
errorMessage.append(" Source and target differ in block-size.")
.append(" Use -pb to preserve block-sizes during copy.")
.append(" Alternatively, skip checksum-checks altogether, using -skipCrc.")
.append(" (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)");
}
throw new IOException(errorMessage.toString());
}
}
//If target file exists and unable to delete target - fail
//If target doesn't exist and unable to create parent folder - fail
//If target is successfully deleted and parent exists, if rename fails - fail
private void promoteTmpToTarget(Path tmpTarget, Path target, FileSystem fs)
throws IOException {
if ((fs.exists(target) && !fs.delete(target, false))
|| (!fs.exists(target.getParent()) && !fs.mkdirs(target.getParent()))
|| !fs.rename(tmpTarget, target)) {
throw new IOException("Failed to promote tmp-file:" + tmpTarget
+ " to: " + target);
}
}
private Path getTmpFile(Path target, Mapper.Context context) {
Path targetWorkPath = new Path(context.getConfiguration().
get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
Path root = target.equals(targetWorkPath)? targetWorkPath.getParent() : targetWorkPath;
LOG.info("Creating temp file: " +
new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString()));
return new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString());
}
@VisibleForTesting
long copyBytes(FileStatus sourceFileStatus, long sourceOffset,
OutputStream outStream, int bufferSize, Mapper.Context context)
throws IOException {
Path source = sourceFileStatus.getPath();
byte buf[] = new byte[bufferSize];
ThrottledInputStream inStream = null;
long totalBytesRead = 0;
try {
inStream = getInputStream(source, context.getConfiguration());
int bytesRead = readBytes(inStream, buf, sourceOffset);
while (bytesRead >= 0) {
totalBytesRead += bytesRead;
if (action == FileAction.APPEND) {
sourceOffset += bytesRead;
}
outStream.write(buf, 0, bytesRead);
updateContextStatus(totalBytesRead, context, sourceFileStatus);
bytesRead = readBytes(inStream, buf, sourceOffset);
}
outStream.close();
outStream = null;
} finally {
IOUtils.cleanup(LOG, outStream, inStream);
}
return totalBytesRead;
}
private void updateContextStatus(long totalBytesRead, Mapper.Context context,
FileStatus sourceFileStatus) {
StringBuilder message = new StringBuilder(DistCpUtils.getFormatter()
.format(totalBytesRead * 100.0f / sourceFileStatus.getLen()));
message.append("% ")
.append(description).append(" [")
.append(DistCpUtils.getStringDescriptionFor(totalBytesRead))
.append('/')
.append(DistCpUtils.getStringDescriptionFor(sourceFileStatus.getLen()))
.append(']');
context.setStatus(message.toString());
}
private static int readBytes(ThrottledInputStream inStream, byte buf[],
long position) throws IOException {
try {
if (position == 0) {
return inStream.read(buf);
} else {
return inStream.read(position, buf, 0, buf.length);
}
} catch (IOException e) {
throw new CopyReadException(e);
}
}
private static ThrottledInputStream getInputStream(Path path,
Configuration conf) throws IOException {
try {
FileSystem fs = path.getFileSystem(conf);
long bandwidthMB = conf.getInt(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
DistCpConstants.DEFAULT_BANDWIDTH_MB);
FSDataInputStream in = fs.open(path);
return new ThrottledInputStream(in, bandwidthMB * 1024 * 1024);
}
catch (IOException e) {
throw new CopyReadException(e);
}
}
private static short getReplicationFactor(
EnumSet<FileAttribute> fileAttributes,
FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
return fileAttributes.contains(FileAttribute.REPLICATION)?
sourceFile.getReplication() : targetFS.getDefaultReplication(tmpTargetPath);
}
/**
* @return the block size of the source file if we need to preserve either
* the block size or the checksum type. Otherwise the default block
* size of the target FS.
*/
private static long getBlockSize(
EnumSet<FileAttribute> fileAttributes,
FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
boolean preserve = fileAttributes.contains(FileAttribute.BLOCKSIZE)
|| fileAttributes.contains(FileAttribute.CHECKSUMTYPE);
return preserve ? sourceFile.getBlockSize() : targetFS
.getDefaultBlockSize(tmpTargetPath);
}
/**
* Special subclass of IOException. This is used to distinguish read-operation
* failures from other kinds of IOExceptions.
* The failure to read from source is dealt with specially, in the CopyMapper.
* Such failures may be skipped if the DistCpOptions indicate so.
* Write failures are intolerable, and amount to CopyMapper failure.
*/
public static class CopyReadException extends IOException {
public CopyReadException(Throwable rootCause) {
super(rootCause);
}
}
}
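
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// invoking the copy command from a mapper, mirroring
// CopyMapper.copyFileWithRetry(). The attribute set is empty here; a real
// caller passes the attributes selected via -p.
// ---------------------------------------------------------------------------
class RetriableFileCopySketch {
  static long copy(FileStatus source, Path target, Mapper.Context context)
      throws Exception {
    EnumSet<FileAttribute> attrs = EnumSet.noneOf(FileAttribute.class);
    // doExecute() expects exactly: (source FileStatus, target Path,
    // Mapper.Context, EnumSet<FileAttribute>).
    return (Long) new RetriableFileCopyCommand(false /* skipCrc */,
        "Copying " + source.getPath(), FileAction.OVERWRITE)
        .execute(source, target, context, attrs);
  }
}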
| 13,939 | 40 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
/**
* UniformSizeInputFormat extends the InputFormat class, to produce
* input-splits for DistCp.
* It looks at the copy-listing and groups the contents into input-splits such
* that the total-number of bytes to be copied for each input split is
* uniform.
*/
public class UniformSizeInputFormat
extends InputFormat<Text, CopyListingFileStatus> {
private static final Log LOG
= LogFactory.getLog(UniformSizeInputFormat.class);
/**
* Implementation of InputFormat::getSplits(). Returns a list of InputSplits,
* such that the number of bytes to be copied for all the splits are
* approximately equal.
* @param context JobContext for the job.
* @return The list of uniformly-distributed input-splits.
* @throws IOException
* @throws InterruptedException
*/
@Override
public List<InputSplit> getSplits(JobContext context)
throws IOException, InterruptedException {
Configuration configuration = context.getConfiguration();
int numSplits = DistCpUtils.getInt(configuration,
JobContext.NUM_MAPS);
if (numSplits == 0) return new ArrayList<InputSplit>();
return getSplits(configuration, numSplits,
DistCpUtils.getLong(configuration,
DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED));
}
private List<InputSplit> getSplits(Configuration configuration, int numSplits,
long totalSizeBytes) throws IOException {
List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
long nBytesPerSplit = (long) Math.ceil(totalSizeBytes * 1.0 / numSplits);
CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
Text srcRelPath = new Text();
long currentSplitSize = 0;
long lastSplitStart = 0;
long lastPosition = 0;
final Path listingFilePath = getListingFilePath(configuration);
if (LOG.isDebugEnabled()) {
LOG.debug("Average bytes per map: " + nBytesPerSplit +
", Number of maps: " + numSplits + ", total size: " + totalSizeBytes);
}
SequenceFile.Reader reader=null;
try {
reader = getListingFileReader(configuration);
while (reader.next(srcRelPath, srcFileStatus)) {
        // If adding the current file would cause the bytes per map to exceed the
        // limit, close off the current split and start a new one with this file.
if (currentSplitSize + srcFileStatus.getLen() > nBytesPerSplit && lastPosition != 0) {
FileSplit split = new FileSplit(listingFilePath, lastSplitStart,
lastPosition - lastSplitStart, null);
if (LOG.isDebugEnabled()) {
LOG.debug ("Creating split : " + split + ", bytes in split: " + currentSplitSize);
}
splits.add(split);
lastSplitStart = lastPosition;
currentSplitSize = 0;
}
currentSplitSize += srcFileStatus.getLen();
lastPosition = reader.getPosition();
}
if (lastPosition > lastSplitStart) {
FileSplit split = new FileSplit(listingFilePath, lastSplitStart,
lastPosition - lastSplitStart, null);
if (LOG.isDebugEnabled()) {
LOG.info ("Creating split : " + split + ", bytes in split: " + currentSplitSize);
}
splits.add(split);
}
} finally {
IOUtils.closeStream(reader);
}
return splits;
}
private static Path getListingFilePath(Configuration configuration) {
final String listingFilePathString =
configuration.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
assert !listingFilePathString.equals("")
: "Couldn't find listing file. Invalid input.";
return new Path(listingFilePathString);
}
private SequenceFile.Reader getListingFileReader(Configuration configuration) {
final Path listingFilePath = getListingFilePath(configuration);
try {
final FileSystem fileSystem = listingFilePath.getFileSystem(configuration);
if (!fileSystem.exists(listingFilePath))
throw new IllegalArgumentException("Listing file doesn't exist at: "
+ listingFilePath);
return new SequenceFile.Reader(configuration,
SequenceFile.Reader.file(listingFilePath));
}
catch (IOException exception) {
LOG.error("Couldn't find listing file at: " + listingFilePath, exception);
throw new IllegalArgumentException("Couldn't find listing-file at: "
+ listingFilePath, exception);
}
}
/**
* Implementation of InputFormat::createRecordReader().
* @param split The split for which the RecordReader is sought.
* @param context The context of the current task-attempt.
* @return A SequenceFileRecordReader instance, (since the copy-listing is a
* simple sequence-file.)
* @throws IOException
* @throws InterruptedException
*/
@Override
public RecordReader<Text, CopyListingFileStatus> createRecordReader(
InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
return new SequenceFileRecordReader<Text, CopyListingFileStatus>();
}
}
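
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// the core grouping rule used by getSplits(), applied to plain file lengths.
// For lengths {100, 100, 100, 100} and 2 requested splits, the per-split
// budget is ceil(400 / 2) = 200, so the files group into two splits of 200
// bytes each. (As in getSplits(), the number of produced splits may exceed
// the requested count for uneven inputs.)
// ---------------------------------------------------------------------------
class UniformSizeGroupingSketch {
  static List<List<Long>> group(long[] lengths, int numSplits) {
    long total = 0;
    for (long len : lengths) {
      total += len;
    }
    long bytesPerSplit = (long) Math.ceil(total * 1.0 / numSplits);
    List<List<Long>> splits = new ArrayList<List<Long>>();
    List<Long> current = new ArrayList<Long>();
    long currentSize = 0;
    for (long len : lengths) {
      // Same rule as getSplits(): start a new split once adding the next file
      // would push the running total past the per-split budget.
      if (currentSize + len > bytesPerSplit && !current.isEmpty()) {
        splits.add(current);
        current = new ArrayList<Long>();
        currentSize = 0;
      }
      current.add(len);
      currentSize += len;
    }
    if (!current.isEmpty()) {
      splits.add(current);
    }
    return splits;
  }
}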
| 6,793 | 38.730994 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptionSwitch;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.util.StringUtils;
/**
* Mapper class that executes the DistCp copy operation.
 * Extends the o.a.h.mapreduce.Mapper class.
*/
public class CopyMapper extends Mapper<Text, CopyListingFileStatus, Text, Text> {
/**
* Hadoop counters for the DistCp CopyMapper.
* (These have been kept identical to the old DistCp,
* for backward compatibility.)
*/
public static enum Counter {
COPY, // Number of files received by the mapper for copy.
SKIP, // Number of files skipped.
FAIL, // Number of files that failed to be copied.
BYTESCOPIED, // Number of bytes actually copied by the copy-mapper, total.
BYTESEXPECTED,// Number of bytes expected to be copied.
BYTESFAILED, // Number of bytes that failed to be copied.
BYTESSKIPPED, // Number of bytes that were skipped from copy.
}
/**
* Indicate the action for each file
*/
static enum FileAction {
SKIP, // Skip copying the file since it's already in the target FS
APPEND, // Only need to append new data to the file in the target FS
OVERWRITE, // Overwrite the whole file
}
private static Log LOG = LogFactory.getLog(CopyMapper.class);
private Configuration conf;
private boolean syncFolders = false;
private boolean ignoreFailures = false;
private boolean skipCrc = false;
private boolean overWrite = false;
private boolean append = false;
private EnumSet<FileAttribute> preserve = EnumSet.noneOf(FileAttribute.class);
private FileSystem targetFS = null;
private Path targetWorkPath = null;
/**
* Implementation of the Mapper::setup() method. This extracts the DistCp-
* options specified in the Job's configuration, to set up the Job.
* @param context Mapper's context.
* @throws IOException On IO failure.
* @throws InterruptedException If the job is interrupted.
*/
@Override
public void setup(Context context) throws IOException, InterruptedException {
conf = context.getConfiguration();
syncFolders = conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false);
ignoreFailures = conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
skipCrc = conf.getBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), false);
overWrite = conf.getBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), false);
append = conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), false);
preserve = DistCpUtils.unpackAttributes(conf.get(DistCpOptionSwitch.
PRESERVE_STATUS.getConfigLabel()));
targetWorkPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
Path targetFinalPath = new Path(conf.get(
DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
targetFS = targetFinalPath.getFileSystem(conf);
if (targetFS.exists(targetFinalPath) && targetFS.isFile(targetFinalPath)) {
overWrite = true; // When target is an existing file, overwrite it.
}
if (conf.get(DistCpConstants.CONF_LABEL_SSL_CONF) != null) {
initializeSSLConf(context);
}
}
/**
   * Initialize the SSL configuration if it is set in the conf.
*
* @throws IOException - If any
*/
private void initializeSSLConf(Context context) throws IOException {
LOG.info("Initializing SSL configuration");
String workDir = conf.get(JobContext.JOB_LOCAL_DIR) + "/work";
Path[] cacheFiles = context.getLocalCacheFiles();
Configuration sslConfig = new Configuration(false);
String sslConfFileName = conf.get(DistCpConstants.CONF_LABEL_SSL_CONF);
Path sslClient = findCacheFile(cacheFiles, sslConfFileName);
if (sslClient == null) {
LOG.warn("SSL Client config file not found. Was looking for " + sslConfFileName +
" in " + Arrays.toString(cacheFiles));
return;
}
sslConfig.addResource(sslClient);
String trustStoreFile = conf.get("ssl.client.truststore.location");
Path trustStorePath = findCacheFile(cacheFiles, trustStoreFile);
sslConfig.set("ssl.client.truststore.location", trustStorePath.toString());
String keyStoreFile = conf.get("ssl.client.keystore.location");
Path keyStorePath = findCacheFile(cacheFiles, keyStoreFile);
sslConfig.set("ssl.client.keystore.location", keyStorePath.toString());
try {
OutputStream out = new FileOutputStream(workDir + "/" + sslConfFileName);
try {
sslConfig.writeXml(out);
} finally {
out.close();
}
conf.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfFileName);
} catch (IOException e) {
LOG.warn("Unable to write out the ssl configuration. " +
"Will fall back to default ssl-client.xml in class path, if there is one", e);
}
}
/**
* Find entry from distributed cache
*
* @param cacheFiles - All localized cache files
* @param fileName - fileName to search
* @return Path of the filename if found, else null
*/
private Path findCacheFile(Path[] cacheFiles, String fileName) {
if (cacheFiles != null && cacheFiles.length > 0) {
for (Path file : cacheFiles) {
if (file.getName().equals(fileName)) {
return file;
}
}
}
return null;
}
/**
* Implementation of the Mapper::map(). Does the copy.
   * @param relPath The file's path relative to the copy root, as recorded in the copy-listing.
   * @param sourceFileStatus The CopyListingFileStatus of the source file.
* @throws IOException
* @throws InterruptedException
*/
@Override
public void map(Text relPath, CopyListingFileStatus sourceFileStatus,
Context context) throws IOException, InterruptedException {
Path sourcePath = sourceFileStatus.getPath();
if (LOG.isDebugEnabled())
LOG.debug("DistCpMapper::map(): Received " + sourcePath + ", " + relPath);
Path target = new Path(targetWorkPath.makeQualified(targetFS.getUri(),
targetFS.getWorkingDirectory()) + relPath.toString());
EnumSet<DistCpOptions.FileAttribute> fileAttributes
= getFileAttributeSettings(context);
final boolean preserveRawXattrs = context.getConfiguration().getBoolean(
DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
final String description = "Copying " + sourcePath + " to " + target;
context.setStatus(description);
LOG.info(description);
try {
CopyListingFileStatus sourceCurrStatus;
FileSystem sourceFS;
try {
sourceFS = sourcePath.getFileSystem(conf);
final boolean preserveXAttrs =
fileAttributes.contains(FileAttribute.XATTR);
sourceCurrStatus = DistCpUtils.toCopyListingFileStatus(sourceFS,
sourceFS.getFileStatus(sourcePath),
fileAttributes.contains(FileAttribute.ACL),
preserveXAttrs, preserveRawXattrs);
} catch (FileNotFoundException e) {
throw new IOException(new RetriableFileCopyCommand.CopyReadException(e));
}
FileStatus targetStatus = null;
try {
targetStatus = targetFS.getFileStatus(target);
} catch (FileNotFoundException ignore) {
if (LOG.isDebugEnabled())
LOG.debug("Path could not be found: " + target, ignore);
}
if (targetStatus != null && (targetStatus.isDirectory() != sourceCurrStatus.isDirectory())) {
throw new IOException("Can't replace " + target + ". Target is " +
getFileType(targetStatus) + ", Source is " + getFileType(sourceCurrStatus));
}
if (sourceCurrStatus.isDirectory()) {
createTargetDirsWithRetry(description, target, context);
return;
}
FileAction action = checkUpdate(sourceFS, sourceCurrStatus, target);
if (action == FileAction.SKIP) {
LOG.info("Skipping copy of " + sourceCurrStatus.getPath()
+ " to " + target);
updateSkipCounters(context, sourceCurrStatus);
context.write(null, new Text("SKIP: " + sourceCurrStatus.getPath()));
} else {
copyFileWithRetry(description, sourceCurrStatus, target, context,
action, fileAttributes);
}
DistCpUtils.preserve(target.getFileSystem(conf), target, sourceCurrStatus,
fileAttributes, preserveRawXattrs);
} catch (IOException exception) {
handleFailures(exception, sourceFileStatus, target, context);
}
}
private String getFileType(FileStatus fileStatus) {
return fileStatus == null ? "N/A" : (fileStatus.isDirectory() ? "dir" : "file");
}
private static EnumSet<DistCpOptions.FileAttribute>
getFileAttributeSettings(Mapper.Context context) {
String attributeString = context.getConfiguration().get(
DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel());
return DistCpUtils.unpackAttributes(attributeString);
}
private void copyFileWithRetry(String description,
FileStatus sourceFileStatus, Path target, Context context,
FileAction action, EnumSet<DistCpOptions.FileAttribute> fileAttributes)
throws IOException {
long bytesCopied;
try {
bytesCopied = (Long) new RetriableFileCopyCommand(skipCrc, description,
action).execute(sourceFileStatus, target, context, fileAttributes);
} catch (Exception e) {
context.setStatus("Copy Failure: " + sourceFileStatus.getPath());
throw new IOException("File copy failed: " + sourceFileStatus.getPath() +
" --> " + target, e);
}
incrementCounter(context, Counter.BYTESEXPECTED, sourceFileStatus.getLen());
incrementCounter(context, Counter.BYTESCOPIED, bytesCopied);
incrementCounter(context, Counter.COPY, 1);
}
private void createTargetDirsWithRetry(String description,
Path target, Context context) throws IOException {
try {
new RetriableDirectoryCreateCommand(description).execute(target, context);
} catch (Exception e) {
throw new IOException("mkdir failed for " + target, e);
}
incrementCounter(context, Counter.COPY, 1);
}
private static void updateSkipCounters(Context context,
FileStatus sourceFile) {
incrementCounter(context, Counter.SKIP, 1);
incrementCounter(context, Counter.BYTESSKIPPED, sourceFile.getLen());
}
private void handleFailures(IOException exception,
FileStatus sourceFileStatus, Path target,
Context context) throws IOException, InterruptedException {
LOG.error("Failure in copying " + sourceFileStatus.getPath() + " to " +
target, exception);
if (ignoreFailures && exception.getCause() instanceof
RetriableFileCopyCommand.CopyReadException) {
incrementCounter(context, Counter.FAIL, 1);
incrementCounter(context, Counter.BYTESFAILED, sourceFileStatus.getLen());
context.write(null, new Text("FAIL: " + sourceFileStatus.getPath() + " - " +
StringUtils.stringifyException(exception)));
}
else
throw exception;
}
private static void incrementCounter(Context context, Counter counter,
long value) {
context.getCounter(counter).increment(value);
}
private FileAction checkUpdate(FileSystem sourceFS, FileStatus source,
Path target) throws IOException {
final FileStatus targetFileStatus;
try {
targetFileStatus = targetFS.getFileStatus(target);
} catch (FileNotFoundException e) {
return FileAction.OVERWRITE;
}
if (targetFileStatus != null && !overWrite) {
if (canSkip(sourceFS, source, targetFileStatus)) {
return FileAction.SKIP;
} else if (append) {
long targetLen = targetFileStatus.getLen();
if (targetLen < source.getLen()) {
FileChecksum sourceChecksum = sourceFS.getFileChecksum(
source.getPath(), targetLen);
if (sourceChecksum != null
&& sourceChecksum.equals(targetFS.getFileChecksum(target))) {
// We require that the checksum is not null. Thus currently only
// DistributedFileSystem is supported
return FileAction.APPEND;
}
}
}
}
return FileAction.OVERWRITE;
}
private boolean canSkip(FileSystem sourceFS, FileStatus source,
FileStatus target) throws IOException {
if (!syncFolders) {
return true;
}
boolean sameLength = target.getLen() == source.getLen();
boolean sameBlockSize = source.getBlockSize() == target.getBlockSize()
|| !preserve.contains(FileAttribute.BLOCKSIZE);
if (sameLength && sameBlockSize) {
return skipCrc ||
DistCpUtils.checksumsAreEqual(sourceFS, source.getPath(), null,
targetFS, target.getPath());
} else {
return false;
}
}
}
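
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// reading the CopyMapper counters after a DistCp job completes, e.g. to report
// copied vs. skipped bytes. The job object is assumed to have finished.
// ---------------------------------------------------------------------------
class CopyMapperCounterSketch {
  static void report(org.apache.hadoop.mapreduce.Job job)
      throws java.io.IOException {
    org.apache.hadoop.mapreduce.Counters counters = job.getCounters();
    long copied = counters.findCounter(CopyMapper.Counter.BYTESCOPIED).getValue();
    long skipped = counters.findCounter(CopyMapper.Counter.BYTESSKIPPED).getValue();
    System.out.println("Copied " + copied + " bytes, skipped " + skipped + " bytes");
  }
}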
| 14,506 | 37.480106 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.tools.DistCpConstants;
import java.io.IOException;
/**
* The CopyOutputFormat is the Hadoop OutputFormat used in DistCp.
* It sets up the Job's Configuration (in the Job-Context) with the settings
* for the work-directory, final commit-directory, etc. It also sets the right
* output-committer.
* @param <K>
* @param <V>
*/
public class CopyOutputFormat<K, V> extends TextOutputFormat<K, V> {
/**
* Setter for the working directory for DistCp (where files will be copied
* before they are moved to the final commit-directory.)
* @param job The Job on whose configuration the working-directory is to be set.
* @param workingDirectory The path to use as the working directory.
*/
public static void setWorkingDirectory(Job job, Path workingDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
workingDirectory.toString());
}
/**
* Setter for the final directory for DistCp (where files copied will be
* moved, atomically.)
   * @param job The Job on whose configuration the commit-directory is to be set.
* @param commitDirectory The path to use for final commit.
*/
public static void setCommitDirectory(Job job, Path commitDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
commitDirectory.toString());
}
/**
* Getter for the working directory.
* @param job The Job from whose configuration the working-directory is to
* be retrieved.
* @return The working-directory Path.
*/
public static Path getWorkingDirectory(Job job) {
return getWorkingDirectory(job.getConfiguration());
}
private static Path getWorkingDirectory(Configuration conf) {
String workingDirectory = conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH);
if (workingDirectory == null || workingDirectory.isEmpty()) {
return null;
} else {
return new Path(workingDirectory);
}
}
/**
* Getter for the final commit-directory.
* @param job The Job from whose configuration the commit-directory is to be
* retrieved.
* @return The commit-directory Path.
*/
public static Path getCommitDirectory(Job job) {
return getCommitDirectory(job.getConfiguration());
}
private static Path getCommitDirectory(Configuration conf) {
String commitDirectory = conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH);
if (commitDirectory == null || commitDirectory.isEmpty()) {
return null;
} else {
return new Path(commitDirectory);
}
}
/** {@inheritDoc} */
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
return new CopyCommitter(getOutputPath(context), context);
}
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
Configuration conf = context.getConfiguration();
if (getCommitDirectory(conf) == null) {
throw new IllegalStateException("Commit directory not configured");
}
Path workingPath = getWorkingDirectory(conf);
if (workingPath == null) {
throw new IllegalStateException("Working directory not configured");
}
// get delegation token for outDir's file system
TokenCache.obtainTokensForNamenodes(context.getCredentials(),
new Path[] {workingPath}, conf);
}
}
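
// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// minimal wiring of a Job with CopyOutputFormat, similar to what the DistCp
// driver does. The staging and target paths are hypothetical.
// ---------------------------------------------------------------------------
class CopyOutputFormatUsageSketch {
  static void configure(Job job) {
    Path workDir = new Path("/tmp/distcp-staging");   // hypothetical staging dir
    Path finalDir = new Path("/data/target");         // hypothetical final target
    CopyOutputFormat.setWorkingDirectory(job, workDir);
    CopyOutputFormat.setCommitDirectory(job, finalDir);
    job.setOutputFormatClass(CopyOutputFormat.class);
    // checkOutputSpecs() fails fast if either directory is left unconfigured.
  }
}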
| 4,499 | 35 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.tools.*;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.util.DistCpUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
/**
* The CopyCommitter class is DistCp's OutputCommitter implementation. It is
* responsible for handling the completion/cleanup of the DistCp run.
* Specifically, it does the following:
* 1. Cleanup of the meta-folder (where DistCp maintains its file-list, etc.)
* 2. Preservation of user/group/replication-factor on any directories that
* have been copied. (Files are taken care of in their map-tasks.)
* 3. Atomic-move of data from the temporary work-folder to the final path
* (if atomic-commit was opted for).
* 4. Deletion of files from the target that are missing at source (if opted for).
* 5. Cleanup of any partially copied files, from previous, failed attempts.
*/
public class CopyCommitter extends FileOutputCommitter {
private static final Log LOG = LogFactory.getLog(CopyCommitter.class);
private final TaskAttemptContext taskAttemptContext;
private boolean syncFolder = false;
private boolean overwrite = false;
private boolean targetPathExists = true;
/**
   * Create an output committer
*
* @param outputPath the job's output path
* @param context the task's context
* @throws IOException - Exception if any
*/
public CopyCommitter(Path outputPath, TaskAttemptContext context) throws IOException {
super(outputPath, context);
this.taskAttemptContext = context;
}
/** {@inheritDoc} */
@Override
public void commitJob(JobContext jobContext) throws IOException {
Configuration conf = jobContext.getConfiguration();
syncFolder = conf.getBoolean(DistCpConstants.CONF_LABEL_SYNC_FOLDERS, false);
overwrite = conf.getBoolean(DistCpConstants.CONF_LABEL_OVERWRITE, false);
targetPathExists = conf.getBoolean(DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS, true);
super.commitJob(jobContext);
cleanupTempFiles(jobContext);
String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
final boolean preserveRawXattrs =
conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
if ((attributes != null && !attributes.isEmpty()) || preserveRawXattrs) {
preserveFileAttributesForDirectories(conf);
}
try {
if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
deleteMissing(conf);
} else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
commitData(conf);
}
taskAttemptContext.setStatus("Commit Successful");
}
finally {
cleanup(conf);
}
}
/** {@inheritDoc} */
@Override
public void abortJob(JobContext jobContext,
JobStatus.State state) throws IOException {
try {
super.abortJob(jobContext, state);
} finally {
cleanupTempFiles(jobContext);
cleanup(jobContext.getConfiguration());
}
}
private void cleanupTempFiles(JobContext context) {
try {
Configuration conf = context.getConfiguration();
Path targetWorkPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
FileSystem targetFS = targetWorkPath.getFileSystem(conf);
String jobId = context.getJobID().toString();
deleteAttemptTempFiles(targetWorkPath, targetFS, jobId);
deleteAttemptTempFiles(targetWorkPath.getParent(), targetFS, jobId);
} catch (Throwable t) {
LOG.warn("Unable to cleanup temp files", t);
}
}
private void deleteAttemptTempFiles(Path targetWorkPath,
FileSystem targetFS,
String jobId) throws IOException {
if (targetWorkPath == null) {
return;
}
FileStatus[] tempFiles = targetFS.globStatus(
new Path(targetWorkPath, ".distcp.tmp." + jobId.replaceAll("job","attempt") + "*"));
if (tempFiles != null && tempFiles.length > 0) {
for (FileStatus file : tempFiles) {
LOG.info("Cleaning up " + file.getPath());
targetFS.delete(file.getPath(), false);
}
}
}
/**
* Cleanup meta folder and other temporary files
*
* @param conf - Job Configuration
*/
private void cleanup(Configuration conf) {
Path metaFolder = new Path(conf.get(DistCpConstants.CONF_LABEL_META_FOLDER));
try {
FileSystem fs = metaFolder.getFileSystem(conf);
LOG.info("Cleaning up temporary work folder: " + metaFolder);
fs.delete(metaFolder, true);
} catch (IOException ignore) {
LOG.error("Exception encountered ", ignore);
}
}
// This method changes the target-directories' file-attributes (owner,
// user/group permissions, etc.) based on the corresponding source directories.
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
final boolean syncOrOverwrite = syncFolder || overwrite;
LOG.info("About to preserve attributes: " + attrSymbols);
EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
final boolean preserveRawXattrs =
conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
FileSystem clusterFS = sourceListing.getFileSystem(conf);
SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
SequenceFile.Reader.file(sourceListing));
long totalLen = clusterFS.getFileStatus(sourceListing).getLen();
Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
long preservedEntries = 0;
try {
CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
Text srcRelPath = new Text();
// Iterate over every source path that was copied.
while (sourceReader.next(srcRelPath, srcFileStatus)) {
// File-attributes for files are set at the time of copy,
// in the map-task.
if (! srcFileStatus.isDirectory()) continue;
Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
//
// Skip the root folder when syncOrOverwrite is true.
//
if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;
FileSystem targetFS = targetFile.getFileSystem(conf);
        DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
                             preserveRawXattrs);
        preservedEntries++;
        taskAttemptContext.progress();
taskAttemptContext.setStatus("Preserving status on directory entries. [" +
sourceReader.getPosition() * 100 / totalLen + "%]");
}
} finally {
IOUtils.closeStream(sourceReader);
}
LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
// This method deletes "extra" files from the target, if they're not
// available at the source.
private void deleteMissing(Configuration conf) throws IOException {
LOG.info("-delete option is enabled. About to remove entries from " +
"target that are missing in source");
// Sort the source-file listing alphabetically.
Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
FileSystem clusterFS = sourceListing.getFileSystem(conf);
Path sortedSourceListing = DistCpUtils.sortListing(clusterFS, conf, sourceListing);
// Similarly, create the listing of target-files. Sort alphabetically.
Path targetListing = new Path(sourceListing.getParent(), "targetListing.seq");
CopyListing target = new GlobbedCopyListing(new Configuration(conf), null);
List<Path> targets = new ArrayList<Path>(1);
Path targetFinalPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
targets.add(targetFinalPath);
DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
//
    // Set up options to be the same from CopyListing.buildListing's perspective,
    // so as to collect the same kind of listing as was used during the copy.
//
options.setOverwrite(overwrite);
options.setSyncFolder(syncFolder);
options.setTargetPathExists(targetPathExists);
target.buildListing(targetListing, options);
Path sortedTargetListing = DistCpUtils.sortListing(clusterFS, conf, targetListing);
long totalLen = clusterFS.getFileStatus(sortedTargetListing).getLen();
SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
SequenceFile.Reader.file(sortedSourceListing));
SequenceFile.Reader targetReader = new SequenceFile.Reader(conf,
SequenceFile.Reader.file(sortedTargetListing));
// Walk both source and target file listings.
// Delete all from target that doesn't also exist on source.
long deletedEntries = 0;
try {
CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
Text srcRelPath = new Text();
CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus();
Text trgtRelPath = new Text();
FileSystem targetFS = targetFinalPath.getFileSystem(conf);
boolean srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
while (targetReader.next(trgtRelPath, trgtFileStatus)) {
// Skip sources that don't exist on target.
while (srcAvailable && trgtRelPath.compareTo(srcRelPath) > 0) {
srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
}
if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;
// Target doesn't exist at source. Delete.
boolean result = (!targetFS.exists(trgtFileStatus.getPath()) ||
targetFS.delete(trgtFileStatus.getPath(), true));
if (result) {
LOG.info("Deleted " + trgtFileStatus.getPath() + " - Missing at source");
deletedEntries++;
} else {
throw new IOException("Unable to delete " + trgtFileStatus.getPath());
}
taskAttemptContext.progress();
taskAttemptContext.setStatus("Deleting missing files from target. [" +
targetReader.getPosition() * 100 / totalLen + "%]");
}
} finally {
IOUtils.closeStream(sourceReader);
IOUtils.closeStream(targetReader);
}
LOG.info("Deleted " + deletedEntries + " from target: " + targets.get(0));
}
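  // Worked example of the sorted-merge walk above (illustrative paths only): with a sorted
  // source listing of { /a, /c } and a sorted target listing of { /a, /b, /c, /d },
  // /a and /c are matched and skipped, while /b and /d have no source counterpart and are
  // therefore deleted from the target.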
private void commitData(Configuration conf) throws IOException {
Path workDir = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
Path finalDir = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
FileSystem targetFS = workDir.getFileSystem(conf);
LOG.info("Atomic commit enabled. Moving " + workDir + " to " + finalDir);
if (targetFS.exists(finalDir) && targetFS.exists(workDir)) {
LOG.error("Pre-existing final-path found at: " + finalDir);
throw new IOException("Target-path can't be committed to because it " +
"exists at " + finalDir + ". Copied data is in temp-dir: " + workDir + ". ");
}
boolean result = targetFS.rename(workDir, finalDir);
if (!result) {
LOG.warn("Rename failed. Perhaps data already moved. Verifying...");
result = targetFS.exists(finalDir) && !targetFS.exists(workDir);
}
if (result) {
LOG.info("Data committed successfully to " + finalDir);
taskAttemptContext.setStatus("Data committed successfully to " + finalDir);
} else {
LOG.error("Unable to commit data to " + finalDir);
throw new IOException("Atomic commit failed. Temporary data in " + workDir +
", Unable to move to " + finalDir);
}
}
}
| 13,142 | 40.071875 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred.lib;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskID;
import java.io.IOException;
/**
* The DynamicInputChunk represents a single chunk of work, when used in
* conjunction with the DynamicInputFormat and the DynamicRecordReader.
* The records in the DynamicInputFormat's input-file are split across various
* DynamicInputChunks. Each one is claimed and processed in an iteration of
* a dynamic-mapper. When a DynamicInputChunk has been exhausted, the faster
* mapper may claim another and process it, until there are no more to be
* consumed.
*/
class DynamicInputChunk<K, V> {
private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
private static Configuration configuration;
private static Path chunkRootPath;
private static String chunkFilePrefix;
private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
private static FileSystem fs;
private Path chunkFilePath;
private SequenceFileRecordReader<K, V> reader;
private SequenceFile.Writer writer;
private static void initializeChunkInvariants(Configuration config)
throws IOException {
configuration = config;
Path listingFilePath = new Path(getListingFilePath(configuration));
chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
fs = chunkRootPath.getFileSystem(configuration);
chunkFilePrefix = listingFilePath.getName() + ".chunk.";
}
private static String getListingFilePath(Configuration configuration) {
final String listingFileString = configuration.get(
DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
assert !listingFileString.equals("") : "Listing file not found.";
return listingFileString;
}
private static boolean areInvariantsInitialized() {
return chunkRootPath != null;
}
private DynamicInputChunk(String chunkId, Configuration configuration)
throws IOException {
if (!areInvariantsInitialized())
initializeChunkInvariants(configuration);
chunkFilePath = new Path(chunkRootPath, chunkFilePrefix + chunkId);
openForWrite();
}
private void openForWrite() throws IOException {
writer = SequenceFile.createWriter(
chunkFilePath.getFileSystem(configuration), configuration,
chunkFilePath, Text.class, CopyListingFileStatus.class,
SequenceFile.CompressionType.NONE);
}
/**
* Factory method to create chunk-files for writing to.
* (For instance, when the DynamicInputFormat splits the input-file into
* chunks.)
* @param chunkId String to identify the chunk.
* @param configuration Configuration, describing the location of the listing-
* file, file-system for the map-job, etc.
* @return A DynamicInputChunk, corresponding to a chunk-file, with the name
* incorporating the chunk-id.
* @throws IOException Exception on failure to create the chunk.
*/
public static DynamicInputChunk createChunkForWrite(String chunkId,
Configuration configuration) throws IOException {
return new DynamicInputChunk(chunkId, configuration);
}
/**
* Method to write records into a chunk.
* @param key Key from the listing file.
* @param value Corresponding value from the listing file.
   * @throws IOException Exception on failure to write to the file.
*/
public void write(Text key, CopyListingFileStatus value) throws IOException {
writer.append(key, value);
}
/**
* Closes streams opened to the chunk-file.
*/
public void close() {
IOUtils.cleanup(LOG, reader, writer);
}
/**
* Reassigns the chunk to a specified Map-Task, for consumption.
   * @param taskId The Map-Task to which the chunk is to be reassigned.
* @throws IOException Exception on failure to reassign.
*/
public void assignTo(TaskID taskId) throws IOException {
Path newPath = new Path(chunkRootPath, taskId.toString());
if (!fs.rename(chunkFilePath, newPath)) {
LOG.warn(chunkFilePath + " could not be assigned to " + taskId);
}
}
private DynamicInputChunk(Path chunkFilePath,
TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
if (!areInvariantsInitialized())
initializeChunkInvariants(taskAttemptContext.getConfiguration());
this.chunkFilePath = chunkFilePath;
openForRead(taskAttemptContext);
}
private void openForRead(TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
reader = new SequenceFileRecordReader<K, V>();
reader.initialize(new FileSplit(chunkFilePath, 0,
DistCpUtils.getFileSize(chunkFilePath, configuration), null),
taskAttemptContext);
}
/**
* Factory method that
* 1. acquires a chunk for the specified map-task attempt
* 2. returns a DynamicInputChunk associated with the acquired chunk-file.
* @param taskAttemptContext The attempt-context for the map task that's
* trying to acquire a chunk.
* @return The acquired dynamic-chunk. The chunk-file is renamed to the
* attempt-id (from the attempt-context.)
* @throws IOException Exception on failure.
* @throws InterruptedException Exception on failure.
*/
public static DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
if (!areInvariantsInitialized())
initializeChunkInvariants(taskAttemptContext.getConfiguration());
String taskId
= taskAttemptContext.getTaskAttemptID().getTaskID().toString();
Path acquiredFilePath = new Path(chunkRootPath, taskId);
if (fs.exists(acquiredFilePath)) {
LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
}
for (FileStatus chunkFile : getListOfChunkFiles()) {
if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
LOG.info(taskId + " acquired " + chunkFile.getPath());
return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
}
else
LOG.warn(taskId + " could not acquire " + chunkFile.getPath());
}
return null;
}
/**
* Method to be called to relinquish an acquired chunk. All streams open to
* the chunk are closed, and the chunk-file is deleted.
* @throws IOException Exception thrown on failure to release (i.e. delete)
* the chunk file.
*/
public void release() throws IOException {
close();
if (!fs.delete(chunkFilePath, false)) {
LOG.error("Unable to release chunk at path: " + chunkFilePath);
throw new IOException("Unable to release chunk at path: " + chunkFilePath);
}
}
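  // Illustrative sketch only: the intended life-cycle of a chunk, using the factory and
  // instance methods above. The writer side is normally driven by DynamicInputFormat and
  // the reader side by DynamicRecordReader; "conf", "context" and the sample record below
  // are hypothetical.
  //
  //   // Split-time (producer):
  //   DynamicInputChunk chunk = DynamicInputChunk.createChunkForWrite("00001", conf);
  //   chunk.write(new Text("/rel/path"), new CopyListingFileStatus());
  //   chunk.close();
  //
  //   // Map-time (consumer):
  //   DynamicInputChunk acquired = DynamicInputChunk.acquire(context);
  //   while (acquired != null && acquired.getReader().nextKeyValue()) {
  //     // process getCurrentKey()/getCurrentValue() of the reader
  //   }
  //   if (acquired != null) {
  //     acquired.release();
  //   }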
static FileStatus [] getListOfChunkFiles() throws IOException {
Path chunkFilePattern = new Path(chunkRootPath, chunkFilePrefix + "*");
FileStatus chunkFiles[] = fs.globStatus(chunkFilePattern);
numChunksLeft = chunkFiles.length;
return chunkFiles;
}
/**
* Getter for the chunk-file's path, on HDFS.
* @return The qualified path to the chunk-file.
*/
public Path getPath() {
return chunkFilePath;
}
/**
* Getter for the record-reader, opened to the chunk-file.
* @return Opened Sequence-file reader.
*/
public SequenceFileRecordReader<K,V> getReader() {
assert reader != null : "Reader un-initialized!";
return reader;
}
/**
* Getter for the number of chunk-files left in the chunk-file directory.
* Useful to determine how many chunks (and hence, records) are left to be
* processed.
* @return Before the first scan of the directory, the number returned is -1.
* Otherwise, the number of chunk-files seen from the last scan is returned.
*/
public static int getNumChunksLeft() {
return numChunksLeft;
}
}
| 9,473 | 37.201613 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred.lib;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
* The DynamicRecordReader is used in conjunction with the DynamicInputFormat
* to implement the "Worker pattern" for DistCp.
* The DynamicRecordReader is responsible for:
* 1. Presenting the contents of each chunk to DistCp's mapper.
* 2. Acquiring a new chunk when the current chunk has been completely consumed,
* transparently.
*/
public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
private static final Log LOG = LogFactory.getLog(DynamicRecordReader.class);
private TaskAttemptContext taskAttemptContext;
private Configuration configuration;
private DynamicInputChunk<K, V> chunk;
private TaskID taskId;
// Data required for progress indication.
private int numRecordsPerChunk; // Constant per job.
private int totalNumRecords; // Constant per job.
private int numRecordsProcessedByThisMap = 0;
private long timeOfLastChunkDirScan = 0;
private boolean isChunkDirAlreadyScanned = false;
private static long TIME_THRESHOLD_FOR_DIR_SCANS = TimeUnit.MINUTES.toMillis(5);
/**
* Implementation for RecordReader::initialize(). Initializes the internal
* RecordReader to read from chunks.
* @param inputSplit The InputSplit for the map. Ignored entirely.
* @param taskAttemptContext The AttemptContext.
* @throws IOException
* @throws InterruptedException
*/
@Override
public void initialize(InputSplit inputSplit,
TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
numRecordsPerChunk = DynamicInputFormat.getNumEntriesPerChunk(
taskAttemptContext.getConfiguration());
this.taskAttemptContext = taskAttemptContext;
configuration = taskAttemptContext.getConfiguration();
taskId = taskAttemptContext.getTaskAttemptID().getTaskID();
chunk = DynamicInputChunk.acquire(this.taskAttemptContext);
timeOfLastChunkDirScan = System.currentTimeMillis();
isChunkDirAlreadyScanned = false;
totalNumRecords = getTotalNumRecords();
}
private int getTotalNumRecords() {
return DistCpUtils.getInt(configuration,
DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS);
}
/**
   * Implementation of RecordReader::nextKeyValue().
   * Reads the contents of the current chunk and returns them. When a chunk has
   * been completely exhausted, a new chunk is acquired and read,
   * transparently.
   * @return True if another record could be read; false otherwise.
* @throws IOException
* @throws InterruptedException
*/
@Override
public boolean nextKeyValue()
throws IOException, InterruptedException {
if (chunk == null) {
if (LOG.isDebugEnabled())
LOG.debug(taskId + ": RecordReader is null. No records to be read.");
return false;
}
if (chunk.getReader().nextKeyValue()) {
++numRecordsProcessedByThisMap;
return true;
}
if (LOG.isDebugEnabled())
LOG.debug(taskId + ": Current chunk exhausted. " +
" Attempting to pick up new one.");
chunk.release();
timeOfLastChunkDirScan = System.currentTimeMillis();
isChunkDirAlreadyScanned = false;
chunk = DynamicInputChunk.acquire(taskAttemptContext);
if (chunk == null) return false;
if (chunk.getReader().nextKeyValue()) {
++numRecordsProcessedByThisMap;
return true;
}
else {
return false;
}
}
/**
* Implementation of RecordReader::getCurrentKey().
* @return The key of the current record. (i.e. the source-path.)
* @throws IOException
* @throws InterruptedException
*/
@Override
public K getCurrentKey()
throws IOException, InterruptedException {
return chunk.getReader().getCurrentKey();
}
/**
* Implementation of RecordReader::getCurrentValue().
   * @return The value of the current record. (i.e. the source file-status.)
* @throws IOException
* @throws InterruptedException
*/
@Override
public V getCurrentValue()
throws IOException, InterruptedException {
return chunk.getReader().getCurrentValue();
}
/**
* Implementation of RecordReader::getProgress().
* @return A fraction [0.0,1.0] indicating the progress of a DistCp mapper.
* @throws IOException
* @throws InterruptedException
*/
@Override
public float getProgress()
throws IOException, InterruptedException {
final int numChunksLeft = getNumChunksLeft();
if (numChunksLeft < 0) {// Un-initialized. i.e. Before 1st dir-scan.
assert numRecordsProcessedByThisMap <= numRecordsPerChunk
: "numRecordsProcessedByThisMap:" + numRecordsProcessedByThisMap +
" exceeds numRecordsPerChunk:" + numRecordsPerChunk;
return ((float) numRecordsProcessedByThisMap) / totalNumRecords;
// Conservative estimate, till the first directory scan.
}
return ((float) numRecordsProcessedByThisMap)
/(numRecordsProcessedByThisMap + numRecordsPerChunk*numChunksLeft);
}
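  // Worked example of the estimate above: with numRecordsPerChunk = 50, four chunk-files
  // left after the last directory scan, and 100 records already processed by this map,
  // progress = 100 / (100 + 50 * 4) = 1/3, i.e. roughly 0.33.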
private int getNumChunksLeft() throws IOException {
long now = System.currentTimeMillis();
boolean tooLongSinceLastDirScan
= now - timeOfLastChunkDirScan > TIME_THRESHOLD_FOR_DIR_SCANS;
if (tooLongSinceLastDirScan
|| (!isChunkDirAlreadyScanned &&
numRecordsProcessedByThisMap%numRecordsPerChunk
> numRecordsPerChunk/2)) {
DynamicInputChunk.getListOfChunkFiles();
isChunkDirAlreadyScanned = true;
timeOfLastChunkDirScan = now;
}
return DynamicInputChunk.getNumChunksLeft();
}
/**
* Implementation of RecordReader::close().
* Closes the RecordReader.
* @throws IOException
*/
@Override
public void close()
throws IOException {
if (chunk != null)
chunk.close();
}
}
| 7,038 | 33.504902 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.mapred.lib;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.util.DistCpUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.CopyListingFileStatus;
import java.util.List;
import java.util.ArrayList;
import java.io.IOException;
/**
* DynamicInputFormat implements the "Worker pattern" for DistCp.
 * Rather than splitting up the copy-list into a set of static splits,
 * the DynamicInputFormat does the following:
 * 1. Splits the copy-list into small chunks on the DFS.
 * 2. Creates a set of empty "dynamic" splits, each of which consumes as many
 *    chunks as it can.
* This arrangement ensures that a single slow mapper won't slow down the entire
* job (since the slack will be picked up by other mappers, who consume more
* chunks.)
* By varying the split-ratio, one can vary chunk sizes to achieve different
* performance characteristics.
*/
public class DynamicInputFormat<K, V> extends InputFormat<K, V> {
private static final Log LOG = LogFactory.getLog(DynamicInputFormat.class);
private static final String CONF_LABEL_LISTING_SPLIT_RATIO
= "mapred.listing.split.ratio";
private static final String CONF_LABEL_NUM_SPLITS
= "mapred.num.splits";
private static final String CONF_LABEL_NUM_ENTRIES_PER_CHUNK
= "mapred.num.entries.per.chunk";
/**
* Implementation of InputFormat::getSplits(). This method splits up the
* copy-listing file into chunks, and assigns the first batch to different
* tasks.
* @param jobContext JobContext for the map job.
* @return The list of (empty) dynamic input-splits.
* @throws IOException
* @throws InterruptedException
*/
@Override
public List<InputSplit> getSplits(JobContext jobContext)
throws IOException, InterruptedException {
LOG.info("DynamicInputFormat: Getting splits for job:"
+ jobContext.getJobID());
return createSplits(jobContext,
splitCopyListingIntoChunksWithShuffle(jobContext));
}
private List<InputSplit> createSplits(JobContext jobContext,
List<DynamicInputChunk> chunks)
throws IOException {
int numMaps = getNumMapTasks(jobContext.getConfiguration());
final int nSplits = Math.min(numMaps, chunks.size());
List<InputSplit> splits = new ArrayList<InputSplit>(nSplits);
for (int i=0; i< nSplits; ++i) {
TaskID taskId = new TaskID(jobContext.getJobID(), TaskType.MAP, i);
chunks.get(i).assignTo(taskId);
splits.add(new FileSplit(chunks.get(i).getPath(), 0,
// Setting non-zero length for FileSplit size, to avoid a possible
// future when 0-sized file-splits are considered "empty" and skipped
// over.
getMinRecordsPerChunk(jobContext.getConfiguration()),
null));
}
DistCpUtils.publish(jobContext.getConfiguration(),
CONF_LABEL_NUM_SPLITS, splits.size());
return splits;
}
private static int N_CHUNKS_OPEN_AT_ONCE_DEFAULT = 16;
private List<DynamicInputChunk> splitCopyListingIntoChunksWithShuffle
(JobContext context) throws IOException {
final Configuration configuration = context.getConfiguration();
int numRecords = getNumberOfRecords(configuration);
int numMaps = getNumMapTasks(configuration);
int maxChunksTolerable = getMaxChunksTolerable(configuration);
// Number of chunks each map will process, on average.
int splitRatio = getListingSplitRatio(configuration, numMaps, numRecords);
validateNumChunksUsing(splitRatio, numMaps, maxChunksTolerable);
int numEntriesPerChunk = (int)Math.ceil((float)numRecords
/(splitRatio * numMaps));
DistCpUtils.publish(context.getConfiguration(),
CONF_LABEL_NUM_ENTRIES_PER_CHUNK,
numEntriesPerChunk);
final int nChunksTotal = (int)Math.ceil((float)numRecords/numEntriesPerChunk);
int nChunksOpenAtOnce
= Math.min(N_CHUNKS_OPEN_AT_ONCE_DEFAULT, nChunksTotal);
Path listingPath = getListingFilePath(configuration);
SequenceFile.Reader reader
= new SequenceFile.Reader(configuration,
SequenceFile.Reader.file(listingPath));
List<DynamicInputChunk> openChunks
= new ArrayList<DynamicInputChunk>();
List<DynamicInputChunk> chunksFinal = new ArrayList<DynamicInputChunk>();
CopyListingFileStatus fileStatus = new CopyListingFileStatus();
Text relPath = new Text();
int recordCounter = 0;
int chunkCount = 0;
try {
while (reader.next(relPath, fileStatus)) {
if (recordCounter % (nChunksOpenAtOnce*numEntriesPerChunk) == 0) {
// All chunks full. Create new chunk-set.
closeAll(openChunks);
chunksFinal.addAll(openChunks);
openChunks = createChunks(
configuration, chunkCount, nChunksTotal, nChunksOpenAtOnce);
chunkCount += openChunks.size();
nChunksOpenAtOnce = openChunks.size();
recordCounter = 0;
}
// Shuffle into open chunks.
openChunks.get(recordCounter%nChunksOpenAtOnce).write(relPath, fileStatus);
++recordCounter;
}
} finally {
closeAll(openChunks);
chunksFinal.addAll(openChunks);
IOUtils.closeStream(reader);
}
LOG.info("Number of dynamic-chunk-files created: " + chunksFinal.size());
return chunksFinal;
}
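  // Worked example of the chunking arithmetic above (hypothetical job sizes): with
  // numRecords = 1000, numMaps = 10 and splitRatio = 2,
  //   numEntriesPerChunk = ceil(1000 / (2 * 10)) = 50
  //   nChunksTotal       = ceil(1000 / 50)       = 20
  // Since only 4 chunks would remain after a first batch of 16, createChunks() folds them
  // into that batch and opens all 20 at once; the 1000 records are then dealt round-robin
  // across those chunk-files, about 50 records per chunk.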
private static void validateNumChunksUsing(int splitRatio, int numMaps,
int maxChunksTolerable) throws IOException {
if (splitRatio * numMaps > maxChunksTolerable)
throw new IOException("Too many chunks created with splitRatio:"
+ splitRatio + ", numMaps:" + numMaps
+ ". Reduce numMaps or decrease split-ratio to proceed.");
}
private static void closeAll(List<DynamicInputChunk> chunks) {
for (DynamicInputChunk chunk: chunks)
chunk.close();
}
private static List<DynamicInputChunk> createChunks(Configuration config,
int chunkCount, int nChunksTotal, int nChunksOpenAtOnce)
throws IOException {
List<DynamicInputChunk> chunks = new ArrayList<DynamicInputChunk>();
int chunkIdUpperBound
= Math.min(nChunksTotal, chunkCount + nChunksOpenAtOnce);
// If there will be fewer than nChunksOpenAtOnce chunks left after
// the current batch of chunks, fold the remaining chunks into
// the current batch.
if (nChunksTotal - chunkIdUpperBound < nChunksOpenAtOnce)
chunkIdUpperBound = nChunksTotal;
for (int i=chunkCount; i < chunkIdUpperBound; ++i)
chunks.add(createChunk(i, config));
return chunks;
}
private static DynamicInputChunk createChunk(int chunkId, Configuration config)
throws IOException {
return DynamicInputChunk.createChunkForWrite(String.format("%05d", chunkId),
config);
}
private static Path getListingFilePath(Configuration configuration) {
String listingFilePathString = configuration.get(
DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
assert !listingFilePathString.equals("") : "Listing file not found.";
Path listingFilePath = new Path(listingFilePathString);
try {
assert listingFilePath.getFileSystem(configuration)
.exists(listingFilePath) : "Listing file: " + listingFilePath +
" not found.";
} catch (IOException e) {
assert false : "Listing file: " + listingFilePath
+ " couldn't be accessed. " + e.getMessage();
}
return listingFilePath;
}
private static int getNumberOfRecords(Configuration configuration) {
return DistCpUtils.getInt(configuration,
DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS);
}
private static int getNumMapTasks(Configuration configuration) {
return DistCpUtils.getInt(configuration,
JobContext.NUM_MAPS);
}
private static int getListingSplitRatio(Configuration configuration,
int numMaps, int numPaths) {
return configuration.getInt(
CONF_LABEL_LISTING_SPLIT_RATIO,
getSplitRatio(numMaps, numPaths, configuration));
}
private static int getMaxChunksTolerable(Configuration conf) {
int maxChunksTolerable = conf.getInt(
DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE,
DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT);
if (maxChunksTolerable <= 0) {
LOG.warn(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE +
" should be positive. Fall back to default value: "
+ DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT);
maxChunksTolerable = DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT;
}
return maxChunksTolerable;
}
private static int getMaxChunksIdeal(Configuration conf) {
int maxChunksIdeal = conf.getInt(
DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL,
DistCpConstants.MAX_CHUNKS_IDEAL_DEFAULT);
if (maxChunksIdeal <= 0) {
LOG.warn(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL +
" should be positive. Fall back to default value: "
+ DistCpConstants.MAX_CHUNKS_IDEAL_DEFAULT);
maxChunksIdeal = DistCpConstants.MAX_CHUNKS_IDEAL_DEFAULT;
}
return maxChunksIdeal;
}
private static int getMinRecordsPerChunk(Configuration conf) {
int minRecordsPerChunk = conf.getInt(
DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK,
DistCpConstants.MIN_RECORDS_PER_CHUNK_DEFAULT);
if (minRecordsPerChunk <= 0) {
LOG.warn(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK +
" should be positive. Fall back to default value: "
+ DistCpConstants.MIN_RECORDS_PER_CHUNK_DEFAULT);
minRecordsPerChunk = DistCpConstants.MIN_RECORDS_PER_CHUNK_DEFAULT;
}
return minRecordsPerChunk;
}
private static int getSplitRatio(Configuration conf) {
int splitRatio = conf.getInt(
DistCpConstants.CONF_LABEL_SPLIT_RATIO,
DistCpConstants.SPLIT_RATIO_DEFAULT);
if (splitRatio <= 0) {
LOG.warn(DistCpConstants.CONF_LABEL_SPLIT_RATIO +
" should be positive. Fall back to default value: "
+ DistCpConstants.SPLIT_RATIO_DEFAULT);
splitRatio = DistCpConstants.SPLIT_RATIO_DEFAULT;
}
return splitRatio;
}
/**
* Package private, for testability.
   * @param nMaps The number of maps requested.
* @param nRecords The number of records to be copied.
* @return The number of splits each map should handle, ideally.
*/
static int getSplitRatio(int nMaps, int nRecords) {
return getSplitRatio(nMaps, nRecords,new Configuration());
}
/**
* Package private, for testability.
   * @param nMaps The number of maps requested.
* @param nRecords The number of records to be copied.
* @param conf The configuration set by users.
* @return The number of splits each map should handle, ideally.
*/
static int getSplitRatio(int nMaps, int nRecords, Configuration conf) {
int maxChunksIdeal = getMaxChunksIdeal(conf);
int minRecordsPerChunk = getMinRecordsPerChunk(conf);
int splitRatio = getSplitRatio(conf);
if (nMaps == 1) {
LOG.warn("nMaps == 1. Why use DynamicInputFormat?");
return 1;
}
if (nMaps > maxChunksIdeal)
return splitRatio;
int nPickups = (int)Math.ceil((float)maxChunksIdeal/nMaps);
int nRecordsPerChunk = (int)Math.ceil((float)nRecords/(nMaps*nPickups));
return nRecordsPerChunk < minRecordsPerChunk ?
splitRatio : nPickups;
}
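  // Worked example (using illustrative values, not necessarily the shipped defaults):
  // suppose maxChunksIdeal = 100, minRecordsPerChunk = 5 and splitRatio = 2. For
  // nMaps = 10 and nRecords = 1000:
  //   nPickups         = ceil(100 / 10)         = 10
  //   nRecordsPerChunk = ceil(1000 / (10 * 10)) = 10
  // Since 10 >= minRecordsPerChunk, the method returns nPickups = 10, i.e. each map is
  // expected to pick up roughly ten chunks over its lifetime.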
static int getNumEntriesPerChunk(Configuration configuration) {
return DistCpUtils.getInt(configuration,
CONF_LABEL_NUM_ENTRIES_PER_CHUNK);
}
/**
   * Implementation of InputFormat::createRecordReader().
* @param inputSplit The split for which the RecordReader is required.
* @param taskAttemptContext TaskAttemptContext for the current attempt.
* @return DynamicRecordReader instance.
* @throws IOException
* @throws InterruptedException
*/
@Override
public RecordReader<K, V> createRecordReader(
InputSplit inputSplit,
TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
return new DynamicRecordReader<K, V>();
}
}
| 13,820 | 37.714286 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/ClassWithNoPackage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class ClassWithNoPackage {
}
| 844 | 39.238095 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TypedBytesMapApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.typedbytes.TypedBytesInput;
import org.apache.hadoop.typedbytes.TypedBytesOutput;
public class TypedBytesMapApp {
private String find;
public TypedBytesMapApp(String find) {
this.find = find;
}
public void go() throws IOException {
TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(System.in));
TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(System.out));
Object key = tbinput.readRaw();
while (key != null) {
Object value = tbinput.read();
for (String part : value.toString().split(find)) {
tboutput.write(part); // write key
tboutput.write(1); // write value
}
System.err.println("reporter:counter:UserCounters,InputLines,1");
key = tbinput.readRaw();
}
System.out.flush();
}
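  // Worked example (assuming the app is launched with args[0] = "."): "find" becomes the
  // regex "\.", so an input value of "roses.are.red" is split into {roses, are, red} and
  // the mapper emits the typed-bytes pairs (roses, 1), (are, 1), (red, 1). The
  // "reporter:counter:..." line written to stderr is the standard Hadoop Streaming
  // mechanism for incrementing a counter from inside a streaming task.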
public static void main(String[] args) throws IOException {
TypedBytesMapApp app = new TypedBytesMapApp(args[0].replace(".","\\."));
app.go();
}
}
| 1,946 | 31.45 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingOutputOnlyKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import org.junit.Test;
public class TestStreamingOutputOnlyKeys extends TestStreaming {
public TestStreamingOutputOnlyKeys() throws IOException {
super();
}
@Test
public void testOutputOnlyKeys() throws Exception {
args.add("-jobconf"); args.add("stream.reduce.input" +
"=keyonlytext");
args.add("-jobconf"); args.add("stream.reduce.output" +
"=keyonlytext");
super.testCommandLine();
}
@Override
public String getExpectedOutput() {
return outputExpect.replaceAll("\t", "");
}
@Override
@Test
public void testCommandLine() {
// Do nothing
}
}
| 1,494 | 27.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/DelayEchoApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
/**
 * A simple Java app that consumes all input from stdin, waiting a few seconds
 * before echoing each line to stdout.
*/
public class DelayEchoApp {
public DelayEchoApp() {
}
public void go(int seconds) throws IOException, InterruptedException {
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
// Consume all input (to make sure streaming will still count this
// task as failed even if all input was consumed).
while ((line = in.readLine()) != null) {
Thread.sleep(seconds * 1000L);
System.out.println(line);
}
}
public static void main(String[] args) throws IOException, InterruptedException {
int seconds = 5;
if (args.length >= 1) {
try {
seconds = Integer.valueOf(args[0]);
} catch (NumberFormatException e) {
// just use default 5.
}
}
DelayEchoApp app = new DelayEchoApp();
app.go(seconds);
}
}
| 1,809 | 30.206897 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestClassWithNoPackage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.MalformedURLException;
import org.apache.hadoop.util.JarFinder;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
/**
 * Tests that Hadoop StreamUtil successfully returns a class that is loaded by
 * the job conf but has no package name.
*/
public class TestClassWithNoPackage
{
@Test
public void testGoodClassOrNull() throws Exception {
String NAME = "ClassWithNoPackage";
ClassLoader cl = TestClassWithNoPackage.class.getClassLoader();
String JAR = JarFinder.getJar(cl.loadClass(NAME));
// Add testjob jar file to classpath.
Configuration conf = new Configuration();
conf.setClassLoader(new URLClassLoader(new URL[]{new URL("file", null, JAR)},
null));
// Get class with no package name.
String defaultPackage = this.getClass().getPackage().getName();
Class c = StreamUtil.goodClassOrNull(conf, NAME, defaultPackage);
assertNotNull("Class " + NAME + " not found!", c);
}
public static void main(String[]args) throws Exception
{
new TestClassWithNoPackage().testGoodClassOrNull();
}
}
| 2,044 | 34.877193 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests if StreamXmlRecordReader will read the next record, _after_ the
 * end of a split, if the split falls before the end of the end-tag of a record.
 * Also tests if StreamXmlRecordReader will read a record twice if the end of a
 * split is a few characters after the end-tag of a record but before the
 * begin-tag of the next record.
*/
public class TestStreamXmlMultipleRecords extends TestStreaming
{
private static final Log LOG = LogFactory.getLog(
TestStreamXmlMultipleRecords.class);
private boolean hasPerl = false;
private long blockSize;
private String isSlowMatch;
// Our own configuration used for creating FileSystem object where
// fs.local.block.size is set to 60 OR 80.
// See 60th char in input. It is before the end of end-tag of a record.
// See 80th char in input. It is in between the end-tag of a record and
// the begin-tag of next record.
private Configuration conf = null;
private String myPerlMapper =
"perl -n -a -e 'print join(qq(\\n), map { qq($_\\t1) } @F), qq(\\n);'";
private String myPerlReducer =
"perl -n -a -e '$freq{$F[0]}++; END { print qq(is\\t$freq{is}\\n); }'";
public TestStreamXmlMultipleRecords() throws IOException {
super();
input = "<line>This is a single line,\nand it is containing multiple" +
" words.</line> <line>Only is appears more than" +
" once.</line>\n";
outputExpect = "is\t3\n";
map = myPerlMapper;
reduce = myPerlReducer;
hasPerl = UtilTest.hasPerlSupport();
}
@Override
@Before
public void setUp() throws IOException {
super.setUp();
// Without this closeAll() call, setting of FileSystem block size is
// not effective and will be old block size set in earlier test.
FileSystem.closeAll();
}
// Set file system block size such that split falls
// (a) before the end of end-tag of a record (testStreamXmlMultiInner...) OR
// (b) between records(testStreamXmlMultiOuter...)
@Override
protected Configuration getConf() {
conf = new Configuration();
conf.setLong("fs.local.block.size", blockSize);
return conf;
}
@Override
protected String[] genArgs() {
args.add("-inputreader");
args.add("StreamXmlRecordReader,begin=<line>,end=</line>,slowmatch=" +
isSlowMatch);
return super.genArgs();
}
/**
* Tests if StreamXmlRecordReader will read the next record, _after_ the
   * end of a split, if the split falls before the end of the end-tag of a record.
* Tests with slowmatch=false.
* @throws Exception
*/
@Test
public void testStreamXmlMultiInnerFast() throws Exception {
if (hasPerl) {
blockSize = 60;
isSlowMatch = "false";
super.testCommandLine();
}
else {
LOG.warn("No perl; skipping test.");
}
}
/**
   * Tests if StreamXmlRecordReader will read a record twice if the end of a
   * split is a few characters after the end-tag of a record but before the
   * begin-tag of the next record.
* Tests with slowmatch=false.
* @throws Exception
*/
@Test
public void testStreamXmlMultiOuterFast() throws Exception {
if (hasPerl) {
blockSize = 80;
isSlowMatch = "false";
super.testCommandLine();
}
else {
LOG.warn("No perl; skipping test.");
}
}
/**
* Tests if StreamXmlRecordReader will read the next record, _after_ the
   * end of a split, if the split falls before the end of the end-tag of a record.
* Tests with slowmatch=true.
* @throws Exception
*/
@Test
public void testStreamXmlMultiInnerSlow() throws Exception {
if (hasPerl) {
blockSize = 60;
isSlowMatch = "true";
super.testCommandLine();
}
else {
LOG.warn("No perl; skipping test.");
}
}
/**
   * Tests if StreamXmlRecordReader will read a record twice if the end of a
   * split is a few characters after the end-tag of a record but before the
   * begin-tag of the next record.
* Tests with slowmatch=true.
* @throws Exception
*/
@Test
public void testStreamXmlMultiOuterSlow() throws Exception {
if (hasPerl) {
blockSize = 80;
isSlowMatch = "true";
super.testCommandLine();
}
else {
LOG.warn("No perl; skipping test.");
}
}
@Override
@Test
public void testCommandLine() {
// Do nothing
}
}
| 5,435 | 28.383784 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* This class tests hadoopStreaming in MapReduce local mode.
 * It uses Hadoop Aggregate to count the number of word occurrences
* in the input.
*/
public class TestStreamAggregate
{
protected File INPUT_FILE = new File("stream_aggregate_input.txt");
protected File OUTPUT_DIR = new File("stream_aggregate_out");
protected String input = "roses are red\nviolets are blue\nbunnies are pink\n";
// map parses input lines and generates count entries for each word.
protected String map = UtilTest.makeJavaCommand(StreamAggregate.class, new String[]{".", "\\n"});
  // Use the aggregate combiner and reducer to aggregate the counts
protected String outputExpect = "are\t3\nblue\t1\nbunnies\t1\npink\t1\nred\t1\nroses\t1\nviolets\t1\n";
private StreamJob job;
public TestStreamAggregate() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException
{
DataOutputStream out = new DataOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", "aggregate",
"-jobconf", MRJobConfig.PRESERVE_FAILED_TASK_FILES + "=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
};
}
@Test
public void testCommandLine() throws Exception {
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
createInput();
boolean mayExit = false;
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
job = new StreamJob(genArgs(), mayExit);
job.go();
File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
outFile.delete();
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
assertEquals(outputExpect, output);
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
public static void main(String[]args) throws Exception
{
    new TestStreamAggregate().testCommandLine();
}
}
| 3,532 | 32.971154 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMRFramework.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import static org.junit.Assert.*;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Test;
public class TestMRFramework {
@Test
public void testFramework() {
JobConf jobConf = new JobConf();
jobConf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
assertFalse("Expected 'isLocal' to be false",
StreamUtil.isLocalJobTracker(jobConf));
jobConf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
assertFalse("Expected 'isLocal' to be false",
StreamUtil.isLocalJobTracker(jobConf));
jobConf.set(JTConfig.JT_IPC_ADDRESS, "jthost:9090");
jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
assertTrue("Expected 'isLocal' to be true",
StreamUtil.isLocalJobTracker(jobConf));
}
}
| 1,897 | 37.734694 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
class UtilTest {
private static final Log LOG = LogFactory.getLog(UtilTest.class);
/**
   * Utility routine to recursively delete a directory.
* On normal return, the file does not exist.
*
* @param file File or directory to delete.
*
* @throws RuntimeException if the file, or some file within
* it, could not be deleted.
*/
static void recursiveDelete(File file) {
file = file.getAbsoluteFile();
if (!file.exists()) return;
if (file.isDirectory()) {
for (File child : file.listFiles()) {
recursiveDelete(child);
}
}
if (!file.delete()) {
throw new RuntimeException("Failed to delete " + file);
}
}
public UtilTest(String testName) {
testName_ = testName;
userDir_ = System.getProperty("user.dir");
antTestDir_ = System.getProperty("test.build.data", userDir_);
System.out.println("test.build.data-or-user.dir=" + antTestDir_);
}
void checkUserDir() {
// // trunk/src/contrib/streaming --> trunk/build/contrib/streaming/test/data
// if (!userDir_.equals(antTestDir_)) {
// // because changes to user.dir are ignored by File static methods.
// throw new IllegalStateException("user.dir != test.build.data. The junit Ant task must be forked.");
// }
}
void redirectIfAntJunit() throws IOException
{
boolean fromAntJunit = System.getProperty("test.build.data") != null;
if (fromAntJunit) {
new File(antTestDir_).mkdirs();
File outFile = new File(antTestDir_, testName_+".log");
PrintStream out = new PrintStream(new FileOutputStream(outFile));
System.setOut(out);
System.setErr(out);
}
}
public static String collate(List<String> args, String sep) {
StringBuffer buf = new StringBuffer();
Iterator<String> it = args.iterator();
while (it.hasNext()) {
if (buf.length() > 0) {
buf.append(" ");
}
buf.append(it.next());
}
return buf.toString();
}
public static String makeJavaCommand(Class<?> main, String[] argv) {
ArrayList<String> vargs = new ArrayList<String>();
File javaHomeBin = new File(System.getProperty("java.home"), "bin");
File jvm = new File(javaHomeBin, "java");
vargs.add(jvm.toString());
// copy parent classpath
vargs.add("-classpath");
vargs.add("\"" + System.getProperty("java.class.path") + "\"");
// add heap-size limit
vargs.add("-Xmx" + Runtime.getRuntime().maxMemory());
// Add main class and its arguments
vargs.add(main.getName());
for (int i = 0; i < argv.length; i++) {
vargs.add(argv[i]);
}
return collate(vargs, " ");
}
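  // Hedged, illustrative usage only (the exact paths and heap size depend on
  // the running JVM): makeJavaCommand(TrApp.class, new String[]{".", "\\n"})
  // yields a single space-collated command line along the lines of
  //   <java.home>/bin/java -classpath "<parent classpath>" -Xmx<maxMemory> org.apache.hadoop.streaming.TrApp . \n
  // which the tests then pass to streaming as the -mapper or -reducer command.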
public static boolean isCygwin() {
String OS = System.getProperty("os.name");
return (OS.indexOf("Windows") > -1);
}
/**
* Is perl supported on this machine ?
* @return true if perl is available and is working as expected
*/
public static boolean hasPerlSupport() {
boolean hasPerl = false;
ShellCommandExecutor shexec = new ShellCommandExecutor(
new String[] { "perl", "-e", "print 42" });
try {
shexec.execute();
if (shexec.getOutput().equals("42")) {
hasPerl = true;
}
else {
LOG.warn("Perl is installed, but isn't behaving as expected.");
}
} catch (Exception e) {
LOG.warn("Could not run perl: " + e);
}
return hasPerl;
}
private String userDir_;
private String antTestDir_;
private String testName_;
}
| 4,651 | 29.807947 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.File;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskID;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
/**
* Tests if mapper/reducer with empty/nonempty input works properly if
* reporting is done using lines like "reporter:status:" and
* "reporter:counter:" before map()/reduce() method is called.
* Validates the task's log of STDERR if messages are written to stderr before
* map()/reduce() is called.
* Also validates job output.
* Uses MiniMR since the local jobtracker doesn't track task status.
*/
public class TestStreamingStatus {
protected static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data","/tmp"),
TestStreamingStatus.class.getSimpleName())
.toURI().toString().replace(' ', '+');
protected String INPUT_FILE = TEST_ROOT_DIR + "/input.txt";
protected String OUTPUT_DIR = TEST_ROOT_DIR + "/out";
protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
protected String map = null;
protected String reduce = null;
protected String scriptFile = TEST_ROOT_DIR + "/perlScript.pl";
protected String scriptFileName = new Path(scriptFile).toUri().getPath();
String expectedStderr = "my error msg before consuming input\n" +
"my error msg after consuming input\n";
String expectedOutput = null;// inited in setUp()
String expectedStatus = "before consuming input";
// This script does the following
// (a) setting task status before reading input
// (b) writing to stderr before reading input and after reading input
// (c) writing to stdout before reading input
// (d) incrementing user counter before reading input and after reading input
  // Writing lines to stdout before reading input ((c) above) validates that
  // the task does not hang when its input is empty (i.e. when the output
  // thread would otherwise not be started).
protected String script =
"#!/usr/bin/perl\n" +
"print STDERR \"reporter:status:" + expectedStatus + "\\n\";\n" +
"print STDERR \"reporter:counter:myOwnCounterGroup,myOwnCounter,1\\n\";\n" +
"print STDERR \"my error msg before consuming input\\n\";\n" +
"for($count = 1500; $count >= 1; $count--) {print STDOUT \"$count \";}" +
"while(<STDIN>) {chomp;}\n" +
"print STDERR \"my error msg after consuming input\\n\";\n" +
"print STDERR \"reporter:counter:myOwnCounterGroup,myOwnCounter,1\\n\";\n";
MiniMRCluster mr = null;
FileSystem fs = null;
JobConf conf = null;
/**
* Start the cluster and create input file before running the actual test.
*
* @throws IOException
*/
@Before
public void setUp() throws IOException {
conf = new JobConf();
conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
mr = new MiniMRCluster(1, "file:///", 3, null , null, conf);
Path inFile = new Path(INPUT_FILE);
fs = inFile.getFileSystem(mr.createJobConf());
clean(fs);
buildExpectedJobOutput();
}
/**
* Kill the cluster after the test is done.
*/
@After
public void tearDown() {
if (fs != null) { clean(fs); }
if (mr != null) { mr.shutdown(); }
}
// Updates expectedOutput to have the expected job output as a string
void buildExpectedJobOutput() {
if (expectedOutput == null) {
expectedOutput = "";
for(int i = 1500; i >= 1; i--) {
expectedOutput = expectedOutput.concat(Integer.toString(i) + " ");
}
expectedOutput = expectedOutput.trim();
}
}
// Create empty/nonempty input file.
// Create script file with the specified content.
protected void createInputAndScript(boolean isEmptyInput,
String script) throws IOException {
makeInput(fs, isEmptyInput ? "" : input);
// create script file
DataOutputStream file = fs.create(new Path(scriptFileName));
file.writeBytes(script);
file.close();
}
protected String[] genArgs(String jobtracker, String mapper, String reducer)
{
return new String[] {
"-input", INPUT_FILE,
"-output", OUTPUT_DIR,
"-mapper", mapper,
"-reducer", reducer,
"-jobconf", MRJobConfig.NUM_MAPS + "=1",
"-jobconf", MRJobConfig.NUM_REDUCES + "=1",
"-jobconf", MRJobConfig.PRESERVE_FAILED_TASK_FILES + "=true",
"-jobconf", "stream.tmpdir=" + new Path(TEST_ROOT_DIR).toUri().getPath(),
"-jobconf", JTConfig.JT_IPC_ADDRESS + "="+jobtracker,
"-jobconf", "fs.default.name=file:///",
"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
"-jobconf", "mapreduce.framework.name=yarn"
};
}
// create input file with the given content
public void makeInput(FileSystem fs, String input) throws IOException {
Path inFile = new Path(INPUT_FILE);
DataOutputStream file = fs.create(inFile);
file.writeBytes(input);
file.close();
}
// Delete output directory
protected void deleteOutDir(FileSystem fs) {
try {
Path outDir = new Path(OUTPUT_DIR);
fs.delete(outDir, true);
} catch (Exception e) {}
}
// Delete input file, script file and output directory
public void clean(FileSystem fs) {
deleteOutDir(fs);
try {
Path file = new Path(INPUT_FILE);
if (fs.exists(file)) {
fs.delete(file, false);
}
file = new Path(scriptFile);
if (fs.exists(file)) {
fs.delete(file, false);
}
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Check if mapper/reducer with empty/nonempty input works properly if
* reporting is done using lines like "reporter:status:" and
* "reporter:counter:" before map()/reduce() method is called.
* Validate the task's log of STDERR if messages are written
* to stderr before map()/reduce() is called.
* Also validate job output.
*
* @throws IOException
*/
@Test
public void testReporting() throws Exception {
testStreamJob(false);// nonempty input
testStreamJob(true);// empty input
}
/**
   * Run a streaming job with the test script as mapper and validate.
   * Run another streaming job with the test script as reducer and validate.
   *
   * @param isEmptyInput Should the input to the script be empty?
*/
private void testStreamJob(boolean isEmptyInput)
throws IOException {
createInputAndScript(isEmptyInput, script);
// Check if streaming mapper works as expected
map = scriptFileName;
reduce = "/bin/cat";
runStreamJob(TaskType.MAP, isEmptyInput);
deleteOutDir(fs);
// Check if streaming reducer works as expected.
map = "/bin/cat";
reduce = scriptFileName;
runStreamJob(TaskType.REDUCE, isEmptyInput);
clean(fs);
}
// Run streaming job for the specified input file, mapper and reducer and
// (1) Validate if the job succeeds.
// (2) Validate if user counter is incremented properly for the cases of
// (a) nonempty input to map
// (b) empty input to map and
// (c) nonempty input to reduce
// (3) Validate task status for the cases of (2)(a),(2)(b),(2)(c).
// Because empty input to reduce task => reporter is dummy and ignores
// all "reporter:status" and "reporter:counter" lines.
// (4) Validate stderr of task of given task type.
// (5) Validate job output
void runStreamJob(TaskType type, boolean isEmptyInput) throws IOException {
boolean mayExit = false;
StreamJob job = new StreamJob(genArgs(
mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS), map, reduce), mayExit);
int returnValue = job.go();
assertEquals(0, returnValue);
// If input to reducer is empty, dummy reporter(which ignores all
// reporting lines) is set for MRErrorThread in waitOutputThreads(). So
// expectedCounterValue is 0 for empty-input-to-reducer case.
// Output of reducer is also empty for empty-input-to-reducer case.
int expectedCounterValue = 0;
if (type == TaskType.MAP || !isEmptyInput) {
validateTaskStatus(job, type);
// output is from "print STDOUT" statements in perl script
validateJobOutput(job.getConf());
expectedCounterValue = 2;
}
validateUserCounter(job, expectedCounterValue);
validateTaskStderr(job, type);
deleteOutDir(fs);
}
// validate task status of task of given type(validates 1st task of that type)
void validateTaskStatus(StreamJob job, TaskType type) throws IOException {
// Map Task has 2 phases: map, sort
// Reduce Task has 3 phases: copy, sort, reduce
String finalPhaseInTask;
TaskReport[] reports;
if (type == TaskType.MAP) {
reports = job.jc_.getMapTaskReports(job.jobId_);
finalPhaseInTask = "sort";
} else {// reduce task
reports = job.jc_.getReduceTaskReports(job.jobId_);
finalPhaseInTask = "reduce";
}
assertEquals(1, reports.length);
assertEquals(expectedStatus + " > " + finalPhaseInTask,
reports[0].getState());
}
// Validate the job output
void validateJobOutput(Configuration conf)
throws IOException {
String output = MapReduceTestUtil.readOutput(
new Path(OUTPUT_DIR), conf).trim();
    assertEquals(expectedOutput, output);
}
// Validate stderr task log of given task type(validates 1st
// task of that type).
void validateTaskStderr(StreamJob job, TaskType type)
throws IOException {
TaskAttemptID attemptId =
new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);
String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
attemptId, false);
    // trim() is called on expectedStderr here because
    // MapReduceTestUtil.readTaskLog() returns a trimmed String.
    assertEquals(expectedStderr.trim(), log);
}
// Validate if user counter is incremented properly
void validateUserCounter(StreamJob job, int expectedCounterValue)
throws IOException {
Counters counters = job.running_.getCounters();
assertEquals(expectedCounterValue, counters.findCounter(
"myOwnCounterGroup", "myOwnCounter").getValue());
}
}
| 11,662 | 34.557927 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/ValueCountReduce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.Date;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
public class ValueCountReduce implements Reducer {
/**
* @param args
*/
public static void main(String[] args) {
    // unused: this class is only run as a reducer by the streaming framework
}
public void reduce(Object arg0, Iterator arg1, OutputCollector arg2, Reporter arg3) throws IOException {
int count = 0;
while (arg1.hasNext()) {
count += 1;
arg1.next();
}
arg2.collect(arg0, new Text("" + count));
}
public void configure(JobConf arg0) {
    // no configuration needed
}
public void close() throws IOException {
    // nothing to clean up
}
}
| 1,802 | 26.738462 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
public class RawBytesReduceApp {
private DataInputStream dis;
public RawBytesReduceApp() {
dis = new DataInputStream(System.in);
}
public void go() throws IOException {
String prevKey = null;
int sum = 0;
String key = readString();
while (key != null) {
if (prevKey != null && !key.equals(prevKey)) {
System.out.println(prevKey + "\t" + sum);
sum = 0;
}
sum += readInt();
prevKey = key;
key = readString();
}
System.out.println(prevKey + "\t" + sum);
System.out.flush();
}
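  // A hedged sketch of what go() produces: given the sorted, length-prefixed
  // stream written by RawBytesMapApp (for example the keys "are", "are",
  // "blue", each followed by the value 1), the loop above prints one
  // "key<TAB>sum" text line per distinct key, e.g. "are\t2" then "blue\t1".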
public static void main(String[] args) throws IOException {
RawBytesReduceApp app = new RawBytesReduceApp();
app.go();
}
private String readString() throws IOException {
int length;
try {
length = dis.readInt();
} catch (EOFException eof) {
return null;
}
byte[] bytes = new byte[length];
dis.readFully(bytes);
return new String(bytes, "UTF-8");
}
private int readInt() throws IOException {
dis.readInt(); // ignore (we know it's 4)
IntWritable iw = new IntWritable();
iw.readFields(dis);
return iw.get();
}
}
| 2,136 | 27.493333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.File;
import java.io.IOException;
import java.io.DataOutputStream;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.hdfs.MiniDFSCluster;
/**
 * This class tests the cacheArchive option of streaming.
 * The test case creates 2 archive files, ships them with hadoop
 * streaming and compares the output with the expected output.
*/
public class TestMultipleArchiveFiles extends TestStreaming
{
private static final Log LOG = LogFactory.getLog(TestMultipleArchiveFiles.class);
private StreamJob job;
private String INPUT_DIR = "multiple-archive-files/";
private String INPUT_FILE = INPUT_DIR + "input.txt";
private String CACHE_ARCHIVE_1 = INPUT_DIR + "cacheArchive1.zip";
private File CACHE_FILE_1 = null;
private String CACHE_ARCHIVE_2 = INPUT_DIR + "cacheArchive2.zip";
private File CACHE_FILE_2 = null;
private String expectedOutput = null;
private String OUTPUT_DIR = "out";
private Configuration conf = null;
private MiniDFSCluster dfs = null;
private MiniMRCluster mr = null;
private FileSystem fileSys = null;
private String namenode = null;
public TestMultipleArchiveFiles() throws Exception {
CACHE_FILE_1 = new File("cacheArchive1");
CACHE_FILE_2 = new File("cacheArchive2");
input = "HADOOP";
expectedOutput = "HADOOP\t\nHADOOP\t\n";
conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
namenode = fileSys.getUri().getAuthority();
mr = new MiniMRCluster(1, namenode, 1);
map = XARGS_CAT;
reduce = CAT;
}
@Override
protected void setInputOutput() {
inputFile = INPUT_FILE;
outDir = OUTPUT_DIR;
}
protected void createInput() throws IOException
{
fileSys.delete(new Path(INPUT_DIR), true);
DataOutputStream dos = fileSys.create(new Path(INPUT_FILE));
String inputFileString = "symlink1" + File.separator
+ "cacheArchive1\nsymlink2" + File.separator + "cacheArchive2";
dos.write(inputFileString.getBytes("UTF-8"));
dos.close();
DataOutputStream out = fileSys.create(new Path(CACHE_ARCHIVE_1.toString()));
ZipOutputStream zos = new ZipOutputStream(out);
ZipEntry ze = new ZipEntry(CACHE_FILE_1.toString());
zos.putNextEntry(ze);
zos.write(input.getBytes("UTF-8"));
zos.closeEntry();
zos.close();
out = fileSys.create(new Path(CACHE_ARCHIVE_2.toString()));
zos = new ZipOutputStream(out);
ze = new ZipEntry(CACHE_FILE_2.toString());
zos.putNextEntry(ze);
zos.write(input.getBytes("UTF-8"));
zos.closeEntry();
zos.close();
}
protected String[] genArgs() {
String workDir = fileSys.getWorkingDirectory().toString() + "/";
String cache1 = workDir + CACHE_ARCHIVE_1 + "#symlink1";
String cache2 = workDir + CACHE_ARCHIVE_2 + "#symlink2";
for (Map.Entry<String, String> entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
args.add("-jobconf");
args.add("mapreduce.job.reduces=1");
args.add("-cacheArchive");
args.add(cache1);
args.add("-cacheArchive");
args.add(cache2);
args.add("-jobconf");
args.add("mapred.jar=" + STREAMING_JAR);
return super.genArgs();
}
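  // Assumed DistributedCache behaviour relied on above: a -cacheArchive value
  // of the form <path>#symlink1 unpacks the archive and exposes it under
  // ./symlink1 in the task's working directory, which is why the input lines
  // written in createInput() refer to "symlink1/cacheArchive1" and
  // "symlink2/cacheArchive2".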
protected void checkOutput() throws IOException {
StringBuffer output = new StringBuffer(256);
Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
new Path(OUTPUT_DIR)));
for (int i = 0; i < fileList.length; i++){
LOG.info("Adding output from file: " + fileList[i]);
output.append(StreamUtil.slurpHadoop(fileList[i], fileSys));
}
assertOutput(expectedOutput, output.toString());
}
}
| 4,895 | 33.971429 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* This class tests hadoop Streaming's StreamJob class.
*/
public class TestStreamJob {
@Test(expected = IllegalArgumentException.class)
public void testCreateJobWithExtraArgs() throws IOException {
ArrayList<String> dummyArgs = new ArrayList<String>();
dummyArgs.add("-input"); dummyArgs.add("dummy");
dummyArgs.add("-output"); dummyArgs.add("dummy");
dummyArgs.add("-mapper"); dummyArgs.add("dummy");
dummyArgs.add("dummy");
dummyArgs.add("-reducer"); dummyArgs.add("dummy");
StreamJob.createJob(dummyArgs.toArray(new String[] {}));
}
@Test
public void testCreateJob() throws IOException {
JobConf job;
ArrayList<String> dummyArgs = new ArrayList<String>();
dummyArgs.add("-input"); dummyArgs.add("dummy");
dummyArgs.add("-output"); dummyArgs.add("dummy");
dummyArgs.add("-mapper"); dummyArgs.add("dummy");
dummyArgs.add("-reducer"); dummyArgs.add("dummy");
ArrayList<String> args;
args = new ArrayList<String>(dummyArgs);
args.add("-inputformat");
args.add("org.apache.hadoop.mapred.KeyValueTextInputFormat");
job = StreamJob.createJob(args.toArray(new String[] {}));
assertEquals(KeyValueTextInputFormat.class, job.getInputFormat().getClass());
args = new ArrayList<String>(dummyArgs);
args.add("-inputformat");
args.add("org.apache.hadoop.mapred.SequenceFileInputFormat");
job = StreamJob.createJob(args.toArray(new String[] {}));
assertEquals(SequenceFileInputFormat.class, job.getInputFormat().getClass());
args = new ArrayList<String>(dummyArgs);
args.add("-inputformat");
args.add("org.apache.hadoop.mapred.KeyValueTextInputFormat");
args.add("-inputreader");
args.add("StreamXmlRecordReader,begin=<doc>,end=</doc>");
job = StreamJob.createJob(args.toArray(new String[] {}));
assertEquals(StreamInputFormat.class, job.getInputFormat().getClass());
}
@Test
public void testOptions() throws Exception {
StreamJob streamJob = new StreamJob();
assertEquals(1, streamJob.run(new String[0]));
assertEquals(0, streamJob.run(new String[] {"-help"}));
assertEquals(0, streamJob.run(new String[] {"-info"}));
}
}
| 3,316 | 37.569767 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import static org.junit.Assert.*;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
public class TestUnconsumedInput {
protected final int EXPECTED_OUTPUT_SIZE = 10000;
protected File INPUT_FILE = new File("stream_uncinput_input.txt");
protected File OUTPUT_DIR = new File("stream_uncinput_out");
  // map (OutputOnlyApp) ignores its input and emits EXPECTED_OUTPUT_SIZE records.
protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
protected String map = UtilTest.makeJavaCommand(OutputOnlyApp.class,
new String[]{Integer.toString(EXPECTED_OUTPUT_SIZE)});
private StreamJob job;
public TestUnconsumedInput() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException
{
DataOutputStream out = new DataOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
for (int i=0; i<10000; ++i) {
out.write(input.getBytes("UTF-8"));
}
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", "org.apache.hadoop.mapred.lib.IdentityReducer",
"-numReduceTasks", "0",
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
};
}
@Test
public void testUnconsumedInput() throws Exception
{
String outFileName = "part-00000";
File outFile = null;
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
createInput();
// setup config to ignore unconsumed input
Configuration conf = new Configuration();
conf.set("stream.minRecWrittenToEnableSkip_", "0");
job = new StreamJob();
job.setConf(conf);
int exitCode = job.run(genArgs());
assertEquals("Job failed", 0, exitCode);
outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
StringUtils.countMatches(output, "\t"));
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
}
| 3,658 | 32.87963 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestDumpTypedBytes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.streaming.DumpTypedBytes;
import org.apache.hadoop.typedbytes.TypedBytesInput;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestDumpTypedBytes {
@Test
public void testDumping() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
FileSystem fs = cluster.getFileSystem();
PrintStream psBackup = System.out;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream psOut = new PrintStream(out);
System.setOut(psOut);
DumpTypedBytes dumptb = new DumpTypedBytes(conf);
try {
Path root = new Path("/typedbytestest");
assertTrue(fs.mkdirs(root));
assertTrue(fs.exists(root));
OutputStreamWriter writer = new OutputStreamWriter(fs.create(new Path(
root, "test.txt")));
try {
for (int i = 0; i < 100; i++) {
writer.write("" + (10 * i) + "\n");
}
} finally {
writer.close();
}
String[] args = new String[1];
args[0] = "/typedbytestest";
int ret = dumptb.run(args);
assertEquals("Return value != 0.", 0, ret);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(in));
int counter = 0;
Object key = tbinput.read();
while (key != null) {
assertEquals(Long.class, key.getClass()); // offset
Object value = tbinput.read();
assertEquals(String.class, value.getClass());
assertTrue("Invalid output.",
Integer.parseInt(value.toString()) % 10 == 0);
counter++;
key = tbinput.read();
}
assertEquals("Wrong number of outputs.", 100, counter);
} finally {
try {
fs.close();
} catch (Exception e) {
}
System.setOut(psBackup);
cluster.shutdown();
}
}
}
| 3,162 | 32.294737 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import org.apache.hadoop.io.IntWritable;
public class RawBytesMapApp {
private String find;
private DataOutputStream dos;
public RawBytesMapApp(String find) {
this.find = find;
dos = new DataOutputStream(System.out);
}
public void go() throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
while ((line = in.readLine()) != null) {
for (String part : line.split(find)) {
writeString(part); // write key
writeInt(1); // write value
}
}
System.out.flush();
}
public static void main(String[] args) throws IOException {
RawBytesMapApp app = new RawBytesMapApp(args[0].replace(".","\\."));
app.go();
}
private void writeString(String str) throws IOException {
byte[] bytes = str.getBytes("UTF-8");
dos.writeInt(bytes.length);
dos.write(bytes);
}
private void writeInt(int i) throws IOException {
dos.writeInt(4);
IntWritable iw = new IntWritable(i);
iw.write(dos);
}
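  // Illustrative byte layout of each key/value pair this app writes (and that
  // RawBytesReduceApp reads back): a 4-byte big-endian length followed by the
  // UTF-8 key bytes, then a 4-byte length of 4 followed by the serialized
  // IntWritable value.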
}
| 2,023 | 29.666667 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamReduceNone.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import static org.junit.Assert.*;
import org.junit.Test;
/**
* This class tests hadoopStreaming in MapReduce local mode.
 * It tests the case where the number of reducers is set to 0.
 * In this case, the mappers are expected to write out outputs directly.
 * No reducer/combiner will be activated.
*/
public class TestStreamReduceNone
{
protected File INPUT_FILE = new File("stream_reduce_none_input.txt");
protected File OUTPUT_DIR = new File("stream_reduce_none_out");
protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
// map parses input lines and generates count entries for each word.
protected String map = UtilTest.makeJavaCommand(TrApp.class, new String[]{".", "\\n"});
protected String outputExpect = "roses\t\nare\t\nred\t\nviolets\t\nare\t\nblue\t\nbunnies\t\nare\t\npink\t\n";
private StreamJob job;
public TestStreamReduceNone() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException
{
DataOutputStream out = new DataOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", "org.apache.hadoop.mapred.lib.IdentityReducer",
"-numReduceTasks", "0",
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "mapreduce.job.maps=1",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
};
}
@Test
public void testCommandLine() throws Exception
{
String outFileName = "part-00000";
File outFile = null;
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
createInput();
boolean mayExit = false;
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
job = new StreamJob(genArgs(), mayExit);
job.go();
outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
assertEquals(outputExpect, output);
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
public static void main(String[]args) throws Exception
{
new TestStreamReduceNone().testCommandLine();
}
}
| 3,797 | 33.216216 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
/**
* This class tests StreamXmlRecordReader
* The test creates an XML file, uses StreamXmlRecordReader and compares
* the expected output against the generated output
*/
public class TestStreamXmlRecordReader extends TestStreaming {
public TestStreamXmlRecordReader() throws IOException {
INPUT_FILE = new File("target/input.xml");
input = "<xmltag>\t\nroses.are.red\t\nviolets.are.blue\t\n" +
"bunnies.are.pink\t\n</xmltag>\t\n";
map = CAT;
reduce = "NONE";
outputExpect = input;
}
@Override
protected void createInput() throws IOException
{
FileOutputStream out = new FileOutputStream(INPUT_FILE.getAbsoluteFile());
String dummyXmlStartTag = "<PATTERN>\n";
String dummyXmlEndTag = "</PATTERN>\n";
out.write(dummyXmlStartTag.getBytes("UTF-8"));
out.write(input.getBytes("UTF-8"));
out.write(dummyXmlEndTag.getBytes("UTF-8"));
out.close();
}
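  // Assumed reader behaviour exercised here: StreamXmlRecordReader scans for
  // begin=<xmltag> and end=</xmltag> (configured in genArgs() below) and hands
  // each whole span, tags included, to the mapper as one record, so the dummy
  // <PATTERN> wrapper written above never reaches the mapper.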
@Override
protected String[] genArgs() {
args.add("-inputreader");
args.add("StreamXmlRecordReader,begin=<xmltag>,end=</xmltag>");
args.add("-jobconf");
args.add("mapreduce.job.maps=1");
return super.genArgs();
}
}
| 2,088 | 32.693548 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.GZIPOutputStream;
/**
* This class tests gzip input streaming in MapReduce local mode.
*/
public class TestGzipInput extends TestStreaming
{
public TestGzipInput() throws IOException {
INPUT_FILE = new File(TEST_DIR, "input.txt.gz");
}
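  // Assumed here: the ".gz" extension alone is enough for the text input
  // format to route the file through the gzip codec, so no job configuration
  // is added beyond what TestStreaming already sets up.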
protected void createInput() throws IOException
{
GZIPOutputStream out = new GZIPOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
}
| 1,425 | 31.409091 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCombiner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import org.apache.hadoop.mapred.Counters;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestStreamingCombiner extends TestStreaming {
protected String combine = UtilTest.makeJavaCommand(
UniqApp.class, new String[]{""});
public TestStreamingCombiner() throws IOException {
super();
}
protected String[] genArgs() {
args.add("-combiner");
args.add(combine);
return super.genArgs();
}
@Test
public void testCommandLine() throws Exception {
super.testCommandLine();
// validate combiner counters
String counterGrp = "org.apache.hadoop.mapred.Task$Counter";
Counters counters = job.running_.getCounters();
assertTrue(counters.findCounter(
counterGrp, "COMBINE_INPUT_RECORDS").getValue() != 0);
assertTrue(counters.findCounter(
counterGrp, "COMBINE_OUTPUT_RECORDS").getValue() != 0);
}
}
| 1,784 | 31.454545 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamDataProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* This class tests hadoopStreaming in MapReduce local mode.
*/
public class TestStreamDataProtocol
{
// "map" command: grep -E (red|green|blue)
// reduce command: uniq
protected File INPUT_FILE = new File("input_for_data_protocol_test.txt");
protected File OUTPUT_DIR = new File("out_for_data_protocol_test");
protected String input = "roses.smell.good\nroses.look.good\nroses.need.care\nroses.attract.bees\nroses.are.red\nroses.are.not.blue\nbunnies.are.pink\nbunnies.run.fast\nbunnies.have.short.tail\nbunnies.have.long.ears\n";
// map behaves like "/usr/bin/cat";
protected String map = UtilTest.makeJavaCommand(TrApp.class, new String[]{".", "."});
// reduce counts the number of values for each key
protected String reduce = "org.apache.hadoop.streaming.ValueCountReduce";
protected String outputExpect = "bunnies.are\t1\nbunnies.have\t2\nbunnies.run\t1\nroses.are\t2\nroses.attract\t1\nroses.look\t1\nroses.need\t1\nroses.smell\t1\n";
private StreamJob job;
public TestStreamDataProtocol() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException
{
DataOutputStream out = new DataOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", reduce,
"-partitioner", KeyFieldBasedPartitioner.class.getCanonicalName(),
"-jobconf", "stream.map.output.field.separator=.",
"-jobconf", "stream.num.map.output.key.fields=2",
"-jobconf", "mapreduce.map.output.key.field.separator=.",
"-jobconf", "num.key.fields.for.partition=1",
"-jobconf", "mapreduce.job.reduces=2",
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
};
}
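  // Hedged walk-through of the settings above: a map output line such as
  // "roses.smell.good" is split on '.', the first two fields ("roses.smell")
  // become the key and the rest the value, while num.key.fields.for.partition=1
  // makes KeyFieldBasedPartitioner hash only the first key field ("roses"),
  // so all "roses.*" keys land in the same reducer even with two reducers.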
@Test
public void testCommandLine() throws Exception
{
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
try {
createInput();
boolean mayExit = false;
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
job = new StreamJob(genArgs(), mayExit);
job.go();
File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
outFile.delete();
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
System.err.println(" equals=" + outputExpect.compareTo(output));
assertEquals(outputExpect, output);
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
public static void main(String[]args) throws Exception
{
new TestStreamDataProtocol().testCommandLine();
}
}
| 4,324 | 35.652542 | 222 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TrApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import org.apache.hadoop.streaming.Environment;
/** A minimal Java implementation of /usr/bin/tr.
* Used to test the usage of external applications without adding
* platform-specific dependencies.
* Use TrApp as mapper only. For reducer, use TrAppReduce.
*/
public class TrApp
{
public TrApp(char find, char replace)
{
this.find = find;
this.replace = replace;
}
void testParentJobConfToEnvVars() throws IOException
{
env = new Environment();
// test that some JobConf properties are exposed as expected
// Note the dots translated to underscore:
// property names have been escaped in PipeMapRed.safeEnvVarName()
expectDefined("mapreduce_cluster_local_dir");
expect("mapreduce_map_output_key_class", "org.apache.hadoop.io.Text");
expect("mapreduce_map_output_value_class", "org.apache.hadoop.io.Text");
expect("mapreduce_task_ismap", "true");
expectDefined("mapreduce_task_attempt_id");
expectDefined("mapreduce_map_input_file");
expectDefined("mapreduce_map_input_length");
expectDefined("mapreduce_task_io_sort_factor");
    // The FileSplit context properties are not available in local hadoop,
    // so they can't be checked in this test.
// verify some deprecated properties appear for older stream jobs
expect("map_input_file", env.getProperty("mapreduce_map_input_file"));
expect("map_input_length", env.getProperty("mapreduce_map_input_length"));
}
// this runs in a subprocess; won't use JUnit's assertTrue()
void expect(String evName, String evVal) throws IOException
{
String got = env.getProperty(evName);
if (!evVal.equals(got)) {
String msg = "FAIL evName=" + evName + " got=" + got + " expect=" + evVal;
throw new IOException(msg);
}
}
void expectDefined(String evName) throws IOException
{
String got = env.getProperty(evName);
if (got == null) {
String msg = "FAIL evName=" + evName + " is undefined. Expect defined.";
throw new IOException(msg);
}
}
public void go() throws IOException
{
testParentJobConfToEnvVars();
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
while ((line = in.readLine()) != null) {
String out = line.replace(find, replace);
System.out.println(out);
System.err.println("reporter:counter:UserCounters,InputLines,1");
}
}
public static void main(String[] args) throws IOException
{
args[0] = CUnescape(args[0]);
args[1] = CUnescape(args[1]);
TrApp app = new TrApp(args[0].charAt(0), args[1].charAt(0));
app.go();
}
public static String CUnescape(String s)
{
if (s.equals("\\n")) {
return "\n";
} else {
return s;
}
}
char find;
char replace;
Environment env;
}
| 3,678 | 30.444444 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestAutoInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.streaming.AutoInputFormat;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestAutoInputFormat {
private static Configuration conf = new Configuration();
private static final int LINES_COUNT = 3;
private static final int RECORDS_COUNT = 3;
private static final int SPLITS_COUNT = 2;
@SuppressWarnings( { "unchecked", "deprecation" })
@Test
public void testFormat() throws IOException {
JobConf job = new JobConf(conf);
FileSystem fs = FileSystem.getLocal(conf);
Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
Path txtFile = new Path(dir, "auto.txt");
Path seqFile = new Path(dir, "auto.seq");
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
Writer txtWriter = new OutputStreamWriter(fs.create(txtFile));
try {
for (int i = 0; i < LINES_COUNT; i++) {
txtWriter.write("" + (10 * i));
txtWriter.write("\n");
}
} finally {
txtWriter.close();
}
SequenceFile.Writer seqWriter = SequenceFile.createWriter(fs, conf,
seqFile, IntWritable.class, LongWritable.class);
try {
for (int i = 0; i < RECORDS_COUNT; i++) {
IntWritable key = new IntWritable(11 * i);
LongWritable value = new LongWritable(12 * i);
seqWriter.append(key, value);
}
} finally {
seqWriter.close();
}
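    // Assumed AutoInputFormat behaviour being exercised: it inspects each file
    // and uses the text input path for plain text (keys are byte offsets,
    // hence LongWritable/Text below) and the sequence-file path for files with
    // a sequence-file header (IntWritable/LongWritable below).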
AutoInputFormat format = new AutoInputFormat();
InputSplit[] splits = format.getSplits(job, SPLITS_COUNT);
for (InputSplit split : splits) {
RecordReader reader = format.getRecordReader(split, job, Reporter.NULL);
Object key = reader.createKey();
Object value = reader.createValue();
try {
while (reader.next(key, value)) {
if (key instanceof LongWritable) {
assertEquals("Wrong value class.", Text.class, value.getClass());
assertTrue("Invalid value", Integer.parseInt(((Text) value)
.toString()) % 10 == 0);
} else {
assertEquals("Wrong key class.", IntWritable.class, key.getClass());
assertEquals("Wrong value class.", LongWritable.class, value
.getClass());
assertTrue("Invalid key.", ((IntWritable) key).get() % 11 == 0);
assertTrue("Invalid value.", ((LongWritable) value).get() % 12 == 0);
}
}
} finally {
reader.close();
}
}
}
}
| 3,900 | 33.219298 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingSeparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
/**
* This class tests hadoopStreaming with customized separator in MapReduce local mode.
*/
public class TestStreamingSeparator
{
// "map" command: grep -E (red|green|blue)
// reduce command: uniq
protected File INPUT_FILE = new File("TestStreamingSeparator.input.txt");
protected File OUTPUT_DIR = new File("TestStreamingSeparator.out");
protected String input = "roses1are.red\nviolets1are.blue\nbunnies1are.pink\n";
// mapreduce.input.keyvaluelinerecordreader.key.value.separator reads 1 as separator
// stream.map.input.field.separator uses 2 as separator
// map behaves like "/usr/bin/tr 2 3"; (translate 2 to 3)
protected String map = UtilTest.makeJavaCommand(TrApp.class, new String[]{"2", "3"});
// stream.map.output.field.separator recognize 3 as separator
// stream.reduce.input.field.separator recognize 3 as separator
// reduce behaves like "/usr/bin/tr 3 4"; (translate 3 to 4)
protected String reduce = UtilTest.makeJavaCommand(TrAppReduce.class, new String[]{"3", "4"});
// stream.reduce.output.field.separator recognize 4 as separator
// mapreduce.output.textoutputformat.separator outputs 5 as separator
protected String outputExpect = "bunnies5are.pink\nroses5are.red\nviolets5are.blue\n";
private StreamJob job;
public TestStreamingSeparator() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException
{
DataOutputStream out = new DataOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", reduce,
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-inputformat", "KeyValueTextInputFormat",
"-jobconf", "mapreduce.input.keyvaluelinerecordreader.key.value.separator=1",
"-jobconf", "stream.map.input.field.separator=2",
"-jobconf", "stream.map.output.field.separator=3",
"-jobconf", "stream.reduce.input.field.separator=3",
"-jobconf", "stream.reduce.output.field.separator=4",
"-jobconf", "mapreduce.output.textoutputformat.separator=5",
};
}
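  // Hedged trace of one record through the separators configured above:
  // KeyValueTextInputFormat splits "roses1are.red" on '1'; the key/value pair
  // is re-joined with '2' for the mapper; TrApp translates '2' to '3', giving
  // "roses3are.red"; the pair is re-joined with '3' for the reducer;
  // TrAppReduce translates '3' to '4', giving "roses4are.red"; and the output
  // format writes key and value separated by '5', yielding "roses5are.red"
  // as listed in outputExpect.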
@Test
public void testCommandLine() throws Exception
{
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
createInput();
boolean mayExit = false;
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
job = new StreamJob(genArgs(), mayExit);
job.go();
File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
outFile.delete();
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
assertEquals(outputExpect, output);
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
public static void main(String[]args) throws Exception
{
new TestStreamingSeparator().testCommandLine();
}
}
| 4,561 | 36.089431 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCounters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;
/**
* This class tests streaming counters in MapReduce local mode.
*/
public class TestStreamingCounters extends TestStreaming {
public TestStreamingCounters() throws IOException {
super();
}
@Test
public void testCommandLine() throws Exception {
super.testCommandLine();
validateCounters();
}
private void validateCounters() throws IOException {
Counters counters = job.running_.getCounters();
assertNotNull("Counters", counters);
Group group = counters.getGroup("UserCounters");
assertNotNull("Group", group);
Counter counter = group.getCounterForName("InputLines");
assertNotNull("Counter", counter);
assertEquals(3, counter.getCounter());
}
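  // Note: the "UserCounters"/"InputLines" counter checked above is presumably
  // bumped by the streaming process itself; a streaming mapper or reducer can
  // update a counter by writing a line of the form
  //   reporter:counter:UserCounters,InputLines,1
  // to its standard error stream, which the framework parses instead of
  // treating it as ordinary log output.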
}
| 1,781 | 32 | 75 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBackground.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.junit.Before;
import org.junit.Test;
/**
 * This class tests whether hadoopStreaming's -background option works
 * correctly. A DelayEchoApp with a 10-second delay is submitted.
*/
public class TestStreamingBackground {
protected File TEST_DIR = new File("target/TestStreamingBackground")
.getAbsoluteFile();
protected File INPUT_FILE = new File(TEST_DIR, "input.txt");
protected File OUTPUT_DIR = new File(TEST_DIR, "out");
protected String tenSecondsTask = UtilTest.makeJavaCommand(
DelayEchoApp.class, new String[] { "10" });
public TestStreamingBackground() throws IOException {
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected String[] args = new String[] {
"-background",
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", tenSecondsTask,
"-reducer", tenSecondsTask,
"-jobconf", "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp"),
"-jobconf", "mapreduce.task.io.sort.mb=10"
};
@Before
public void setUp() throws IOException {
UtilTest.recursiveDelete(TEST_DIR);
assertTrue(TEST_DIR.mkdirs());
FileOutputStream out = new FileOutputStream(INPUT_FILE.getAbsoluteFile());
out.write("hello\n".getBytes());
out.close();
}
public void runStreamJob() throws Exception {
boolean mayExit = false;
int returnStatus = 0;
StreamJob job = new StreamJob(args, mayExit);
returnStatus = job.go();
assertEquals("Streaming Job expected to succeed", 0, returnStatus);
job.running_.killJob();
job.running_.waitForCompletion();
}
@Test
public void testBackgroundSubmitOk() throws Exception {
runStreamJob();
}
}
| 2,801 | 30.840909 | 83 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStderr.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Test that streaming consumes stderr from the streaming process
* (before, during, and after the main processing of mapred input),
* and that stderr messages count as task progress.
*/
public class TestStreamingStderr
{
public TestStreamingStderr() throws IOException {
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected String[] genArgs(File input, File output, int preLines, int duringLines, int postLines) {
return new String[] {
"-input", input.getAbsolutePath(),
"-output", output.getAbsolutePath(),
"-mapper", UtilTest.makeJavaCommand(StderrApp.class,
new String[]{Integer.toString(preLines),
Integer.toString(duringLines),
Integer.toString(postLines)}),
"-reducer", StreamJob.REDUCE_NONE,
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "mapreduce.task.timeout=5000",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
};
}
protected File setupInput(String base, boolean hasInput) throws IOException {
File input = new File(base + "-input.txt");
UtilTest.recursiveDelete(input);
FileOutputStream in = new FileOutputStream(input.getAbsoluteFile());
if (hasInput) {
in.write("hello\n".getBytes());
}
in.close();
return input;
}
protected File setupOutput(String base) throws IOException {
File output = new File(base + "-out");
UtilTest.recursiveDelete(output);
return output;
}
public void runStreamJob(String baseName, boolean hasInput,
int preLines, int duringLines, int postLines)
throws Exception {
File input = setupInput(baseName, hasInput);
File output = setupOutput(baseName);
boolean mayExit = false;
int returnStatus = 0;
StreamJob job = new StreamJob(genArgs(input, output, preLines, duringLines, postLines), mayExit);
returnStatus = job.go();
assertEquals("StreamJob success", 0, returnStatus);
}
// This test will fail by blocking forever if the stderr isn't
// consumed by Hadoop for tasks that don't have any input.
@Test
public void testStderrNoInput() throws Exception {
runStreamJob("target/stderr-pre", false, 10000, 0, 0);
}
// Streaming should continue to read stderr even after all input has
// been consumed.
@Test
public void testStderrAfterOutput() throws Exception {
runStreamJob("target/stderr-post", false, 0, 0, 10000);
}
// This test should produce a task timeout if stderr lines aren't
// counted as progress. This won't actually work until
// LocalJobRunner supports timeouts.
@Test
public void testStderrCountsAsProgress() throws Exception {
runStreamJob("target/stderr-progress", true, 10, 1000, 0);
}
}
| 4,045 | 35.45045 | 101 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.Iterator;
/**
* Tests stream job with java tasks, commands in MapReduce local mode.
* Validates if user-set config properties
* {@link MRJobConfig#MAP_OUTPUT_KEY_CLASS} and
* {@link MRJobConfig#OUTPUT_KEY_CLASS} are honored by streaming jobs.
*/
public class TestStreamingOutputKeyValueTypes extends TestStreaming {
public TestStreamingOutputKeyValueTypes() throws IOException {
super();
input = "one line dummy input\n";
}
@Before
@Override
public void setUp() throws IOException {
args.clear();
super.setUp();
}
@Override
protected String[] genArgs() {
// set the testcase-specific config properties first and the remaining
// arguments are set in TestStreaming.genArgs().
args.add("-jobconf");
args.add(MRJobConfig.MAP_OUTPUT_KEY_CLASS +
"=org.apache.hadoop.io.LongWritable");
args.add("-jobconf");
args.add(MRJobConfig.OUTPUT_KEY_CLASS +
"=org.apache.hadoop.io.LongWritable");
// Using SequenceFileOutputFormat here because with TextOutputFormat, the
// mapred.output.key.class set in JobConf (which we want to test here) is
// not read/used at all.
args.add("-outputformat");
args.add("org.apache.hadoop.mapred.SequenceFileOutputFormat");
return super.genArgs();
}
@Override
protected void checkOutput() throws IOException {
// No need to validate output for the test cases in this class
}
public static class MyReducer<K, V>
extends MapReduceBase implements Reducer<K, V, LongWritable, Text> {
public void reduce(K key, Iterator<V> values,
OutputCollector<LongWritable, Text> output, Reporter reporter)
throws IOException {
LongWritable l = new LongWritable();
while (values.hasNext()) {
output.collect(l, new Text(values.next().toString()));
}
}
}
// Check with Java Mapper, Java Reducer
@Test
public void testJavaMapperAndJavaReducer() throws Exception {
map = "org.apache.hadoop.mapred.lib.IdentityMapper";
reduce = "org.apache.hadoop.mapred.lib.IdentityReducer";
super.testCommandLine();
}
// Check with Java Mapper, Java Reducer and -numReduceTasks 0
@Test
public void testJavaMapperAndJavaReducerAndZeroReduces() throws Exception {
map = "org.apache.hadoop.mapred.lib.IdentityMapper";
reduce = "org.apache.hadoop.mapred.lib.IdentityReducer";
args.add("-numReduceTasks");
args.add("0");
super.testCommandLine();
}
// Check with Java Mapper, Reducer = "NONE"
@Test
public void testJavaMapperWithReduceNone() throws Exception {
map = "org.apache.hadoop.mapred.lib.IdentityMapper";
reduce = "NONE";
super.testCommandLine();
}
// Check with Java Mapper, command Reducer
@Test
public void testJavaMapperAndCommandReducer() throws Exception {
map = "org.apache.hadoop.mapred.lib.IdentityMapper";
reduce = CAT;
super.testCommandLine();
}
// Check with Java Mapper, command Reducer and -numReduceTasks 0
@Test
public void testJavaMapperAndCommandReducerAndZeroReduces() throws Exception {
map = "org.apache.hadoop.mapred.lib.IdentityMapper";
reduce = CAT;
args.add("-numReduceTasks");
args.add("0");
super.testCommandLine();
}
// Check with Command Mapper, Java Reducer
@Test
public void testCommandMapperAndJavaReducer() throws Exception {
map = CAT;
reduce = MyReducer.class.getName();
super.testCommandLine();
}
// Check with Command Mapper, Java Reducer and -numReduceTasks 0
@Test
public void testCommandMapperAndJavaReducerAndZeroReduces() throws Exception {
map = CAT;
reduce = MyReducer.class.getName();
args.add("-numReduceTasks");
args.add("0");
super.testCommandLine();
}
// Check with Command Mapper, Reducer = "NONE"
@Test
public void testCommandMapperWithReduceNone() throws Exception {
map = CAT;
reduce = "NONE";
super.testCommandLine();
}
// Check with Command Mapper, Command Reducer
@Test
public void testCommandMapperAndCommandReducer() throws Exception {
map = CAT;
reduce = CAT;
super.testCommandLine();
}
// Check with Command Mapper, Command Reducer and -numReduceTasks 0
@Test
public void testCommandMapperAndCommandReducerAndZeroReduces()
throws Exception {
map = CAT;
reduce = CAT;
args.add("-numReduceTasks");
args.add("0");
super.testCommandLine();
}
@Test
public void testDefaultToIdentityReducer() throws Exception {
args.add("-mapper");args.add(map);
args.add("-jobconf");
args.add("mapreduce.task.files.preserve.failedtasks=true");
args.add("-jobconf");
args.add("stream.tmpdir="+System.getProperty("test.build.data","/tmp"));
args.add("-inputformat");args.add(TextInputFormat.class.getName());
super.testCommandLine();
}
@Override
@Test
public void testCommandLine() {
// Do nothing
}
}
| 6,192 | 30.120603 | 80 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
/**
* This test case tests the symlink creation
* utility provided by distributed caching
*/
public class TestSymLink
{
String INPUT_FILE = "/testing-streaming/input.txt";
String OUTPUT_DIR = "/testing-streaming/out";
String CACHE_FILE = "/testing-streaming/cache.txt";
String input = "check to see if we can read this none reduce";
String map = TestStreaming.XARGS_CAT;
String reduce = TestStreaming.CAT;
String mapString = "testlink\n";
String cacheString = "This is just the cache string";
StreamJob job;
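  // Rough flow of this test: INPUT_FILE contains the single line "testlink";
  // the map command (XARGS_CAT, which behaves like "xargs cat") therefore cats
  // a file named "testlink", and the "-cacheFile ...#testlink" option below
  // exposes CACHE_FILE under that name via a symlink in the task's working
  // directory, so the job output should be cacheString (plus a trailing tab
  // for the empty value), as asserted at the end of testSymLink().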
@Test (timeout = 120000)
public void testSymLink() throws Exception
{
boolean mayExit = false;
MiniMRCluster mr = null;
MiniDFSCluster dfs = null;
try {
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = dfs.getFileSystem();
String namenode = fileSys.getUri().toString();
mr = new MiniMRCluster(1, namenode, 3);
List<String> args = new ArrayList<String>();
for (Map.Entry<String, String> entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
String argv[] = new String[] {
"-input", INPUT_FILE,
"-output", OUTPUT_DIR,
"-mapper", map,
"-reducer", reduce,
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf",
JobConf.MAPRED_MAP_TASK_JAVA_OPTS+ "=" +
"-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
"-Dbuild.test=" + System.getProperty("build.test") + " " +
conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
"-jobconf",
JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS+ "=" +
"-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
"-Dbuild.test=" + System.getProperty("build.test") + " " +
conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
"-cacheFile", fileSys.getUri() + CACHE_FILE + "#testlink",
"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
};
for (String arg : argv) {
args.add(arg);
}
argv = args.toArray(new String[args.size()]);
fileSys.delete(new Path(OUTPUT_DIR), true);
DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
file.writeBytes(mapString);
file.close();
file = fileSys.create(new Path(CACHE_FILE));
file.writeBytes(cacheString);
file.close();
job = new StreamJob(argv, mayExit);
job.go();
fileSys = dfs.getFileSystem();
String line = null;
Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
new Path(OUTPUT_DIR),
new Utils.OutputFileUtils
.OutputFilesFilter()));
for (int i = 0; i < fileList.length; i++){
System.out.println(fileList[i].toString());
BufferedReader bread =
new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
line = bread.readLine();
System.out.println(line);
}
assertEquals(cacheString + "\t", line);
} finally{
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();}
}
}
public static void main(String[]args) throws Exception
{
    new TestSymLink().testSymLink();
}
}
| 5,209 | 35.433566 | 82 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingKeyValue.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.junit.Test;
import static org.junit.Assert.*;
import java.io.*;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* This class tests hadoopStreaming in MapReduce local mode.
* This testcase looks at different cases of tab position in input.
*/
public class TestStreamingKeyValue
{
protected File INPUT_FILE = new File("target/input.txt");
protected File OUTPUT_DIR = new File("target/stream_out");
// First line of input has 'key' 'tab' 'value'
// Second line of input starts with a tab character.
// So, it has empty key and the whole line as value.
// Third line of input does not have any tab character.
// So, the whole line is the key and value is empty.
protected String input =
"roses are \tred\t\n\tviolets are blue\nbunnies are pink\n" +
"this is for testing a big\tinput line\n" +
"small input\n";
protected String outputWithoutKey =
"\tviolets are blue\nbunnies are pink\t\n" +
"roses are \tred\t\n" +
"small input\t\n" +
"this is for testing a big\tinput line\n";
protected String outputWithKey =
"0\troses are \tred\t\n" +
"16\t\tviolets are blue\n" +
"34\tbunnies are pink\n" +
"51\tthis is for testing a big\tinput line\n" +
"88\tsmall input\n";
private StreamJob job;
public TestStreamingKeyValue() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException
{
DataOutputStream out = new DataOutputStream(
new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs(boolean ignoreKey) {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", TestStreaming.CAT,
"-jobconf", MRJobConfig.PRESERVE_FAILED_TASK_FILES + "=true",
"-jobconf", "stream.non.zero.exit.is.failure=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf", "stream.map.input.ignoreKey="+ignoreKey,
};
}
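  // With stream.map.input.ignoreKey=false, TextInputFormat's key (the byte
  // offset of each line) is passed to the mapper together with the line, which
  // is why outputWithKey above starts each line with offsets such as 0, 16, 34;
  // with ignoreKey=true only the line itself reaches the mapper.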
public void runStreamJob(final String outputExpect, boolean ignoreKey)
throws Exception
{
String outFileName = "part-00000";
File outFile = null;
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
createInput();
boolean mayExit = false;
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
job = new StreamJob(genArgs(ignoreKey), mayExit);
job.go();
outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
assertEquals(outputExpect, output);
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
/**
 * Run the job indicating that the input format key should be emitted.
*/
@Test
public void testCommandLineWithKey() throws Exception
{
runStreamJob(outputWithKey, false);
}
/**
* Run the job the default way (the input format key is not emitted).
*/
@Test
public void testCommandLineWithoutKey() throws Exception
{
runStreamJob(outputWithoutKey, true);
}
public static void main(String[]args) throws Exception
{
new TestStreamingKeyValue().testCommandLineWithKey();
new TestStreamingKeyValue().testCommandLineWithoutKey();
}
}
| 4,574 | 31.678571 | 80 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/StreamAggregate.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import org.apache.hadoop.streaming.Environment;
/**
Used to test the usage of external applications without adding
platform-specific dependencies.
*/
public class StreamAggregate extends TrApp
{
public StreamAggregate()
{
super('.', ' ');
}
public void go() throws IOException
{
testParentJobConfToEnvVars();
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
while ((line = in.readLine()) != null) {
String [] words = line.split(" ");
for (int i = 0; i< words.length; i++) {
String out = "LongValueSum:" + words[i].trim() + "\t" + "1";
System.out.println(out);
}
}
}
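  // Each emitted record has the form "LongValueSum:<word>\t1"; Hadoop's
  // aggregate package (e.g. the streaming "aggregate" reducer) interprets the
  // "LongValueSum:" prefix as "add the value to the running long sum kept for
  // <word>", so downstream the job effectively counts word occurrences.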
public static void main(String[] args) throws IOException
{
TrApp app = new StreamAggregate();
app.go();
}
}
| 1,677 | 27.931034 | 77 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/OutputOnlyApp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
/**
* An application that outputs a specified number of lines
* without consuming any input.
*/
public class OutputOnlyApp {
public static void main(String[] args) throws IOException {
if (args.length < 1) {
System.err.println("Usage: OutputOnlyApp NUMRECORDS");
return;
}
int numRecords = Integer.parseInt(args[0]);
while (numRecords-- > 0) {
System.out.println("key\tvalue");
}
}
}
| 1,304 | 32.461538 | 75 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/StderrApp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
/**
* Output an arbitrary number of stderr lines before or after
* consuming the keys/values from stdin.
*/
public class StderrApp
{
/**
* Print preWriteLines to stderr, pausing sleep ms between each
* output, then consume stdin and echo it to stdout, then write
* postWriteLines to stderr.
*/
public static void go(int preWriteLines, int sleep, int postWriteLines) throws IOException {
go(preWriteLines, sleep, postWriteLines, false);
}
public static void go(int preWriteLines, int sleep, int postWriteLines, boolean status) throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
if (status) {
System.err.println("reporter:status:starting echo");
}
while (preWriteLines > 0) {
--preWriteLines;
System.err.println("some stderr output before reading input, "
+ preWriteLines + " lines remaining, sleeping " + sleep);
try {
Thread.sleep(sleep);
} catch (InterruptedException e) {}
}
while ((line = in.readLine()) != null) {
System.out.println(line);
}
while (postWriteLines > 0) {
--postWriteLines;
System.err.println("some stderr output after reading input, lines remaining "
+ postWriteLines);
}
}
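  // Note: a stderr line of the form "reporter:status:<message>" (as written
  // above when status is true) is interpreted by the streaming framework as a
  // task status update rather than plain logging, and stderr activity in
  // general counts as task progress (see TestStreamingStderr).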
public static void main(String[] args) throws IOException {
if (args.length < 3) {
System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE [STATUS]");
return;
}
int preWriteLines = Integer.parseInt(args[0]);
int sleep = Integer.parseInt(args[1]);
int postWriteLines = Integer.parseInt(args[2]);
boolean status = args.length > 3 ? Boolean.parseBoolean(args[3]) : false;
go(preWriteLines, sleep, postWriteLines, status);
}
}
| 2,702 | 33.21519 | 110 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingFailure.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
/**
 * This class tests that hadoopStreaming reports failure (a non-zero return
 * status) when an invalid/failed job is submitted.
 * The test case provides an invalid input file for the map/reduce job as
 * a unit test case.
*/
public class TestStreamingFailure extends TestStreaming
{
protected File INVALID_INPUT_FILE;
public TestStreamingFailure() throws IOException
{
INVALID_INPUT_FILE = new File("invalid_input.txt");
}
@Override
protected void setInputOutput() {
inputFile = INVALID_INPUT_FILE.getAbsolutePath();
outDir = OUTPUT_DIR.getAbsolutePath();
}
@Override
@Test
public void testCommandLine() throws IOException {
int returnStatus = runStreamJob();
assertEquals("Streaming Job Failure code expected", 5, returnStatus);
}
}
| 1,727 | 29.857143 | 75 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestRawBytesStreaming {
protected File INPUT_FILE = new File("target/input.txt");
protected File OUTPUT_DIR = new File("target/out");
protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
protected String map = UtilTest.makeJavaCommand(RawBytesMapApp.class, new String[]{"."});
protected String reduce = UtilTest.makeJavaCommand(RawBytesReduceApp.class, new String[0]);
protected String outputExpect = "are\t3\nblue\t1\nbunnies\t1\npink\t1\nred\t1\nroses\t1\nviolets\t1\n";
public TestRawBytesStreaming() throws IOException {
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException {
DataOutputStream out = new DataOutputStream(new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", reduce,
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf", "stream.map.output=rawbytes",
"-jobconf", "stream.reduce.input=rawbytes",
"-verbose"
};
}
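  // With stream.map.output=rawbytes and stream.reduce.input=rawbytes the
  // framework exchanges binary records with the child processes: each key and
  // value is written as a 4-byte length prefix followed by the raw bytes,
  // instead of the default tab/newline-delimited text. RawBytesMapApp and
  // RawBytesReduceApp read and write this framing.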
@Test
public void testCommandLine() throws Exception {
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
} catch (Exception e) {
}
createInput();
OUTPUT_DIR.delete();
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
StreamJob job = new StreamJob();
job.setConf(new Configuration());
job.run(genArgs());
File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
outFile.delete();
System.out.println(" map=" + map);
System.out.println("reduce=" + reduce);
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
assertEquals(outputExpect, output);
} finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
}
| 3,450 | 34.57732 | 105 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Before;
/**
* This class tests that the '-file' argument to streaming results
* in files being unpacked in the job working directory.
*/
public class TestFileArgs extends TestStreaming
{
private MiniDFSCluster dfs = null;
private MiniMRCluster mr = null;
private FileSystem fileSys = null;
private String namenode = null;
private Configuration conf = null;
private static final String EXPECTED_OUTPUT =
"job.jar\t\nsidefile\t\n";
private static final String LS_PATH = Shell.WINDOWS ? "cmd /c dir /B" :
"/bin/ls";
public TestFileArgs() throws IOException
{
// Set up mini cluster
conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
namenode = fileSys.getUri().getAuthority();
mr = new MiniMRCluster(1, namenode, 1);
map = LS_PATH;
FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
setTestDir(new File("/tmp/TestFileArgs"));
}
@Before
@Override
public void setUp() throws IOException {
// Set up side file
FileSystem localFs = FileSystem.getLocal(conf);
DataOutputStream dos = localFs.create(new Path("target/sidefile"));
dos.write("hello world\n".getBytes("UTF-8"));
dos.close();
// Since ls doesn't read stdin, we don't want to write anything
// to it, or else we risk Broken Pipe exceptions.
input = "";
}
@After
@Override
public void tearDown() {
if (mr != null) {
mr.shutdown();
}
if (dfs != null) {
dfs.shutdown();
}
}
@Override
protected String getExpectedOutput() {
return EXPECTED_OUTPUT;
}
@Override
protected Configuration getConf() {
return conf;
}
@Override
protected String[] genArgs() {
for (Map.Entry<String, String> entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
args.add("-file");
args.add(new java.io.File("target/sidefile").getAbsolutePath());
args.add("-numReduceTasks");
args.add("0");
args.add("-jobconf");
args.add("mapred.jar=" + STREAMING_JAR);
args.add("-verbose");
return super.genArgs();
}
}
| 3,410 | 27.90678 | 75 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleCachefiles.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.Utils;
/**
* This test case tests the symlink creation
* utility provided by distributed caching
*/
public class TestMultipleCachefiles
{
String INPUT_FILE = "/testing-streaming/input.txt";
String OUTPUT_DIR = "/testing-streaming/out";
String CACHE_FILE = "/testing-streaming/cache.txt";
String CACHE_FILE_2 = "/testing-streaming/cache2.txt";
String input = "check to see if we can read this none reduce";
String map = TestStreaming.XARGS_CAT;
String reduce = TestStreaming.CAT;
String mapString = "testlink";
String mapString2 = "testlink2";
String cacheString = "This is just the cache string";
String cacheString2 = "This is just the second cache string";
StreamJob job;
public TestMultipleCachefiles() throws IOException
{
}
@Test
public void testMultipleCachefiles() throws Exception
{
boolean mayExit = false;
MiniMRCluster mr = null;
MiniDFSCluster dfs = null;
try{
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = dfs.getFileSystem();
String namenode = fileSys.getUri().toString();
mr = new MiniMRCluster(1, namenode, 3);
List<String> args = new ArrayList<String>();
for (Map.Entry<String, String> entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
String argv[] = new String[] {
"-input", INPUT_FILE,
"-output", OUTPUT_DIR,
"-mapper", map,
"-reducer", reduce,
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf",
JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" +
"-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
"-Dbuild.test=" + System.getProperty("build.test") + " " +
conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
"-jobconf",
JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" +
"-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
"-Dbuild.test=" + System.getProperty("build.test") + " " +
conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
"-cacheFile", fileSys.getUri() + CACHE_FILE + "#" + mapString,
"-cacheFile", fileSys.getUri() + CACHE_FILE_2 + "#" + mapString2,
"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
};
for (String arg : argv) {
args.add(arg);
}
argv = args.toArray(new String[args.size()]);
fileSys.delete(new Path(OUTPUT_DIR), true);
DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
file.writeBytes(mapString + "\n");
file.writeBytes(mapString2 + "\n");
file.close();
file = fileSys.create(new Path(CACHE_FILE));
file.writeBytes(cacheString + "\n");
file.close();
file = fileSys.create(new Path(CACHE_FILE_2));
file.writeBytes(cacheString2 + "\n");
file.close();
job = new StreamJob(argv, mayExit);
job.go();
fileSys = dfs.getFileSystem();
String line = null;
String line2 = null;
Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
new Path(OUTPUT_DIR),
new Utils.OutputFileUtils
.OutputFilesFilter()));
for (int i = 0; i < fileList.length; i++){
System.out.println(fileList[i].toString());
BufferedReader bread =
new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
line = bread.readLine();
System.out.println(line);
line2 = bread.readLine();
System.out.println(line2);
}
assertEquals(cacheString + "\t", line);
assertEquals(cacheString2 + "\t", line2);
} finally{
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();}
}
}
public static void main(String[]args) throws Exception
{
new TestMultipleCachefiles().testMultipleCachefiles();
}
}
| 5,653 | 34.78481 | 82 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterMapReduceTestCase;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
public class TestStreamingBadRecords extends ClusterMapReduceTestCase
{
private static final Log LOG =
LogFactory.getLog(TestStreamingBadRecords.class);
private static final List<String> MAPPER_BAD_RECORDS =
Arrays.asList("hey022","hey023","hey099");
private static final List<String> REDUCER_BAD_RECORDS =
Arrays.asList("hey001","hey018");
private static final String badMapper =
UtilTest.makeJavaCommand(BadApp.class, new String[]{});
private static final String badReducer =
UtilTest.makeJavaCommand(BadApp.class, new String[]{"true"});
private static final int INPUTSIZE=100;
public TestStreamingBadRecords() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void setUp() throws Exception {
Properties props = new Properties();
props.setProperty(JTConfig.JT_RETIREJOBS, "false");
props.setProperty(JTConfig.JT_PERSIST_JOBSTATUS, "false");
startCluster(true, props);
}
private void createInput() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));
Writer wr = new OutputStreamWriter(os);
//increasing the record size so that we have stream flushing
String prefix = new String(new byte[20*1024]);
for(int i=1;i<=INPUTSIZE;i++) {
String str = ""+i;
int zerosToPrepend = 3 - str.length();
for(int j=0;j<zerosToPrepend;j++){
str = "0"+str;
}
wr.write(prefix + "hey"+str+"\n");
    }
    wr.close();
}
private void validateOutput(RunningJob runningJob, boolean validateCount)
throws Exception {
LOG.info(runningJob.getCounters().toString());
assertTrue(runningJob.isSuccessful());
if(validateCount) {
//validate counters
String counterGrp = "org.apache.hadoop.mapred.Task$Counter";
Counters counters = runningJob.getCounters();
assertEquals(counters.findCounter(counterGrp, "MAP_SKIPPED_RECORDS").
getCounter(),MAPPER_BAD_RECORDS.size());
int mapRecs = INPUTSIZE - MAPPER_BAD_RECORDS.size();
assertEquals(counters.findCounter(counterGrp, "MAP_INPUT_RECORDS").
getCounter(),mapRecs);
assertEquals(counters.findCounter(counterGrp, "MAP_OUTPUT_RECORDS").
getCounter(),mapRecs);
int redRecs = mapRecs - REDUCER_BAD_RECORDS.size();
assertEquals(counters.findCounter(counterGrp, "REDUCE_SKIPPED_RECORDS").
getCounter(),REDUCER_BAD_RECORDS.size());
assertEquals(counters.findCounter(counterGrp, "REDUCE_SKIPPED_GROUPS").
getCounter(),REDUCER_BAD_RECORDS.size());
assertEquals(counters.findCounter(counterGrp, "REDUCE_INPUT_GROUPS").
getCounter(),redRecs);
assertEquals(counters.findCounter(counterGrp, "REDUCE_INPUT_RECORDS").
getCounter(),redRecs);
assertEquals(counters.findCounter(counterGrp, "REDUCE_OUTPUT_RECORDS").
getCounter(),redRecs);
}
List<String> badRecs = new ArrayList<String>();
badRecs.addAll(MAPPER_BAD_RECORDS);
badRecs.addAll(REDUCER_BAD_RECORDS);
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(getOutputDir(),
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
int counter = 0;
while (line != null) {
counter++;
StringTokenizer tokeniz = new StringTokenizer(line, "\t");
String value = tokeniz.nextToken();
int index = value.indexOf("hey");
assertTrue(index>-1);
if(index>-1) {
String heyStr = value.substring(index);
assertTrue(!badRecs.contains(heyStr));
}
line = reader.readLine();
}
reader.close();
if(validateCount) {
assertEquals(INPUTSIZE-badRecs.size(), counter);
}
}
}
/*
   * Test disabled because skipping bad records is not supported in 0.23
*/
/*
public void testSkip() throws Exception {
JobConf clusterConf = createJobConf();
createInput();
int attSkip =0;
SkipBadRecords.setAttemptsToStartSkipping(clusterConf,attSkip);
//the no of attempts to successfully complete the task depends
//on the no of bad records.
int mapperAttempts = attSkip+1+MAPPER_BAD_RECORDS.size();
int reducerAttempts = attSkip+1+REDUCER_BAD_RECORDS.size();
String[] args = new String[] {
"-input", (new Path(getInputDir(), "text.txt")).toString(),
"-output", getOutputDir().toString(),
"-mapper", badMapper,
"-reducer", badReducer,
"-verbose",
"-inputformat", "org.apache.hadoop.mapred.KeyValueTextInputFormat",
"-jobconf", "mapreduce.task.skip.start.attempts="+attSkip,
"-jobconf", "mapreduce.job.skip.outdir=none",
"-jobconf", "mapreduce.map.maxattempts="+mapperAttempts,
"-jobconf", "mapreduce.reduce.maxattempts="+reducerAttempts,
"-jobconf", "mapreduce.map.skip.maxrecords="+Long.MAX_VALUE,
"-jobconf", "mapreduce.reduce.skip.maxgroups="+Long.MAX_VALUE,
"-jobconf", "mapreduce.job.maps=1",
"-jobconf", "mapreduce.job.reduces=1",
"-jobconf", "fs.default.name="+clusterConf.get("fs.default.name"),
"-jobconf", "mapreduce.jobtracker.address=" +
clusterConf.get(JTConfig.JT_IPC_ADDRESS),
"-jobconf", "mapreduce.jobtracker.http.address="
+clusterConf.get(JTConfig.JT_HTTP_ADDRESS),
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
"-jobconf", "mapreduce.framework.name=yarn"
};
StreamJob job = new StreamJob(args, false);
job.go();
validateOutput(job.running_, false);
//validate that there is no skip directory as it has been set to "none"
assertTrue(SkipBadRecords.getSkipOutputPath(job.jobConf_)==null);
}
*/
/*
   * Test disabled because skipping bad records is not supported in 0.23
*/
/*
public void testNarrowDown() throws Exception {
createInput();
JobConf clusterConf = createJobConf();
String[] args = new String[] {
"-input", (new Path(getInputDir(), "text.txt")).toString(),
"-output", getOutputDir().toString(),
"-mapper", badMapper,
"-reducer", badReducer,
"-verbose",
"-inputformat", "org.apache.hadoop.mapred.KeyValueTextInputFormat",
"-jobconf", "mapreduce.task.skip.start.attempts=1",
//actually fewer attempts are required than specified
//but to cater to the case of slow processed counter update, need to
//have more attempts
"-jobconf", "mapreduce.map.maxattempts=20",
"-jobconf", "mapreduce.reduce.maxattempts=15",
"-jobconf", "mapreduce.map.skip.maxrecords=1",
"-jobconf", "mapreduce.reduce.skip.maxgroups=1",
"-jobconf", "mapreduce.job.maps=1",
"-jobconf", "mapreduce.job.reduces=1",
"-jobconf", "fs.default.name="+clusterConf.get("fs.default.name"),
"-jobconf", "mapreduce.jobtracker.address="+clusterConf.get(JTConfig.JT_IPC_ADDRESS),
"-jobconf", "mapreduce.jobtracker.http.address="
+clusterConf.get(JTConfig.JT_HTTP_ADDRESS),
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
"-jobconf", "mapreduce.framework.name=yarn"
};
StreamJob job = new StreamJob(args, false);
job.go();
validateOutput(job.running_, true);
assertTrue(SkipBadRecords.getSkipOutputPath(job.jobConf_)!=null);
}
*/
public void testNoOp() {
// Added to avoid warnings when running this disabled test
}
static class App{
boolean isReducer;
public App(String[] args) throws Exception{
if(args.length>0) {
isReducer = Boolean.parseBoolean(args[0]);
}
String counter = SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS;
if(isReducer) {
counter = SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS;
}
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
int count = 0;
while ((line = in.readLine()) != null) {
processLine(line);
count++;
if(count>=10) {
System.err.println("reporter:counter:"+SkipBadRecords.COUNTER_GROUP+
","+counter+","+count);
count = 0;
}
}
}
protected void processLine(String line) throws Exception{
System.out.println(line);
}
public static void main(String[] args) throws Exception{
new App(args);
}
}
static class BadApp extends App{
public BadApp(String[] args) throws Exception {
super(args);
}
protected void processLine(String line) throws Exception {
List<String> badRecords = MAPPER_BAD_RECORDS;
if(isReducer) {
badRecords = REDUCER_BAD_RECORDS;
}
if(badRecords.size()>0 && line.contains(badRecords.get(0))) {
LOG.warn("Encountered BAD record");
System.exit(-1);
}
else if(badRecords.size()>1 && line.contains(badRecords.get(1))) {
LOG.warn("Encountered BAD record");
throw new Exception("Got bad record..crashing");
}
else if(badRecords.size()>2 && line.contains(badRecords.get(2))) {
LOG.warn("Encountered BAD record");
System.exit(-1);
}
super.processLine(line);
}
public static void main(String[] args) throws Exception{
new BadApp(args);
}
}
}
| 11,637 | 35.597484 | 91 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestLoadTypedBytes.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.typedbytes.TypedBytesOutput;
import org.apache.hadoop.typedbytes.TypedBytesWritable;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestLoadTypedBytes {
@Test
public void testLoading() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
FileSystem fs = cluster.getFileSystem();
ByteArrayOutputStream out = new ByteArrayOutputStream();
TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(out));
for (int i = 0; i < 100; i++) {
tboutput.write(new Long(i)); // key
tboutput.write("" + (10 * i)); // value
}
InputStream isBackup = System.in;
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
System.setIn(in);
LoadTypedBytes loadtb = new LoadTypedBytes(conf);
try {
Path root = new Path("/typedbytestest");
assertTrue(fs.mkdirs(root));
assertTrue(fs.exists(root));
String[] args = new String[1];
args[0] = "/typedbytestest/test.seq";
int ret = loadtb.run(args);
assertEquals("Return value != 0.", 0, ret);
Path file = new Path(root, "test.seq");
assertTrue(fs.exists(file));
SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
int counter = 0;
TypedBytesWritable key = new TypedBytesWritable();
TypedBytesWritable value = new TypedBytesWritable();
while (reader.next(key, value)) {
assertEquals(Long.class, key.getValue().getClass());
assertEquals(String.class, value.getValue().getClass());
assertTrue("Invalid record.",
Integer.parseInt(value.toString()) % 10 == 0);
counter++;
}
assertEquals("Wrong number of records.", 100, counter);
} finally {
try {
fs.close();
} catch (Exception e) {
}
System.setIn(isBackup);
cluster.shutdown();
}
}
}
| 3,206 | 33.858696 | 80 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestTypedBytesStreaming.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestTypedBytesStreaming {
protected File INPUT_FILE = new File("target/input.txt");
protected File OUTPUT_DIR = new File("target/out");
protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
protected String map = UtilTest.makeJavaCommand(TypedBytesMapApp.class, new String[]{"."});
protected String reduce = UtilTest.makeJavaCommand(TypedBytesReduceApp.class, new String[0]);
protected String outputExpect = "are\t3\nred\t1\nblue\t1\npink\t1\nroses\t1\nbunnies\t1\nviolets\t1\n";
public TestTypedBytesStreaming() throws IOException {
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected void createInput() throws IOException {
DataOutputStream out = new DataOutputStream(new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
out.write(input.getBytes("UTF-8"));
out.close();
}
protected String[] genArgs() {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", map,
"-reducer", reduce,
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-io", "typedbytes"
};
}
@Before
@After
public void cleanupOutput() throws Exception {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
INPUT_FILE.delete();
createInput();
}
@Test
public void testCommandLine() throws Exception {
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
StreamJob job = new StreamJob();
job.setConf(new Configuration());
job.run(genArgs());
File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
String output = StreamUtil.slurp(outFile);
outFile.delete();
System.out.println(" map=" + map);
System.out.println("reduce=" + reduce);
System.err.println("outEx1=" + outputExpect);
System.err.println(" out1=" + output);
assertEquals(outputExpect, output);
}
}
| 3,288 | 34.75 | 105 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingExitStatus.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.junit.Test;
import org.junit.Before;
import static org.junit.Assert.*;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
* This class tests if hadoopStreaming fails a job when the mapper or
* reducers have a non-zero exit status and the
* stream.non.zero.exit.is.failure jobconf is set.
*/
public class TestStreamingExitStatus
{
protected File TEST_DIR =
new File("target/TestStreamingExitStatus").getAbsoluteFile();
protected File INPUT_FILE = new File(TEST_DIR, "input.txt");
protected File OUTPUT_DIR = new File(TEST_DIR, "out");
protected String failingTask = UtilTest.makeJavaCommand(FailApp.class, new String[]{"true"});
protected String echoTask = UtilTest.makeJavaCommand(FailApp.class, new String[]{"false"});
public TestStreamingExitStatus() throws IOException {
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
}
protected String[] genArgs(boolean exitStatusIsFailure, boolean failMap) {
return new String[] {
"-input", INPUT_FILE.getAbsolutePath(),
"-output", OUTPUT_DIR.getAbsolutePath(),
"-mapper", (failMap ? failingTask : echoTask),
"-reducer", (failMap ? echoTask : failingTask),
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "stream.non.zero.exit.is.failure=" + exitStatusIsFailure,
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
"-jobconf", "mapreduce.task.io.sort.mb=10"
};
}
@Before
public void setUp() throws IOException {
UtilTest.recursiveDelete(TEST_DIR);
assertTrue(TEST_DIR.mkdirs());
FileOutputStream out = new FileOutputStream(INPUT_FILE.getAbsoluteFile());
out.write("hello\n".getBytes());
out.close();
}
public void runStreamJob(boolean exitStatusIsFailure, boolean failMap) throws Exception {
boolean mayExit = false;
int returnStatus = 0;
StreamJob job = new StreamJob(genArgs(exitStatusIsFailure, failMap), mayExit);
returnStatus = job.go();
if (exitStatusIsFailure) {
assertEquals("Streaming Job failure code expected", /*job not successful:*/1, returnStatus);
} else {
assertEquals("Streaming Job expected to succeed", 0, returnStatus);
}
}
@Test
public void testMapFailOk() throws Exception {
runStreamJob(false, true);
}
@Test
public void testMapFailNotOk() throws Exception {
runStreamJob(true, true);
}
@Test
public void testReduceFailOk() throws Exception {
runStreamJob(false, false);
}
@Test
public void testReduceFailNotOk() throws Exception {
runStreamJob(true, false);
}
}
| 3,640 | 32.1 | 98 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TrAppReduce.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import org.apache.hadoop.streaming.Environment;
/** A minimal Java implementation of /usr/bin/tr.
* Used to test the usage of external applications without adding
* platform-specific dependencies.
* Use TrAppReduce as reducer only. For mapper, use TrApp.
*/
public class TrAppReduce
{
public TrAppReduce(char find, char replace)
{
this.find = find;
this.replace = replace;
}
void testParentJobConfToEnvVars() throws IOException
{
env = new Environment();
// test that some JobConf properties are exposed as expected
// Note the dots translated to underscore:
// property names have been escaped in PipeMapRed.safeEnvVarName()
expect("mapreduce_jobtracker_address", "local");
//expect("mapred_local_dir", "build/test/mapred/local");
expectDefined("mapreduce_cluster_local_dir");
expect("mapred_output_format_class", "org.apache.hadoop.mapred.TextOutputFormat");
expect("mapreduce_job_output_key_class", "org.apache.hadoop.io.Text");
expect("mapreduce_job_output_value_class", "org.apache.hadoop.io.Text");
expect("mapreduce_task_ismap", "false");
expectDefined("mapreduce_task_attempt_id");
expectDefined("mapreduce_task_io_sort_factor");
// the FileSplit context properties are not available in local hadoop..
// so can't check them in this test.
}
// this runs in a subprocess; won't use JUnit's assertTrue()
void expect(String evName, String evVal) throws IOException
{
String got = env.getProperty(evName);
if (!evVal.equals(got)) {
String msg = "FAIL evName=" + evName + " got=" + got + " expect=" + evVal;
throw new IOException(msg);
}
}
void expectDefined(String evName) throws IOException
{
String got = env.getProperty(evName);
if (got == null) {
String msg = "FAIL evName=" + evName + " is undefined. Expect defined.";
throw new IOException(msg);
}
}
public void go() throws IOException
{
testParentJobConfToEnvVars();
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
while ((line = in.readLine()) != null) {
String out = line.replace(find, replace);
System.out.println(out);
}
}
public static void main(String[] args) throws IOException
{
args[0] = CUnescape(args[0]);
args[1] = CUnescape(args[1]);
TrAppReduce app = new TrAppReduce(args[0].charAt(0), args[1].charAt(0));
app.go();
}
public static String CUnescape(String s)
{
if (s.equals("\\n")) {
return "\n";
} else {
return s;
}
}
char find;
char replace;
Environment env;
}
| 3,511 | 30.079646 | 86 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/FailApp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
/**
* A simple Java app that will consume all input from stdin, echoing
* it to stdout, and then optionally throw an exception (which should
* cause a non-zero exit status for the process).
*/
public class FailApp
{
public FailApp() {
}
public void go(boolean fail) throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
// Consume all input (to make sure streaming will still count this
// task as failed even if all input was consumed).
while ((line = in.readLine()) != null) {
System.out.println(line);
}
if (fail) {
throw new RuntimeException("Intentionally failing task");
}
}
public static void main(String[] args) throws IOException {
boolean fail = true;
if (args.length >= 1 && "false".equals(args[0])) {
fail = false;
}
FailApp app = new FailApp();
app.go(fail);
}
}
| 1,791 | 29.372881 | 77 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TypedBytesReduceApp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.typedbytes.TypedBytesInput;
import org.apache.hadoop.typedbytes.TypedBytesOutput;
public class TypedBytesReduceApp {
public void go() throws IOException {
TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(System.in));
TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(System.out));
Object prevKey = null;
int sum = 0;
Object key = tbinput.read();
while (key != null) {
if (prevKey != null && !key.equals(prevKey)) {
tboutput.write(prevKey); // write key
tboutput.write(sum); // write value
sum = 0;
}
sum += (Integer) tbinput.read();
prevKey = key;
key = tbinput.read();
}
tboutput.write(prevKey);
tboutput.write(sum);
System.out.flush();
}
public static void main(String[] args) throws IOException {
TypedBytesReduceApp app = new TypedBytesReduceApp();
app.go();
}
}
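/*
 * The map-side counterpart exercised by TestTypedBytesStreaming above
 * (TypedBytesMapApp) is not part of this listing. The class below is a
 * hypothetical sketch in the same spirit, not the actual Hadoop class:
 * it assumes the framework feeds alternating (key, value) typed-bytes
 * records on stdin, splits each text value on the separator passed as
 * args[0], and emits (word, 1) pairs for the reducer above to sum.
 */
class TypedBytesMapSketch {
  public static void main(String[] args) throws java.io.IOException {
    String separator = (args.length > 0) ? args[0] : ".";
    org.apache.hadoop.typedbytes.TypedBytesInput tbinput =
        new org.apache.hadoop.typedbytes.TypedBytesInput(
            new java.io.DataInputStream(System.in));
    org.apache.hadoop.typedbytes.TypedBytesOutput tboutput =
        new org.apache.hadoop.typedbytes.TypedBytesOutput(
            new java.io.DataOutputStream(System.out));
    Object key = tbinput.read();
    while (key != null) {
      Object value = tbinput.read(); // a line of input text
      for (String word
          : value.toString().split(java.util.regex.Pattern.quote(separator))) {
        tboutput.write(word); // emitted key
        tboutput.write(1);    // emitted value, summed by TypedBytesReduceApp
      }
      key = tbinput.read();
    }
    System.out.flush();
  }
}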
| 1,901 | 31.237288 | 87 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UniqApp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.Date;
/** A minimal Java implementation of /usr/bin/uniq
Used to test the usage of external applications without adding
platform-specific dependencies.
Uniques lines and prepends a header on the line.
*/
public class UniqApp
{
public UniqApp(String header)
{
this.header = header;
}
public void go() throws IOException
{
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String line;
String prevLine = null;
while ((line = in.readLine()) != null) {
if (!line.equals(prevLine)) {
System.out.println(header + line);
}
prevLine = line;
}
}
public static void main(String[] args) throws IOException
{
String h = (args.length < 1) ? "" : args[0];
UniqApp app = new UniqApp(h);
app.go();
}
String header;
}
| 1,705 | 28.413793 | 77 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreaming.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.util.JarFinder;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
/**
* This class tests hadoopStreaming in MapReduce local mode.
*/
public class TestStreaming
{
public static final String STREAMING_JAR = JarFinder.getJar(StreamJob.class);
/**
* cat command used for copying stdin to stdout as mapper or reducer function.
* On Windows, use a cmd script that approximates the functionality of cat.
*/
static final String CAT = Shell.WINDOWS ?
"cmd /c " + new File("target/bin/cat.cmd").getAbsolutePath() : "cat";
/**
* Command used for iterating through file names on stdin and copying each
* file's contents to stdout, used as mapper or reducer function. On Windows,
* use a cmd script that approximates the functionality of xargs cat.
*/
static final String XARGS_CAT = Shell.WINDOWS ?
"cmd /c " + new File("target/bin/xargs_cat.cmd").getAbsolutePath() :
"xargs cat";
// "map" command: grep -E (red|green|blue)
// reduce command: uniq
protected File TEST_DIR;
protected File INPUT_FILE;
protected File OUTPUT_DIR;
protected String inputFile;
protected String outDir;
protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n";
// map behaves like "/usr/bin/tr . \\n"; (split words into lines)
protected String map = UtilTest.makeJavaCommand(TrApp.class, new String[]{".", "\\n"});
// reduce behaves like /usr/bin/uniq, but also prepends lines with R.
// command-line combiner does not have any effect any more.
protected String reduce = UtilTest.makeJavaCommand(UniqApp.class, new String[]{"R"});
protected String outputExpect = "Rare\t\nRblue\t\nRbunnies\t\nRpink\t\nRred\t\nRroses\t\nRviolets\t\n";
protected ArrayList<String> args = new ArrayList<String>();
protected StreamJob job;
public TestStreaming() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());
utilTest.checkUserDir();
utilTest.redirectIfAntJunit();
setTestDir(new File("target/TestStreaming").getAbsoluteFile());
}
/**
* Sets root of test working directory and resets any other paths that must be
* children of the test working directory. Typical usage is for subclasses
* that use HDFS to override the test directory to the form "/tmp/<test name>"
* so that on Windows, tests won't attempt to use paths containing a ':' from
* the drive specifier. The ':' character is considered invalid by HDFS.
*
* @param testDir File to set
*/
protected void setTestDir(File testDir) {
TEST_DIR = testDir;
OUTPUT_DIR = new File(testDir, "out");
INPUT_FILE = new File(testDir, "input.txt");
}
@Before
public void setUp() throws IOException {
UtilTest.recursiveDelete(TEST_DIR);
assertTrue("Creating " + TEST_DIR, TEST_DIR.mkdirs());
args.clear();
}
@After
public void tearDown() {
UtilTest.recursiveDelete(TEST_DIR);
}
protected String getInputData() {
return input;
}
protected void createInput() throws IOException
{
DataOutputStream out = getFileSystem().create(new Path(
INPUT_FILE.getPath()));
out.write(getInputData().getBytes("UTF-8"));
out.close();
}
protected void setInputOutput() {
inputFile = INPUT_FILE.getPath();
outDir = OUTPUT_DIR.getPath();
}
protected String[] genArgs() {
args.add("-input");args.add(inputFile);
args.add("-output");args.add(outDir);
args.add("-mapper");args.add(map);
args.add("-reducer");args.add(reduce);
args.add("-jobconf");
args.add("mapreduce.task.files.preserve.failedtasks=true");
args.add("-jobconf");
args.add("stream.tmpdir="+System.getProperty("test.build.data","/tmp"));
String str[] = new String [args.size()];
args.toArray(str);
return str;
}
protected Configuration getConf() {
return new Configuration();
}
protected FileSystem getFileSystem() throws IOException {
return FileSystem.get(getConf());
}
protected String getExpectedOutput() {
return outputExpect;
}
protected void checkOutput() throws IOException {
Path outPath = new Path(OUTPUT_DIR.getPath(), "part-00000");
FileSystem fs = getFileSystem();
String output = StreamUtil.slurpHadoop(outPath, fs);
fs.delete(outPath, true);
System.err.println("outEx1=" + getExpectedOutput());
System.err.println(" out1=" + output);
assertOutput(getExpectedOutput(), output);
}
protected void assertOutput(String expectedOutput, String output) throws IOException {
String[] words = expectedOutput.split("\t\n");
Set<String> expectedWords = new HashSet<String>(Arrays.asList(words));
words = output.split("\t\n");
Set<String> returnedWords = new HashSet<String>(Arrays.asList(words));
// PrintWriter writer = new PrintWriter(new OutputStreamWriter(new FileOutputStream(new File("/tmp/tucu.txt"), true)), true);
// writer.println("** Expected: " + expectedOutput);
// writer.println("** Output : " + output);
assertTrue(returnedWords.containsAll(expectedWords));
}
/**
* Runs a streaming job with the given arguments
* @return the streaming job return status
* @throws IOException
*/
protected int runStreamJob() throws IOException {
setInputOutput();
createInput();
boolean mayExit = false;
// During tests, the default Configuration will use a local mapred
// So don't specify -config or -cluster
job = new StreamJob(genArgs(), mayExit);
return job.go();
}
@Test
public void testCommandLine() throws Exception
{
int ret = runStreamJob();
assertEquals(0, ret);
checkOutput();
}
}
| 6,825 | 32.460784 | 128 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/io/TestKeyOnlyTextOutputReader.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.streaming.PipeMapRed;
import org.apache.hadoop.streaming.PipeMapper;
import org.junit.Test;
public class TestKeyOnlyTextOutputReader {
@Test
public void testKeyOnlyTextOutputReader() throws IOException {
String text = "key,value\nkey2,value2\nnocomma\n";
PipeMapRed pipeMapRed = new MyPipeMapRed(text);
KeyOnlyTextOutputReader outputReader = new KeyOnlyTextOutputReader();
outputReader.initialize(pipeMapRed);
outputReader.readKeyValue();
Assert.assertEquals(new Text("key,value"), outputReader.getCurrentKey());
outputReader.readKeyValue();
Assert.assertEquals(new Text("key2,value2"), outputReader.getCurrentKey());
outputReader.readKeyValue();
Assert.assertEquals(new Text("nocomma"), outputReader.getCurrentKey());
Assert.assertEquals(false, outputReader.readKeyValue());
}
private class MyPipeMapRed extends PipeMapper {
private DataInput clientIn;
private Configuration conf = new Configuration();
public MyPipeMapRed(String text) {
clientIn = new DataInputStream(new ByteArrayInputStream(text.getBytes()));
}
@Override
public DataInput getClientInput() {
return clientIn;
}
@Override
public Configuration getConfiguration() {
return conf;
}
}
}
| 2,386 | 33.594203 | 80 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.typedbytes;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
public class TestTypedBytesWritable extends TestCase {
public void testToString() {
TypedBytesWritable tbw = new TypedBytesWritable();
tbw.setValue(true);
assertEquals("true", tbw.toString());
tbw.setValue(12345);
assertEquals("12345", tbw.toString());
tbw.setValue(123456789L);
assertEquals("123456789", tbw.toString());
tbw.setValue((float) 1.23);
assertEquals("1.23", tbw.toString());
tbw.setValue(1.23456789);
assertEquals("1.23456789", tbw.toString());
tbw.setValue("random text");
assertEquals("random text", tbw.toString());
}
public void testIO() throws IOException {
TypedBytesWritable tbw = new TypedBytesWritable();
tbw.setValue(12345);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutput dout = new DataOutputStream(baos);
tbw.write(dout);
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
DataInput din = new DataInputStream(bais);
TypedBytesWritable readTbw = new TypedBytesWritable();
readTbw.readFields(din);
assertEquals(tbw, readTbw);
}
}
| 2,204 | 34 | 77 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
public class StreamKeyValUtil {
/**
* Find the first occurrence of a tab in a UTF-8 encoded string
* @param utf a byte array containing a UTF-8 encoded string
* @param start starting offset
* @param length no. of bytes
* @return position at which the first tab occurs, otherwise -1
*/
public static int findTab(byte [] utf, int start, int length) {
for(int i=start; i<(start+length); i++) {
if (utf[i]==(byte)'\t') {
return i;
}
}
return -1;
}
/**
* Find the first occurrence of a tab in a UTF-8 encoded string
* @param utf a byte array containing a UTF-8 encoded string
* @return position at which the first tab occurs, otherwise -1
*/
public static int findTab(byte [] utf) {
return org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(utf, 0,
utf.length, (byte)'\t', 1);
}
/**
* Split a UTF-8 byte array into key and value,
* assuming that the delimiter is at splitPos.
* @param utf UTF-8 encoded string
* @param start starting offset
* @param length no. of bytes
* @param key contains the key when the method returns
* @param val contains the value when the method returns
* @param splitPos the split position
* @param separatorLength the length of the separator between key and value
* @throws IOException
*/
public static void splitKeyVal(byte[] utf, int start, int length,
Text key, Text val, int splitPos,
int separatorLength) throws IOException {
if (splitPos<start || splitPos >= (start+length))
throw new IllegalArgumentException("splitPos must be in the range " +
"[" + start + ", " + (start+length) + "]: " + splitPos);
int keyLen = (splitPos-start);
int valLen = (start+length)-splitPos-separatorLength;
key.set(utf, start, keyLen);
val.set(utf, splitPos+separatorLength, valLen);
}
/**
* Split a UTF-8 byte array into key and value,
* assuming that the delimiter is at splitPos.
* @param utf UTF-8 encoded string
* @param start starting offset
* @param length no. of bytes
* @param key contains the key when the method returns
* @param val contains the value when the method returns
* @param splitPos the split position
* @throws IOException
*/
public static void splitKeyVal(byte[] utf, int start, int length,
Text key, Text val, int splitPos) throws IOException {
splitKeyVal(utf, start, length, key, val, splitPos, 1);
}
/**
* Split a UTF-8 byte array into key and value,
* assuming that the delimiter is at splitPos.
* @param utf UTF-8 encoded string
* @param key contains the key when the method returns
* @param val contains the value when the method returns
* @param splitPos the split position
* @param separatorLength the length of the separator between key and value
* @throws IOException
*/
public static void splitKeyVal(byte[] utf, Text key, Text val, int splitPos,
int separatorLength)
throws IOException {
splitKeyVal(utf, 0, utf.length, key, val, splitPos, separatorLength);
}
/**
* Split a UTF-8 byte array into key and value,
* assuming that the delimiter is at splitPos.
* @param utf UTF-8 encoded string
* @param key contains the key when the method returns
* @param val contains the value when the method returns
* @param splitPos the split position
* @throws IOException
*/
public static void splitKeyVal(byte[] utf, Text key, Text val, int splitPos)
throws IOException {
splitKeyVal(utf, 0, utf.length, key, val, splitPos, 1);
}
/**
* Read a UTF-8 encoded line using the given LineReader.
* @param lineReader LineReader to read the line from.
* @param out Text to read into
* @return number of bytes read
* @throws IOException
*/
public static int readLine(LineReader lineReader, Text out)
throws IOException {
out.clear();
return lineReader.readLine(out);
}
}
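/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * locating the tab in a streaming record and splitting it into key and
 * value with the helpers above, assuming the default single-byte '\t'
 * separator.
 */
class StreamKeyValUtilExample {
  public static void main(String[] args) throws java.io.IOException {
    byte[] record = "apple\t42".getBytes("UTF-8");
    org.apache.hadoop.io.Text key = new org.apache.hadoop.io.Text();
    org.apache.hadoop.io.Text val = new org.apache.hadoop.io.Text();
    int tabPos = StreamKeyValUtil.findTab(record); // 5
    StreamKeyValUtil.splitKeyVal(record, key, val, tabPos);
    System.out.println(key + " -> " + val); // prints: apple -> 42
  }
}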
| 5,013 | 35.333333 | 97 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.Map.Entry;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Properties;
import org.apache.commons.logging.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.streaming.io.InputWriter;
import org.apache.hadoop.streaming.io.OutputReader;
import org.apache.hadoop.streaming.io.TextInputWriter;
import org.apache.hadoop.streaming.io.TextOutputReader;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.io.Text;
/** Shared functionality for PipeMapper, PipeReducer.
*/
public abstract class PipeMapRed {
protected static final Log LOG = LogFactory.getLog(PipeMapRed.class.getName());
/**
* Returns the Configuration.
*/
public Configuration getConfiguration() {
return job_;
}
/**
* Returns the DataOutput to which the client input is written.
*/
public DataOutput getClientOutput() {
return clientOut_;
}
/**
* Returns the DataInput from which the client output is read.
*/
public DataInput getClientInput() {
return clientIn_;
}
/**
* Returns the input separator to be used.
*/
public abstract byte[] getInputSeparator();
/**
* Returns the field separator to be used.
*/
public abstract byte[] getFieldSeparator();
/**
* Returns the number of key fields.
*/
public abstract int getNumOfKeyFields();
/**
* Returns the command to be spawned as a subprocess.
* Mapper/Reducer operations will delegate to it
*/
abstract String getPipeCommand(JobConf job);
abstract boolean getDoPipe();
final static int OUTSIDE = 1;
final static int SINGLEQ = 2;
final static int DOUBLEQ = 3;
private final static int BUFFER_SIZE = 128 * 1024;
static String[] splitArgs(String args) {
ArrayList<String> argList = new ArrayList<String>();
char[] ch = args.toCharArray();
int clen = ch.length;
int state = OUTSIDE;
int argstart = 0;
for (int c = 0; c <= clen; c++) {
boolean last = (c == clen);
int lastState = state;
boolean endToken = false;
if (!last) {
if (ch[c] == '\'') {
if (state == OUTSIDE) {
state = SINGLEQ;
} else if (state == SINGLEQ) {
state = OUTSIDE;
}
endToken = (state != lastState);
} else if (ch[c] == '"') {
if (state == OUTSIDE) {
state = DOUBLEQ;
} else if (state == DOUBLEQ) {
state = OUTSIDE;
}
endToken = (state != lastState);
} else if (ch[c] == ' ') {
if (state == OUTSIDE) {
endToken = true;
}
}
}
if (last || endToken) {
if (c == argstart) {
// unquoted space
} else {
String a;
a = args.substring(argstart, c);
argList.add(a);
}
argstart = c + 1;
lastState = state;
}
}
return argList.toArray(new String[0]);
}
public void configure(JobConf job) {
try {
String argv = getPipeCommand(job);
joinDelay_ = job.getLong("stream.joindelay.milli", 0);
job_ = job;
mapInputWriterClass_ =
job_.getClass("stream.map.input.writer.class",
TextInputWriter.class, InputWriter.class);
mapOutputReaderClass_ =
job_.getClass("stream.map.output.reader.class",
TextOutputReader.class, OutputReader.class);
reduceInputWriterClass_ =
job_.getClass("stream.reduce.input.writer.class",
TextInputWriter.class, InputWriter.class);
reduceOutputReaderClass_ =
job_.getClass("stream.reduce.output.reader.class",
TextOutputReader.class, OutputReader.class);
nonZeroExitIsFailure_ = job_.getBoolean("stream.non.zero.exit.is.failure", true);
doPipe_ = getDoPipe();
if (!doPipe_) return;
setStreamJobDetails(job);
String[] argvSplit = splitArgs(argv);
String prog = argvSplit[0];
File currentDir = new File(".").getAbsoluteFile();
if (new File(prog).isAbsolute()) {
// we don't own it. Hope it is executable
} else {
FileUtil.chmod(new File(currentDir, prog).toString(), "a+x");
}
//
// argvSplit[0]:
// An absolute path should be a preexisting valid path on all TaskTrackers
// A relative path is converted into an absolute pathname by looking
// up the PATH env variable. If it still fails, look it up in the
// tasktracker's local working directory
//
if (!new File(argvSplit[0]).isAbsolute()) {
PathFinder finder = new PathFinder("PATH");
finder.prependPathComponent(currentDir.toString());
File f = finder.getAbsolutePath(argvSplit[0]);
if (f != null) {
argvSplit[0] = f.getAbsolutePath();
}
f = null;
}
LOG.info("PipeMapRed exec " + Arrays.asList(argvSplit));
Environment childEnv = (Environment) StreamUtil.env().clone();
addJobConfToEnvironment(job_, childEnv);
addEnvironment(childEnv, job_.get("stream.addenvironment"));
// add TMPDIR environment variable with the value of java.io.tmpdir
envPut(childEnv, "TMPDIR", System.getProperty("java.io.tmpdir"));
// Start the process
ProcessBuilder builder = new ProcessBuilder(argvSplit);
builder.environment().putAll(childEnv.toMap());
sim = builder.start();
clientOut_ = new DataOutputStream(new BufferedOutputStream(
sim.getOutputStream(),
BUFFER_SIZE));
clientIn_ = new DataInputStream(new BufferedInputStream(
sim.getInputStream(),
BUFFER_SIZE));
clientErr_ = new DataInputStream(new BufferedInputStream(sim.getErrorStream()));
startTime_ = System.currentTimeMillis();
} catch (IOException e) {
LOG.error("configuration exception", e);
throw new RuntimeException("configuration exception", e);
} catch (InterruptedException e) {
LOG.error("configuration exception", e);
throw new RuntimeException("configuration exception", e);
}
}
void setStreamJobDetails(JobConf job) {
String s = job.get("stream.minRecWrittenToEnableSkip_");
if (s != null) {
minRecWrittenToEnableSkip_ = Long.parseLong(s);
LOG.info("JobConf set minRecWrittenToEnableSkip_ ="
+ minRecWrittenToEnableSkip_);
}
}
void addJobConfToEnvironment(JobConf jobconf, Properties env) {
JobConf conf = new JobConf(jobconf);
conf.setDeprecatedProperties();
int lenLimit = conf.getInt("stream.jobconf.truncate.limit", -1);
for (Entry<String, String> confEntry: conf) {
String name = confEntry.getKey();
String value = conf.get(name); // does variable expansion
name = safeEnvVarName(name);
if (lenLimit > -1 && value.length() > lenLimit) {
LOG.warn("Environment variable " + name + " truncated to " + lenLimit
+ " to fit system limits.");
value = value.substring(0, lenLimit);
}
envPut(env, name, value);
}
}
String safeEnvVarName(String var) {
StringBuffer safe = new StringBuffer();
int len = var.length();
for (int i = 0; i < len; i++) {
char c = var.charAt(i);
char s;
if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')) {
s = c;
} else {
s = '_';
}
safe.append(s);
}
return safe.toString();
}
void addEnvironment(Properties env, String nameVals) {
// encoding "a=b c=d" from StreamJob
if (nameVals == null) return;
String[] nv = nameVals.split(" ");
for (int i = 0; i < nv.length; i++) {
String[] pair = nv[i].split("=", 2);
if (pair.length != 2) {
LOG.info("Skip env entry:" + nv[i]);
} else {
envPut(env, pair[0], pair[1]);
}
}
}
void envPut(Properties env, String name, String value) {
if (LOG.isDebugEnabled()) {
LOG.debug("Add env entry:" + name + "=" + value);
}
env.put(name, value);
}
void startOutputThreads(OutputCollector output, Reporter reporter)
throws IOException {
inWriter_ = createInputWriter();
outReader_ = createOutputReader();
outThread_ = new MROutputThread(outReader_, output, reporter);
outThread_.start();
errThread_ = new MRErrorThread();
errThread_.setReporter(reporter);
errThread_.start();
}
void waitOutputThreads() throws IOException {
try {
if (outThread_ == null) {
// This happens only when the reducer has empty input (so reduce() is
// never called in this task). The reducer may still generate output,
// which is very uncommon, so we don't write that output to HDFS;
// we simply consume/collect it to avoid the reducer hanging forever.
OutputCollector collector = new OutputCollector() {
public void collect(Object key, Object value)
throws IOException {
//just consume it, no need to write the record anywhere
}
};
Reporter reporter = Reporter.NULL;//dummy reporter
startOutputThreads(collector, reporter);
}
int exitVal = sim.waitFor();
// how'd it go?
if (exitVal != 0) {
if (nonZeroExitIsFailure_) {
throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
+ exitVal);
} else {
LOG.info("PipeMapRed.waitOutputThreads(): subprocess exited with " +
"code " + exitVal + " in " + PipeMapRed.class.getName());
}
}
if (outThread_ != null) {
outThread_.join(joinDelay_);
}
if (errThread_ != null) {
errThread_.join(joinDelay_);
}
if (outerrThreadsThrowable != null) {
throw new RuntimeException(outerrThreadsThrowable);
}
} catch (InterruptedException e) {
//ignore
}
}
abstract InputWriter createInputWriter() throws IOException;
InputWriter createInputWriter(Class<? extends InputWriter> inputWriterClass)
throws IOException {
InputWriter inputWriter =
ReflectionUtils.newInstance(inputWriterClass, job_);
inputWriter.initialize(this);
return inputWriter;
}
abstract OutputReader createOutputReader() throws IOException;
OutputReader createOutputReader(Class<? extends OutputReader> outputReaderClass)
throws IOException {
OutputReader outputReader =
ReflectionUtils.newInstance(outputReaderClass, job_);
outputReader.initialize(this);
return outputReader;
}
class MROutputThread extends Thread {
MROutputThread(OutputReader outReader, OutputCollector outCollector,
Reporter reporter) {
setDaemon(true);
this.outReader = outReader;
this.outCollector = outCollector;
this.reporter = reporter;
}
public void run() {
try {
// 3/4 Tool to Hadoop
while (outReader.readKeyValue()) {
Object key = outReader.getCurrentKey();
Object value = outReader.getCurrentValue();
outCollector.collect(key, value);
numRecWritten_++;
long now = System.currentTimeMillis();
if (now-lastStdoutReport > reporterOutDelay_) {
lastStdoutReport = now;
String hline = "Records R/W=" + numRecRead_ + "/" + numRecWritten_;
if (!processProvidedStatus_) {
reporter.setStatus(hline);
} else {
reporter.progress();
}
LOG.info(hline);
}
}
} catch (Throwable th) {
outerrThreadsThrowable = th;
LOG.warn(th);
} finally {
try {
if (clientIn_ != null) {
clientIn_.close();
clientIn_ = null;
}
} catch (IOException io) {
LOG.info(io);
}
}
}
OutputReader outReader = null;
OutputCollector outCollector = null;
Reporter reporter = null;
long lastStdoutReport = 0;
}
class MRErrorThread extends Thread {
public MRErrorThread() {
this.reporterPrefix = job_.get("stream.stderr.reporter.prefix", "reporter:");
this.counterPrefix = reporterPrefix + "counter:";
this.statusPrefix = reporterPrefix + "status:";
setDaemon(true);
}
public void setReporter(Reporter reporter) {
this.reporter = reporter;
}
public void run() {
Text line = new Text();
LineReader lineReader = null;
try {
lineReader = new LineReader((InputStream)clientErr_, job_);
while (lineReader.readLine(line) > 0) {
String lineStr = line.toString();
if (matchesReporter(lineStr)) {
if (matchesCounter(lineStr)) {
incrCounter(lineStr);
} else if (matchesStatus(lineStr)) {
processProvidedStatus_ = true;
setStatus(lineStr);
} else {
LOG.warn("Cannot parse reporter line: " + lineStr);
}
} else {
System.err.println(lineStr);
}
long now = System.currentTimeMillis();
if (reporter != null && now-lastStderrReport > reporterErrDelay_) {
lastStderrReport = now;
reporter.progress();
}
line.clear();
}
if (lineReader != null) {
lineReader.close();
}
if (clientErr_ != null) {
clientErr_.close();
clientErr_ = null;
LOG.info("MRErrorThread done");
}
} catch (Throwable th) {
outerrThreadsThrowable = th;
LOG.warn(th);
try {
if (lineReader != null) {
lineReader.close();
}
if (clientErr_ != null) {
clientErr_.close();
clientErr_ = null;
}
} catch (IOException io) {
LOG.info(io);
}
}
}
private boolean matchesReporter(String line) {
return line.startsWith(reporterPrefix);
}
private boolean matchesCounter(String line) {
return line.startsWith(counterPrefix);
}
private boolean matchesStatus(String line) {
return line.startsWith(statusPrefix);
}
private void incrCounter(String line) {
String trimmedLine = line.substring(counterPrefix.length()).trim();
String[] columns = trimmedLine.split(",");
if (columns.length == 3) {
try {
reporter.incrCounter(columns[0], columns[1],
Long.parseLong(columns[2]));
} catch (NumberFormatException e) {
LOG.warn("Cannot parse counter increment '" + columns[2] +
"' from line: " + line);
}
} else {
LOG.warn("Cannot parse counter line: " + line);
}
}
private void setStatus(String line) {
reporter.setStatus(line.substring(statusPrefix.length()).trim());
}
long lastStderrReport = 0;
volatile Reporter reporter;
private final String reporterPrefix;
private final String counterPrefix;
private final String statusPrefix;
}
public void mapRedFinished() {
try {
if (!doPipe_) {
LOG.info("mapRedFinished");
return;
}
if (clientOut_ != null) {
try {
clientOut_.flush();
clientOut_.close();
} catch (IOException io) {
LOG.warn(io);
}
}
try {
waitOutputThreads();
} catch (IOException io) {
LOG.warn(io);
}
if (sim != null) sim.destroy();
LOG.info("mapRedFinished");
} catch (RuntimeException e) {
LOG.info("PipeMapRed failed!", e);
throw e;
}
}
void maybeLogRecord() {
if (numRecRead_ >= nextRecReadLog_) {
String info = numRecInfo();
LOG.info(info);
if (nextRecReadLog_ < 100000) {
nextRecReadLog_ *= 10;
} else {
nextRecReadLog_ += 100000;
}
}
}
public String getContext() {
String s = numRecInfo() + "\n";
s += "minRecWrittenToEnableSkip_=" + minRecWrittenToEnableSkip_ + " ";
s += envline("HOST");
s += envline("USER");
s += envline("HADOOP_USER");
if (outThread_ != null) {
s += "last tool output: |" + outReader_.getLastOutput() + "|\n";
}
return s;
}
String envline(String var) {
return var + "=" + StreamUtil.env().get(var) + "\n";
}
String numRecInfo() {
long elapsed = (System.currentTimeMillis() - startTime_) / 1000;
return "R/W/S=" + numRecRead_ + "/" + numRecWritten_ + "/" + numRecSkipped_ + " in:"
+ safeDiv(numRecRead_, elapsed) + " [rec/s]" + " out:" + safeDiv(numRecWritten_, elapsed)
+ " [rec/s]";
}
String safeDiv(long n, long d) {
return (d == 0) ? "NA" : "" + n / d + "=" + n + "/" + d;
}
long startTime_;
long numRecRead_ = 0;
long numRecWritten_ = 0;
long numRecSkipped_ = 0;
long nextRecReadLog_ = 1;
long minRecWrittenToEnableSkip_ = Long.MAX_VALUE;
long reporterOutDelay_ = 10*1000L;
long reporterErrDelay_ = 10*1000L;
long joinDelay_;
JobConf job_;
boolean doPipe_;
Class<? extends InputWriter> mapInputWriterClass_;
Class<? extends OutputReader> mapOutputReaderClass_;
Class<? extends InputWriter> reduceInputWriterClass_;
Class<? extends OutputReader> reduceOutputReaderClass_;
boolean nonZeroExitIsFailure_;
Process sim;
InputWriter inWriter_;
OutputReader outReader_;
MROutputThread outThread_;
MRErrorThread errThread_;
DataOutputStream clientOut_;
DataInputStream clientErr_;
DataInputStream clientIn_;
// set in PipeMapper/PipeReducer subclasses
int numExceptions_;
protected volatile Throwable outerrThreadsThrowable;
volatile boolean processProvidedStatus_ = false;
}
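/*
 * Illustrative sketch (not part of the original file): how splitArgs()
 * above tokenizes a streaming command line, honouring single and double
 * quotes. splitArgs is package-private, so this only compiles inside the
 * org.apache.hadoop.streaming package.
 */
class SplitArgsExample {
  public static void main(String[] args) {
    String cmd = "grep -E 'red|green|blue' \"my file.txt\"";
    for (String token : PipeMapRed.splitArgs(cmd)) {
      System.out.println("[" + token + "]");
    }
    // prints: [grep] [-E] [red|green|blue] [my file.txt]
  }
}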
| 19,128 | 29.508772 | 99 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.net.URLDecoder;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.streaming.io.InputWriter;
import org.apache.hadoop.streaming.io.OutputReader;
import org.apache.hadoop.streaming.io.TextInputWriter;
/** A generic Mapper bridge.
* It delegates operations to an external program via stdin and stdout.
*/
public class PipeMapper extends PipeMapRed implements Mapper {
private boolean ignoreKey = false;
private boolean skipping = false;
private byte[] mapOutputFieldSeparator;
private byte[] mapInputFieldSeparator;
private int numOfMapOutputKeyFields = 1;
String getPipeCommand(JobConf job) {
String str = job.get("stream.map.streamprocessor");
if (str == null) {
return str;
}
try {
return URLDecoder.decode(str, "UTF-8");
}
catch (UnsupportedEncodingException e) {
System.err.println("stream.map.streamprocessor in jobconf not found");
return null;
}
}
boolean getDoPipe() {
return true;
}
public void configure(JobConf job) {
super.configure(job);
// Disable auto-increment of the processed-record counter. For streaming,
// the number of processed records may differ from (be equal to or less
// than) the number of input records.
SkipBadRecords.setAutoIncrMapperProcCount(job, false);
skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
if (mapInputWriterClass_.getCanonicalName().equals(TextInputWriter.class.getCanonicalName())) {
String inputFormatClassName = job.getClass("mapred.input.format.class", TextInputFormat.class).getCanonicalName();
ignoreKey = job.getBoolean("stream.map.input.ignoreKey",
inputFormatClassName.equals(TextInputFormat.class.getCanonicalName()));
}
try {
mapOutputFieldSeparator = job.get("stream.map.output.field.separator", "\t").getBytes("UTF-8");
mapInputFieldSeparator = job.get("stream.map.input.field.separator", "\t").getBytes("UTF-8");
numOfMapOutputKeyFields = job.getInt("stream.num.map.output.key.fields", 1);
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
}
}
// Do NOT declare default constructor
// (MapRed creates it reflectively)
public void map(Object key, Object value, OutputCollector output, Reporter reporter) throws IOException {
if (outerrThreadsThrowable != null) {
mapRedFinished();
throw new IOException("MROutput/MRErrThread failed:",
outerrThreadsThrowable);
}
try {
// 1/4 Hadoop in
numRecRead_++;
maybeLogRecord();
// 2/4 Hadoop to Tool
if (numExceptions_ == 0) {
if (!this.ignoreKey) {
inWriter_.writeKey(key);
}
inWriter_.writeValue(value);
if(skipping) {
//flush the streams on every record input if running in skip mode
//so that we don't buffer other records surrounding a bad record.
clientOut_.flush();
}
} else {
numRecSkipped_++;
}
} catch (IOException io) {
numExceptions_++;
if (numExceptions_ > 1 || numRecWritten_ < minRecWrittenToEnableSkip_) {
// terminate with failure
LOG.info(getContext(), io);
mapRedFinished();
throw io;
} else {
// terminate with success:
// swallow input records although the stream processor failed/closed
}
}
}
public void close() {
mapRedFinished();
}
@Override
public byte[] getInputSeparator() {
return mapInputFieldSeparator;
}
@Override
public byte[] getFieldSeparator() {
return mapOutputFieldSeparator;
}
@Override
public int getNumOfKeyFields() {
return numOfMapOutputKeyFields;
}
@Override
InputWriter createInputWriter() throws IOException {
return super.createInputWriter(mapInputWriterClass_);
}
@Override
OutputReader createOutputReader() throws IOException {
return super.createOutputReader(mapOutputReaderClass_);
}
}
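/*
 * Minimal configuration sketch (illustrative): the knobs read by
 * PipeMapper.configure() above can be set on a streaming job's JobConf.
 * The values shown are examples, not the defaults ("\t" and 1).
 */
class PipeMapperConfSketch {
  static org.apache.hadoop.mapred.JobConf example() {
    org.apache.hadoop.mapred.JobConf conf =
        new org.apache.hadoop.mapred.JobConf();
    conf.set("stream.map.output.field.separator", "|");
    conf.set("stream.map.input.field.separator", "|");
    conf.setInt("stream.num.map.output.key.fields", 2);
    conf.setBoolean("stream.map.input.ignoreKey", true); // don't write keys to the subprocess
    return conf;
  }
}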
| 5,174 | 31.54717 | 120 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.typedbytes.TypedBytesInput;
import org.apache.hadoop.typedbytes.TypedBytesWritable;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Utility program that reads typed bytes from standard input and stores them in
* a sequence file for which the path is given as an argument.
*/
public class LoadTypedBytes implements Tool {
private Configuration conf;
public LoadTypedBytes(Configuration conf) {
this.conf = conf;
}
public LoadTypedBytes() {
this(new Configuration());
}
public Configuration getConf() {
return conf;
}
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* The main driver for <code>LoadTypedBytes</code>.
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
System.err.println("Too few arguments!");
printUsage();
return 1;
}
Path path = new Path(args[0]);
FileSystem fs = path.getFileSystem(getConf());
if (fs.exists(path)) {
System.err.println("given path exists already!");
return -1;
}
TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(System.in));
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
TypedBytesWritable.class, TypedBytesWritable.class);
try {
TypedBytesWritable key = new TypedBytesWritable();
TypedBytesWritable value = new TypedBytesWritable();
byte[] rawKey = tbinput.readRaw();
while (rawKey != null) {
byte[] rawValue = tbinput.readRaw();
key.set(rawKey, 0, rawKey.length);
value.set(rawValue, 0, rawValue.length);
writer.append(key, value);
rawKey = tbinput.readRaw();
}
} finally {
writer.close();
}
return 0;
}
private void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
+ " loadtb <path>");
System.out.println(" Reads typed bytes from standard input" +
" and stores them in a sequence file in");
System.out.println(" the specified path");
}
public static void main(String[] args) throws Exception {
LoadTypedBytes loadtb = new LoadTypedBytes();
int res = ToolRunner.run(loadtb, args);
System.exit(res);
}
}
| 3,342 | 30.537736 | 82 | java |
| hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.typedbytes.TypedBytesOutput;
import org.apache.hadoop.typedbytes.TypedBytesWritableOutput;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Utility program that fetches all files that match a given pattern and dumps
* their content to stdout as typed bytes. This works for all files that can be
* handled by {@link org.apache.hadoop.streaming.AutoInputFormat}.
*/
public class DumpTypedBytes implements Tool {
private Configuration conf;
public DumpTypedBytes(Configuration conf) {
this.conf = conf;
}
public DumpTypedBytes() {
this(new Configuration());
}
public Configuration getConf() {
return conf;
}
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* The main driver for <code>DumpTypedBytes</code>.
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
System.err.println("Too few arguments!");
printUsage();
return 1;
}
Path pattern = new Path(args[0]);
FileSystem fs = pattern.getFileSystem(getConf());
fs.setVerifyChecksum(true);
for (Path p : FileUtil.stat2Paths(fs.globStatus(pattern), pattern)) {
List<FileStatus> inputFiles = new ArrayList<FileStatus>();
FileStatus status = fs.getFileStatus(p);
if (status.isDirectory()) {
FileStatus[] files = fs.listStatus(p);
Collections.addAll(inputFiles, files);
} else {
inputFiles.add(status);
}
return dumpTypedBytes(inputFiles);
}
return -1;
}
private void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
+ " dumptb <glob-pattern>");
System.out.println(" Dumps all files that match the given pattern to " +
"standard output as typed bytes.");
System.out.println(" The files can be text or sequence files");
}
/**
* Dump given list of files to standard output as typed bytes.
*/
@SuppressWarnings("unchecked")
private int dumpTypedBytes(List<FileStatus> files) throws IOException {
JobConf job = new JobConf(getConf());
DataOutputStream dout = new DataOutputStream(System.out);
AutoInputFormat autoInputFormat = new AutoInputFormat();
for (FileStatus fileStatus : files) {
FileSplit split = new FileSplit(fileStatus.getPath(), 0,
fileStatus.getLen() * fileStatus.getBlockSize(),
(String[]) null);
RecordReader recReader = null;
try {
recReader = autoInputFormat.getRecordReader(split, job, Reporter.NULL);
Object key = recReader.createKey();
Object value = recReader.createValue();
while (recReader.next(key, value)) {
if (key instanceof Writable) {
TypedBytesWritableOutput.get(dout).write((Writable) key);
} else {
TypedBytesOutput.get(dout).write(key);
}
if (value instanceof Writable) {
TypedBytesWritableOutput.get(dout).write((Writable) value);
} else {
TypedBytesOutput.get(dout).write(value);
}
}
} finally {
if (recReader != null) {
recReader.close();
}
}
}
dout.flush();
return 0;
}
public static void main(String[] args) throws Exception {
DumpTypedBytes dumptb = new DumpTypedBytes();
int res = ToolRunner.run(dumptb, args);
System.exit(res);
}
}
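A hedged sketch of driving the tool above programmatically rather than through bin/hadoop; the class name and the glob pattern are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.streaming.DumpTypedBytes;
import org.apache.hadoop.util.ToolRunner;

public class DumpTypedBytesDriver {
  public static void main(String[] args) throws Exception {
    // The pattern is expanded with FileSystem.globStatus(), exactly as in run().
    String pattern = "/user/example/logs/part-*";
    int exitCode = ToolRunner.run(new Configuration(), new DumpTypedBytes(),
        new String[] { pattern });
    System.exit(exitCode);
  }
}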
| 4,843 | 31.952381 | 82 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.util.Arrays;
import org.apache.hadoop.util.ToolRunner;
/** The main entry point. Usually invoked with the script
* bin/hadoop jar hadoop-streaming.jar args.
*/
public class HadoopStreaming {
public static void main(String[] args) throws Exception {
if (args.length < 1) {
System.err.println("No Arguments Given!");
printUsage();
System.exit(1);
}
int returnStatus = 0;
String cmd = args[0];
String[] remainingArgs = Arrays.copyOfRange(args, 1, args.length);
if (cmd.equalsIgnoreCase("dumptb")) {
DumpTypedBytes dumptb = new DumpTypedBytes();
returnStatus = ToolRunner.run(dumptb, remainingArgs);
} else if (cmd.equalsIgnoreCase("loadtb")) {
LoadTypedBytes loadtb = new LoadTypedBytes();
returnStatus = ToolRunner.run(loadtb, remainingArgs);
} else if (cmd.equalsIgnoreCase("streamjob")) {
StreamJob job = new StreamJob();
returnStatus = ToolRunner.run(job, remainingArgs);
} else { // for backward compatibility
StreamJob job = new StreamJob();
returnStatus = ToolRunner.run(job, args);
}
if (returnStatus != 0) {
System.err.println("Streaming Command Failed!");
System.exit(returnStatus);
}
}
private static void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
+ " [options]");
System.out.println("Options:");
System.out.println(" dumptb <glob-pattern> Dumps all files that match the"
+ " given pattern to ");
System.out.println(" standard output as typed " +
"bytes.");
System.out.println(" loadtb <path> Reads typed bytes from standard input" +
" and stores them in");
System.out.println(" a sequence file in the specified path");
System.out.println(" [streamjob] <args> Runs streaming job with given" +
" arguments");
}
}
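The dispatch above means a command line with no recognized sub-command falls through to StreamJob. A minimal sketch of the equivalent programmatic call, with placeholder paths and the classic cat/wc commands:

import org.apache.hadoop.streaming.StreamJob;
import org.apache.hadoop.util.ToolRunner;

public class StreamingDispatchSketch {
  public static void main(String[] args) throws Exception {
    // Same effect as "hadoop jar hadoop-streaming.jar -input ... -output ...":
    // with no sub-command, the whole argument list goes straight to StreamJob.
    String[] streamArgs = {
        "-input", "/user/example/in",   // placeholder input path
        "-output", "/user/example/out", // placeholder output path
        "-mapper", "cat",
        "-reducer", "wc"
    };
    System.exit(ToolRunner.run(new StreamJob(), streamArgs));
  }
}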
| 2,782 | 37.123288 | 82 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/AutoInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
/**
* An {@link InputFormat} that tries to deduce the types of the input files
* automatically. It can currently handle text and sequence files.
*/
public class AutoInputFormat extends FileInputFormat {
private TextInputFormat textInputFormat = new TextInputFormat();
private SequenceFileInputFormat seqFileInputFormat =
new SequenceFileInputFormat();
public void configure(JobConf job) {
textInputFormat.configure(job);
// SequenceFileInputFormat has no configure() method
}
public RecordReader getRecordReader(InputSplit split, JobConf job,
Reporter reporter) throws IOException {
FileSplit fileSplit = (FileSplit) split;
FileSystem fs = FileSystem.get(fileSplit.getPath().toUri(), job);
FSDataInputStream is = fs.open(fileSplit.getPath());
byte[] header = new byte[3];
RecordReader reader = null;
try {
is.readFully(header);
} catch (EOFException eof) {
reader = textInputFormat.getRecordReader(split, job, reporter);
} finally {
is.close();
}
    if (reader == null) {
      if (header[0] == 'S' && header[1] == 'E' && header[2] == 'Q') {
        reader = seqFileInputFormat.getRecordReader(split, job, reporter);
      } else {
        reader = textInputFormat.getRecordReader(split, job, reporter);
      }
    }
return reader;
}
}
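A sketch showing the format-sniffing in isolation, modeled on dumpTypedBytes() above; the default path is a placeholder and the class name is invented.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.streaming.AutoInputFormat;

public class AutoInputFormatSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf();
    Path path = new Path(args.length > 0 ? args[0] : "/user/example/data");
    FileSystem fs = path.getFileSystem(job);
    FileStatus status = fs.getFileStatus(path);
    AutoInputFormat format = new AutoInputFormat();
    format.configure(job);
    // One split over the whole file; the first three bytes decide whether a
    // sequence-file reader or a text reader is returned.
    FileSplit split = new FileSplit(path, 0, status.getLen(), (String[]) null);
    RecordReader reader = format.getRecordReader(split, job, Reporter.NULL);
    Object key = reader.createKey();
    Object value = reader.createValue();
    try {
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value);
      }
    } finally {
      reader.close();
    }
  }
}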
| 2,699 | 35 | 75 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URL;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRConfig;
/**
* Utilities used in streaming
*/
@InterfaceAudience.Private
public class StreamUtil {
/** It may seem strange to silently switch behaviour when a String
* is not a classname; the reason is simplified Usage:<pre>
* -mapper [classname | program ]
* instead of the explicit Usage:
* [-mapper program | -javamapper classname], -mapper and -javamapper are mutually exclusive.
* (repeat for -reducer, -combiner) </pre>
*/
public static Class goodClassOrNull(Configuration conf, String className, String defaultPackage) {
Class clazz = null;
try {
clazz = conf.getClassByName(className);
} catch (ClassNotFoundException cnf) {
}
if (clazz == null) {
if (className.indexOf('.') == -1 && defaultPackage != null) {
className = defaultPackage + "." + className;
try {
clazz = conf.getClassByName(className);
} catch (ClassNotFoundException cnf) {
}
}
}
return clazz;
}
public static String findInClasspath(String className) {
return findInClasspath(className, StreamUtil.class.getClassLoader());
}
/** @return a jar file path or a base directory or null if not found.
*/
public static String findInClasspath(String className, ClassLoader loader) {
String relPath = className;
relPath = relPath.replace('.', '/');
relPath += ".class";
java.net.URL classUrl = loader.getResource(relPath);
String codePath;
if (classUrl != null) {
boolean inJar = classUrl.getProtocol().equals("jar");
codePath = classUrl.toString();
if (codePath.startsWith("jar:")) {
codePath = codePath.substring("jar:".length());
}
if (codePath.startsWith("file:")) { // can have both
codePath = codePath.substring("file:".length());
}
if (inJar) {
// A jar spec: remove class suffix in /path/my.jar!/package/Class
int bang = codePath.lastIndexOf('!');
codePath = codePath.substring(0, bang);
} else {
// A class spec: remove the /my/package/Class.class portion
int pos = codePath.lastIndexOf(relPath);
if (pos == -1) {
throw new IllegalArgumentException("invalid codePath: className=" + className
+ " codePath=" + codePath);
}
codePath = codePath.substring(0, pos);
}
} else {
codePath = null;
}
return codePath;
}
static String qualifyHost(String url) {
try {
return qualifyHost(new URL(url)).toString();
} catch (IOException io) {
return url;
}
}
static URL qualifyHost(URL url) {
try {
InetAddress a = InetAddress.getByName(url.getHost());
String qualHost = a.getCanonicalHostName();
URL q = new URL(url.getProtocol(), qualHost, url.getPort(), url.getFile());
return q;
} catch (IOException io) {
return url;
}
}
static final String regexpSpecials = "[]()?*+|.!^-\\~@";
public static String regexpEscape(String plain) {
StringBuffer buf = new StringBuffer();
char[] ch = plain.toCharArray();
int csup = ch.length;
for (int c = 0; c < csup; c++) {
if (regexpSpecials.indexOf(ch[c]) != -1) {
buf.append("\\");
}
buf.append(ch[c]);
}
return buf.toString();
}
static String slurp(File f) throws IOException {
int len = (int) f.length();
byte[] buf = new byte[len];
FileInputStream in = new FileInputStream(f);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
int len = (int) fs.getFileStatus(p).getLen();
byte[] buf = new byte[len];
FSDataInputStream in = fs.open(p);
String contents = null;
try {
in.readFully(in.getPos(), buf);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
static private Environment env;
static String HOST;
static {
try {
env = new Environment();
HOST = env.getHost();
} catch (IOException io) {
io.printStackTrace();
}
}
static Environment env() {
if (env != null) {
return env;
}
try {
env = new Environment();
} catch (IOException io) {
io.printStackTrace();
}
return env;
}
public static boolean isLocalJobTracker(JobConf job) {
String framework =
job.get(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
return framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME);
}
}
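The goodClassOrNull() contract described in its javadoc above is easiest to see with concrete inputs; a short sketch with an invented class name and illustrative arguments:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.streaming.StreamUtil;

public class GoodClassOrNullSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A fully qualified class name resolves directly.
    Class<?> byFqcn = StreamUtil.goodClassOrNull(conf,
        "org.apache.hadoop.streaming.PipeMapper", null);
    // A bare name is retried with the default package prepended.
    Class<?> byShortName = StreamUtil.goodClassOrNull(conf,
        "PipeMapper", "org.apache.hadoop.streaming");
    // A command such as "/bin/cat" is not a class name, so null comes back and
    // the caller silently treats the string as a streaming command instead.
    Class<?> notAClass = StreamUtil.goodClassOrNull(conf, "/bin/cat", null);
    System.out.println(byFqcn + " / " + byShortName + " / " + notAClass);
  }
}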
| 5,961 | 28.81 | 100 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.lang.reflect.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
/** An input format that selects a RecordReader based on a JobConf property.
* This should be used only for non-standard record reader such as
* StreamXmlRecordReader. For all other standard
* record readers, the appropriate input format classes should be used.
*/
public class StreamInputFormat extends KeyValueTextInputFormat {
@SuppressWarnings("unchecked")
public RecordReader<Text, Text> getRecordReader(final InputSplit genericSplit,
JobConf job, Reporter reporter) throws IOException {
String c = job.get("stream.recordreader.class");
if (c == null || c.indexOf("LineRecordReader") >= 0) {
return super.getRecordReader(genericSplit, job, reporter);
}
// handling non-standard record reader (likely StreamXmlRecordReader)
FileSplit split = (FileSplit) genericSplit;
LOG.info("getRecordReader start.....split=" + split);
reporter.setStatus(split.toString());
// Open the file and seek to the start of the split
FileSystem fs = split.getPath().getFileSystem(job);
FSDataInputStream in = fs.open(split.getPath());
// Factory dispatch based on available params..
Class readerClass;
{
readerClass = StreamUtil.goodClassOrNull(job, c, null);
if (readerClass == null) {
throw new RuntimeException("Class not found: " + c);
}
}
Constructor ctor;
try {
ctor = readerClass.getConstructor(new Class[] { FSDataInputStream.class,
FileSplit.class, Reporter.class, JobConf.class, FileSystem.class });
} catch (NoSuchMethodException nsm) {
throw new RuntimeException(nsm);
}
RecordReader<Text, Text> reader;
try {
reader = (RecordReader<Text, Text>) ctor.newInstance(new Object[] { in, split,
reporter, job, fs });
} catch (Exception nsm) {
throw new RuntimeException(nsm);
}
return reader;
}
}
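A configuration sketch for the non-standard reader case the class above handles; the begin/end markers are illustrative values, and a real job still needs input/output paths and the rest of the streaming setup.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.streaming.StreamInputFormat;

public class StreamXmlInputSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // With this property set, getRecordReader() above bypasses the
    // KeyValueTextInputFormat path and builds the reader reflectively.
    job.set("stream.recordreader.class",
        "org.apache.hadoop.streaming.StreamXmlRecordReader");
    // Additional "stream.recordreader.*" keys become reader parameters
    // (StreamJob derives them from the -inputreader option).
    job.set("stream.recordreader.begin", "<record>");
    job.set("stream.recordreader.end", "</record>");
    job.setInputFormat(StreamInputFormat.class);
  }
}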
| 3,062 | 35.903614 | 122 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PathFinder.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
/**
* Maps a relative pathname to an absolute pathname using the PATH environment.
*/
@InterfaceAudience.Private
public class PathFinder {
String pathenv; // a string of pathnames
String pathSep; // the path separator
String fileSep; // the file separator in a directory
/**
* Construct a PathFinder object using the path from java.class.path
*/
public PathFinder() {
pathenv = System.getProperty("java.class.path");
pathSep = System.getProperty("path.separator");
fileSep = System.getProperty("file.separator");
}
/**
* Construct a PathFinder object using the path from the specified system
* environment variable.
*/
public PathFinder(String envpath) {
pathenv = System.getenv(envpath);
pathSep = System.getProperty("path.separator");
fileSep = System.getProperty("file.separator");
}
/**
   * Prepends the specified component to the path list
*/
public void prependPathComponent(String str) {
pathenv = str + pathSep + pathenv;
}
/**
* Returns the full path name of this file if it is listed in the path
*/
public File getAbsolutePath(String filename) {
if (pathenv == null || pathSep == null || fileSep == null) {
return null;
}
int val = -1;
String classvalue = pathenv + pathSep;
while (((val = classvalue.indexOf(pathSep)) >= 0)
&& classvalue.length() > 0) {
// Extract each entry from the pathenv
String entry = classvalue.substring(0, val).trim();
File f = new File(entry);
if (f.isDirectory()) {
// this entry in the pathenv is a directory.
// see if the required file is in this directory
f = new File(entry + fileSep + filename);
}
// see if the filename matches and we can read it
if (f.isFile() && FileUtil.canRead(f)) {
return f;
}
classvalue = classvalue.substring(val + 1).trim();
}
return null;
}
public static void main(String args[]) throws IOException {
if (args.length < 1) {
System.out.println("Usage: java PathFinder <filename>");
System.exit(1);
}
PathFinder finder = new PathFinder("PATH");
File file = finder.getAbsolutePath(args[0]);
if (file != null) {
System.out.println("Full path name = " + file.getCanonicalPath());
}
}
}
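Beyond the built-in main() above, the other common use is giving a job-local directory priority over the system PATH; a brief sketch with an invented class name:

import java.io.File;
import org.apache.hadoop.streaming.PathFinder;

public class PathFinderSketch {
  public static void main(String[] args) throws Exception {
    PathFinder finder = new PathFinder("PATH");
    // "." is where TaskRunner unpacks shipped files, so search it first.
    finder.prependPathComponent(".");
    File exe = finder.getAbsolutePath("perl");
    System.out.println(exe == null ? "perl not found on PATH"
        : "perl resolved to " + exe.getCanonicalPath());
  }
}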
| 3,282 | 30.873786 | 79 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/JarBuilder.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.*;
import java.util.jar.*;
import java.util.zip.ZipException;
/**
 * This class is the main class for generating the job.jar
 * for Hadoop Streaming jobs. It takes the files specified
 * with the -file option and includes them in the jar. Also,
 * hadoop-streaming is a user-level application, so all the classes
 * shipped with hadoop-streaming that are needed by the job are also
 * included in the job.jar.
*/
public class JarBuilder {
public JarBuilder() {
}
public void setVerbose(boolean v) {
this.verbose = v;
}
public void merge(List srcNames, List srcUnjar, String dstJar) throws IOException {
String source = null;
JarOutputStream jarOut = null;
JarFile jarSource = null;
jarOut = new JarOutputStream(new FileOutputStream(dstJar));
boolean throwing = false;
try {
if (srcNames != null) {
Iterator iter = srcNames.iterator();
while (iter.hasNext()) {
source = (String) iter.next();
File fsource = new File(source);
String base = getBasePathInJarOut(source);
if (!fsource.exists()) {
throwing = true;
throw new FileNotFoundException(fsource.getAbsolutePath());
}
if (fsource.isDirectory()) {
addDirectory(jarOut, base, fsource, 0);
} else {
addFileStream(jarOut, base, fsource);
}
}
}
if (srcUnjar != null) {
Iterator iter = srcUnjar.iterator();
while (iter.hasNext()) {
source = (String) iter.next();
jarSource = new JarFile(source);
addJarEntries(jarOut, jarSource);
jarSource.close();
}
}
} finally {
try {
jarOut.close();
} catch (ZipException z) {
if (!throwing) {
throw new IOException(z.toString());
}
}
}
}
protected String fileExtension(String file) {
int leafPos = file.lastIndexOf('/');
if (leafPos == file.length() - 1) return "";
String leafName = file.substring(leafPos + 1);
int dotPos = leafName.lastIndexOf('.');
if (dotPos == -1) return "";
String ext = leafName.substring(dotPos + 1);
return ext;
}
/** @return empty or a jar base path. Must not start with '/' */
protected String getBasePathInJarOut(String sourceFile) {
// TaskRunner will unjar and append to classpath: .:classes/:lib/*
String ext = fileExtension(sourceFile);
if (ext.equals("class")) {
return "classes/"; // or ""
} else if (ext.equals("jar") || ext.equals("zip")) {
return "lib/";
} else {
return "";
}
}
private void addJarEntries(JarOutputStream dst, JarFile src) throws IOException {
Enumeration entries = src.entries();
JarEntry entry = null;
while (entries.hasMoreElements()) {
entry = (JarEntry) entries.nextElement();
//if (entry.getName().startsWith("META-INF/")) continue;
InputStream in = src.getInputStream(entry);
addNamedStream(dst, entry.getName(), in);
}
}
/** @param name path in jar for this jar element. Must not start with '/' */
void addNamedStream(JarOutputStream dst, String name, InputStream in) throws IOException {
if (verbose) {
System.err.println("JarBuilder.addNamedStream " + name);
}
try {
dst.putNextEntry(new JarEntry(name));
int bytesRead = 0;
while ((bytesRead = in.read(buffer, 0, BUFF_SIZE)) != -1) {
dst.write(buffer, 0, bytesRead);
}
} catch (ZipException ze) {
if (ze.getMessage().indexOf("duplicate entry") >= 0) {
if (verbose) {
System.err.println(ze + " Skip duplicate entry " + name);
}
} else {
throw ze;
}
} finally {
in.close();
dst.flush();
dst.closeEntry();
}
}
void addFileStream(JarOutputStream dst, String jarBaseName, File file) throws IOException {
FileInputStream in = new FileInputStream(file);
try {
String name = jarBaseName + file.getName();
addNamedStream(dst, name, in);
} finally {
in.close();
}
}
void addDirectory(JarOutputStream dst, String jarBaseName, File dir, int depth) throws IOException {
File[] contents = dir.listFiles();
if (contents != null) {
for (int i = 0; i < contents.length; i++) {
File f = contents[i];
String fBaseName = (depth == 0) ? "" : dir.getName();
if (jarBaseName.length() > 0) {
fBaseName = jarBaseName + "/" + fBaseName;
}
if (f.isDirectory()) {
addDirectory(dst, fBaseName, f, depth + 1);
} else {
addFileStream(dst, fBaseName + "/", f);
}
}
}
}
/** Test program */
public static void main(String args[]) {
// args = new String[] { "C:/Temp/merged.jar", "C:/jdk1.5.0/jre/lib/ext/dnsns.jar", "/Temp/addtojar2.log", "C:/jdk1.5.0/jre/lib/ext/mtest.jar", "C:/Temp/base"};
if (args.length < 2) {
System.err.println("Usage: JarFiles merged.jar [src.jar | dir | file ]+");
} else {
JarBuilder jarFiles = new JarBuilder();
List names = new ArrayList();
List unjar = new ArrayList();
for (int i = 1; i < args.length; i++) {
String f = args[i];
String ext = jarFiles.fileExtension(f);
boolean expandAsJar = ext.equals("jar") || ext.equals("zip");
if (expandAsJar) {
unjar.add(f);
} else {
names.add(f);
}
}
try {
jarFiles.merge(names, unjar, args[0]);
Date lastMod = new Date(new File(args[0]).lastModified());
System.out.println("Merge done to " + args[0] + " " + lastMod);
} catch (Exception ge) {
ge.printStackTrace(System.err);
}
}
}
private static final int BUFF_SIZE = 32 * 1024;
private byte buffer[] = new byte[BUFF_SIZE];
protected boolean verbose = false;
}
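A sketch of calling merge() directly, mirroring what packageJobJar() in StreamJob does later in this repository; all paths are placeholders.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.streaming.JarBuilder;

public class JarBuilderSketch {
  public static void main(String[] args) throws Exception {
    JarBuilder builder = new JarBuilder();
    builder.setVerbose(true);
    // Files and directories go in under a base path chosen by extension
    // (classes/ for .class, lib/ for .jar/.zip, "" otherwise); jars listed in
    // the second argument are unpacked and merged entry by entry.
    List files = new ArrayList();
    files.add("build/classes");     // placeholder class directory
    files.add("scripts/filter.pl"); // placeholder shipped script
    List unjar = new ArrayList();
    unjar.add("lib/hadoop-streaming.jar"); // placeholder runtime jar
    builder.merge(files, unjar, "/tmp/job.jar");
  }
}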
| 6,781 | 31.449761 | 165 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRunner.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import org.apache.hadoop.mapred.MapRunner;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.OutputCollector;
import java.io.IOException;
public class PipeMapRunner<K1, V1, K2, V2> extends MapRunner<K1, V1, K2, V2> {
public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
Reporter reporter)
throws IOException {
PipeMapper pipeMapper = (PipeMapper)getMapper();
pipeMapper.startOutputThreads(output, reporter);
super.run(input, output, reporter);
}
}
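A minimal sketch of the wiring StreamJob.setJobConf() performs further down when the mapper is an external command; on its own this is not a complete streaming job configuration.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.streaming.PipeMapRunner;
import org.apache.hadoop.streaming.PipeMapper;

public class PipeMapRunnerWiring {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // PipeMapRunner starts the external process's output threads before
    // handing control to the normal MapRunner loop.
    job.setMapperClass(PipeMapper.class);
    job.setMapRunnerClass(PipeMapRunner.class);
    // StreamJob stores the URL-encoded command here ("cat" is unchanged).
    job.set("stream.map.streamprocessor", "cat");
  }
}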
| 1,432 | 37.72973 | 78 | java | hadoop | hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.SequenceFileAsTextInputFormat;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.LazyOutputFormat;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner;
import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer;
import org.apache.hadoop.streaming.io.IdentifierResolver;
import org.apache.hadoop.streaming.io.InputWriter;
import org.apache.hadoop.streaming.io.OutputReader;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.RunJar;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
/** All the client-side work happens here.
* (Jar packaging, MapRed job submission and monitoring)
*/
public class StreamJob implements Tool {
protected static final Log LOG = LogFactory.getLog(StreamJob.class.getName());
final static String REDUCE_NONE = "NONE";
/** -----------Streaming CLI Implementation **/
private CommandLineParser parser = new BasicParser();
private Options allOptions;
/**@deprecated use StreamJob() with ToolRunner or set the
* Configuration using {@link #setConf(Configuration)} and
* run with {@link #run(String[])}.
*/
@Deprecated
public StreamJob(String[] argv, boolean mayExit) {
this();
argv_ = Arrays.copyOf(argv, argv.length);
this.config_ = new Configuration();
}
public StreamJob() {
setupOptions();
this.config_ = new Configuration();
}
@Override
public Configuration getConf() {
return config_;
}
@Override
public void setConf(Configuration conf) {
this.config_ = conf;
}
@Override
public int run(String[] args) throws Exception {
try {
this.argv_ = Arrays.copyOf(args, args.length);
init();
preProcessArgs();
parseArgv();
if (printUsage) {
printUsage(detailedUsage_);
return 0;
}
postProcessArgs();
setJobConf();
} catch (IllegalArgumentException ex) {
//ignore, since log will already be printed
// print the log in debug mode.
LOG.debug("Error in streaming job", ex);
return 1;
}
return submitAndMonitorJob();
}
/**
* This method creates a streaming job from the given argument list.
* The created object can be used and/or submitted to a jobtracker for
* execution by a job agent such as JobControl
* @param argv the list args for creating a streaming job
* @return the created JobConf object
* @throws IOException
*/
static public JobConf createJob(String[] argv) throws IOException {
StreamJob job = new StreamJob();
job.argv_ = argv;
job.init();
job.preProcessArgs();
job.parseArgv();
job.postProcessArgs();
job.setJobConf();
return job.jobConf_;
}
/**
   * This is the method that actually
   * initializes the job conf and submits the job
   * to the jobtracker.
* @throws IOException
* @deprecated use {@link #run(String[])} instead.
*/
@Deprecated
public int go() throws IOException {
try {
return run(argv_);
}
catch (Exception ex) {
throw new IOException(ex.getMessage());
}
}
protected void init() {
try {
env_ = new Environment();
} catch (IOException io) {
throw new RuntimeException(io);
}
}
void preProcessArgs() {
verbose_ = false;
// Unset HADOOP_ROOT_LOGGER in case streaming job
// invokes additional hadoop commands.
addTaskEnvironment_ = "HADOOP_ROOT_LOGGER=";
}
void postProcessArgs() throws IOException {
if (inputSpecs_.size() == 0) {
fail("Required argument: -input <name>");
}
if (output_ == null) {
fail("Required argument: -output ");
}
msg("addTaskEnvironment=" + addTaskEnvironment_);
for (final String packageFile : packageFiles_) {
File f = new File(packageFile);
if (f.isFile()) {
shippedCanonFiles_.add(f.getCanonicalPath());
}
}
msg("shippedCanonFiles_=" + shippedCanonFiles_);
// careful with class names..
mapCmd_ = unqualifyIfLocalPath(mapCmd_);
comCmd_ = unqualifyIfLocalPath(comCmd_);
redCmd_ = unqualifyIfLocalPath(redCmd_);
}
String unqualifyIfLocalPath(String cmd) throws IOException {
if (cmd == null) {
//
} else {
String prog = cmd;
String args = "";
int s = cmd.indexOf(" ");
if (s != -1) {
prog = cmd.substring(0, s);
args = cmd.substring(s + 1);
}
String progCanon;
try {
progCanon = new File(prog).getCanonicalPath();
} catch (IOException io) {
progCanon = prog;
}
boolean shipped = shippedCanonFiles_.contains(progCanon);
msg("shipped: " + shipped + " " + progCanon);
if (shipped) {
// Change path to simple filename.
// That way when PipeMapRed calls Runtime.exec(),
        // it will look for the executable in the Task's working dir.
// And this is where TaskRunner unjars our job jar.
prog = new File(prog).getName();
if (args.length() > 0) {
cmd = prog + " " + args;
} else {
cmd = prog;
}
}
}
msg("cmd=" + cmd);
return cmd;
}
void parseArgv() {
CommandLine cmdLine = null;
try {
cmdLine = parser.parse(allOptions, argv_);
} catch(Exception oe) {
LOG.error(oe.getMessage());
exitUsage(argv_.length > 0 && "-info".equals(argv_[0]));
}
if (cmdLine != null) {
@SuppressWarnings("unchecked")
List<String> args = cmdLine.getArgList();
if(args != null && args.size() > 0) {
fail("Found " + args.size() + " unexpected arguments on the " +
"command line " + args);
}
detailedUsage_ = cmdLine.hasOption("info");
if (cmdLine.hasOption("help") || detailedUsage_) {
printUsage = true;
return;
}
verbose_ = cmdLine.hasOption("verbose");
background_ = cmdLine.hasOption("background");
debug_ = cmdLine.hasOption("debug")? debug_ + 1 : debug_;
String[] values = cmdLine.getOptionValues("input");
if (values != null && values.length > 0) {
for (String input : values) {
inputSpecs_.add(input);
}
}
output_ = cmdLine.getOptionValue("output");
mapCmd_ = cmdLine.getOptionValue("mapper");
comCmd_ = cmdLine.getOptionValue("combiner");
redCmd_ = cmdLine.getOptionValue("reducer");
lazyOutput_ = cmdLine.hasOption("lazyOutput");
values = cmdLine.getOptionValues("file");
if (values != null && values.length > 0) {
LOG.warn("-file option is deprecated, please use generic option" +
" -files instead.");
StringBuffer fileList = new StringBuffer();
for (String file : values) {
packageFiles_.add(file);
try {
Path path = new Path(file);
FileSystem localFs = FileSystem.getLocal(config_);
String finalPath = path.makeQualified(localFs).toString();
if(fileList.length() > 0) {
fileList.append(',');
}
fileList.append(finalPath);
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
String tmpFiles = config_.get("tmpfiles", "");
if (tmpFiles.isEmpty()) {
tmpFiles = fileList.toString();
} else {
tmpFiles = tmpFiles + "," + fileList;
}
config_.set("tmpfiles", tmpFiles);
validate(packageFiles_);
}
String fsName = cmdLine.getOptionValue("dfs");
if (null != fsName){
LOG.warn("-dfs option is deprecated, please use -fs instead.");
config_.set("fs.default.name", fsName);
}
additionalConfSpec_ = cmdLine.getOptionValue("additionalconfspec");
inputFormatSpec_ = cmdLine.getOptionValue("inputformat");
outputFormatSpec_ = cmdLine.getOptionValue("outputformat");
numReduceTasksSpec_ = cmdLine.getOptionValue("numReduceTasks");
partitionerSpec_ = cmdLine.getOptionValue("partitioner");
inReaderSpec_ = cmdLine.getOptionValue("inputreader");
mapDebugSpec_ = cmdLine.getOptionValue("mapdebug");
reduceDebugSpec_ = cmdLine.getOptionValue("reducedebug");
ioSpec_ = cmdLine.getOptionValue("io");
String[] car = cmdLine.getOptionValues("cacheArchive");
if (null != car && car.length > 0){
LOG.warn("-cacheArchive option is deprecated, please use -archives instead.");
for(String s : car){
cacheArchives = (cacheArchives == null)?s :cacheArchives + "," + s;
}
}
String[] caf = cmdLine.getOptionValues("cacheFile");
if (null != caf && caf.length > 0){
LOG.warn("-cacheFile option is deprecated, please use -files instead.");
for(String s : caf){
cacheFiles = (cacheFiles == null)?s :cacheFiles + "," + s;
}
}
String[] jobconf = cmdLine.getOptionValues("jobconf");
if (null != jobconf && jobconf.length > 0){
LOG.warn("-jobconf option is deprecated, please use -D instead.");
for(String s : jobconf){
String[] parts = s.split("=", 2);
config_.set(parts[0], parts[1]);
}
}
String[] cmd = cmdLine.getOptionValues("cmdenv");
if (null != cmd && cmd.length > 0){
for(String s : cmd) {
if (addTaskEnvironment_.length() > 0) {
addTaskEnvironment_ += " ";
}
addTaskEnvironment_ += s;
}
}
} else {
exitUsage(argv_.length > 0 && "-info".equals(argv_[0]));
}
}
protected void msg(String msg) {
if (verbose_) {
System.out.println("STREAM: " + msg);
}
}
private Option createOption(String name, String desc,
String argName, int max, boolean required){
return OptionBuilder
.withArgName(argName)
.hasArgs(max)
.withDescription(desc)
.isRequired(required)
.create(name);
}
private Option createBoolOption(String name, String desc){
return OptionBuilder.withDescription(desc).create(name);
}
private void validate(final List<String> values)
throws IllegalArgumentException {
for (String file : values) {
File f = new File(file);
if (!FileUtil.canRead(f)) {
fail("File: " + f.getAbsolutePath()
+ " does not exist, or is not readable.");
}
}
}
private void setupOptions(){
// input and output are not required for -info and -help options,
// though they are required for streaming job to be run.
Option input = createOption("input",
"DFS input file(s) for the Map step",
"path",
Integer.MAX_VALUE,
false);
Option output = createOption("output",
"DFS output directory for the Reduce step",
"path", 1, false);
Option mapper = createOption("mapper",
"The streaming command to run", "cmd", 1, false);
Option combiner = createOption("combiner",
"The streaming command to run", "cmd", 1, false);
// reducer could be NONE
Option reducer = createOption("reducer",
"The streaming command to run", "cmd", 1, false);
Option file = createOption("file",
"File to be shipped in the Job jar file",
"file", Integer.MAX_VALUE, false);
Option dfs = createOption("dfs",
"Optional. Override DFS configuration", "<h:p>|local", 1, false);
Option additionalconfspec = createOption("additionalconfspec",
"Optional.", "spec", 1, false);
Option inputformat = createOption("inputformat",
"Optional.", "spec", 1, false);
Option outputformat = createOption("outputformat",
"Optional.", "spec", 1, false);
Option partitioner = createOption("partitioner",
"Optional.", "spec", 1, false);
Option numReduceTasks = createOption("numReduceTasks",
"Optional.", "spec",1, false );
Option inputreader = createOption("inputreader",
"Optional.", "spec", 1, false);
Option mapDebug = createOption("mapdebug",
"Optional.", "spec", 1, false);
Option reduceDebug = createOption("reducedebug",
"Optional", "spec",1, false);
Option jobconf =
createOption("jobconf",
"(n=v) Optional. Add or override a JobConf property.",
"spec", 1, false);
Option cmdenv =
createOption("cmdenv", "(n=v) Pass env.var to streaming commands.",
"spec", 1, false);
Option cacheFile = createOption("cacheFile",
"File name URI", "fileNameURI", Integer.MAX_VALUE, false);
Option cacheArchive = createOption("cacheArchive",
"File name URI", "fileNameURI", Integer.MAX_VALUE, false);
Option io = createOption("io",
"Optional.", "spec", 1, false);
// boolean properties
Option background = createBoolOption("background", "Submit the job and don't wait till it completes.");
Option verbose = createBoolOption("verbose", "print verbose output");
    Option info = createBoolOption("info", "print detailed usage");
Option help = createBoolOption("help", "print this help message");
Option debug = createBoolOption("debug", "print debug output");
Option lazyOutput = createBoolOption("lazyOutput", "create outputs lazily");
allOptions = new Options().
addOption(input).
addOption(output).
addOption(mapper).
addOption(combiner).
addOption(reducer).
addOption(file).
addOption(dfs).
addOption(additionalconfspec).
addOption(inputformat).
addOption(outputformat).
addOption(partitioner).
addOption(numReduceTasks).
addOption(inputreader).
addOption(mapDebug).
addOption(reduceDebug).
addOption(jobconf).
addOption(cmdenv).
addOption(cacheFile).
addOption(cacheArchive).
addOption(io).
addOption(background).
addOption(verbose).
addOption(info).
addOption(debug).
addOption(help).
addOption(lazyOutput);
}
public void exitUsage(boolean detailed) {
printUsage(detailed);
fail("");
}
private void printUsage(boolean detailed) {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
+ " [options]");
System.out.println("Options:");
System.out.println(" -input <path> DFS input file(s) for the Map"
+ " step.");
System.out.println(" -output <path> DFS output directory for the"
+ " Reduce step.");
System.out.println(" -mapper <cmd|JavaClassName> Optional. Command"
+ " to be run as mapper.");
System.out.println(" -combiner <cmd|JavaClassName> Optional. Command"
+ " to be run as combiner.");
System.out.println(" -reducer <cmd|JavaClassName> Optional. Command"
+ " to be run as reducer.");
System.out.println(" -file <file> Optional. File/dir to be "
+ "shipped in the Job jar file.\n" +
" Deprecated. Use generic option \"-files\" instead.");
System.out.println(" -inputformat <TextInputFormat(default)"
+ "|SequenceFileAsTextInputFormat|JavaClassName>\n"
+ " Optional. The input format class.");
System.out.println(" -outputformat <TextOutputFormat(default)"
+ "|JavaClassName>\n"
+ " Optional. The output format class.");
System.out.println(" -partitioner <JavaClassName> Optional. The"
+ " partitioner class.");
System.out.println(" -numReduceTasks <num> Optional. Number of reduce "
+ "tasks.");
System.out.println(" -inputreader <spec> Optional. Input recordreader"
+ " spec.");
System.out.println(" -cmdenv <n>=<v> Optional. Pass env.var to"
+ " streaming commands.");
System.out.println(" -mapdebug <cmd> Optional. "
+ "To run this script when a map task fails.");
System.out.println(" -reducedebug <cmd> Optional."
+ " To run this script when a reduce task fails.");
System.out.println(" -io <identifier> Optional. Format to use"
+ " for input to and output");
System.out.println(" from mapper/reducer commands");
System.out.println(" -lazyOutput Optional. Lazily create Output.");
System.out.println(" -background Optional. Submit the job and don't wait till it completes.");
System.out.println(" -verbose Optional. Print verbose output.");
System.out.println(" -info Optional. Print detailed usage.");
System.out.println(" -help Optional. Print help message.");
System.out.println();
GenericOptionsParser.printGenericCommandUsage(System.out);
if (!detailed) {
System.out.println();
System.out.println("For more details about these options:");
System.out.println("Use " +
"$HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar -info");
return;
}
System.out.println();
System.out.println("Usage tips:");
System.out.println("In -input: globbing on <path> is supported and can "
+ "have multiple -input");
System.out.println();
System.out.println("Default Map input format: a line is a record in UTF-8 "
+ "the key part ends at first");
System.out.println(" TAB, the rest of the line is the value");
System.out.println();
System.out.println("To pass a Custom input format:");
System.out.println(" -inputformat package.MyInputFormat");
System.out.println();
System.out.println("Similarly, to pass a custom output format:");
System.out.println(" -outputformat package.MyOutputFormat");
System.out.println();
System.out.println("The files with extensions .class and .jar/.zip," +
" specified for the -file");
System.out.println(" argument[s], end up in \"classes\" and \"lib\" " +
"directories respectively inside");
System.out.println(" the working directory when the mapper and reducer are"
+ " run. All other files");
System.out.println(" specified for the -file argument[s]" +
" end up in the working directory when the");
System.out.println(" mapper and reducer are run. The location of this " +
"working directory is");
System.out.println(" unspecified.");
System.out.println();
System.out.println("To set the number of reduce tasks (num. of output " +
"files) as, say 10:");
System.out.println(" Use -numReduceTasks 10");
System.out.println("To skip the sort/combine/shuffle/sort/reduce step:");
System.out.println(" Use -numReduceTasks 0");
System.out.println(" Map output then becomes a 'side-effect " +
"output' rather than a reduce input.");
System.out.println(" This speeds up processing. This also feels " +
"more like \"in-place\" processing");
System.out.println(" because the input filename and the map " +
"input order are preserved.");
System.out.println(" This is equivalent to -reducer NONE");
System.out.println();
System.out.println("To speed up the last maps:");
System.out.println(" -D " + MRJobConfig.MAP_SPECULATIVE + "=true");
System.out.println("To speed up the last reduces:");
System.out.println(" -D " + MRJobConfig.REDUCE_SPECULATIVE + "=true");
System.out.println("To name the job (appears in the JobTracker Web UI):");
System.out.println(" -D " + MRJobConfig.JOB_NAME + "='My Job'");
System.out.println("To change the local temp directory:");
System.out.println(" -D dfs.data.dir=/tmp/dfs");
System.out.println(" -D stream.tmpdir=/tmp/streaming");
System.out.println("Additional local temp directories with -jt local:");
System.out.println(" -D " + MRConfig.LOCAL_DIR + "=/tmp/local");
System.out.println(" -D " + JTConfig.JT_SYSTEM_DIR + "=/tmp/system");
System.out.println(" -D " + MRConfig.TEMP_DIR + "=/tmp/temp");
System.out.println("To treat tasks with non-zero exit status as SUCCEDED:");
System.out.println(" -D stream.non.zero.exit.is.failure=false");
System.out.println("Use a custom hadoop streaming build along with standard"
+ " hadoop install:");
System.out.println(" $HADOOP_PREFIX/bin/hadoop jar " +
"/path/my-hadoop-streaming.jar [...]\\");
System.out.println(" [...] -D stream.shipped.hadoopstreaming=" +
"/path/my-hadoop-streaming.jar");
System.out.println("For more details about jobconf parameters see:");
System.out.println(" http://wiki.apache.org/hadoop/JobConfFile");
System.out.println("Truncate the values of the job configuration copied" +
"to the environment at the given length:");
System.out.println(" -D stream.jobconf.truncate.limit=-1");
System.out.println("To set an environment variable in a streaming " +
"command:");
System.out.println(" -cmdenv EXAMPLE_DIR=/home/example/dictionaries/");
System.out.println();
System.out.println("Shortcut:");
System.out.println(" setenv HSTREAMING \"$HADOOP_PREFIX/bin/hadoop jar " +
"hadoop-streaming.jar\"");
System.out.println();
System.out.println("Example: $HSTREAMING -mapper " +
"\"/usr/local/bin/perl5 filter.pl\"");
System.out.println(" -file /local/filter.pl -input " +
"\"/logs/0604*/*\" [...]");
System.out.println(" Ships a script, invokes the non-shipped perl " +
"interpreter. Shipped files go to");
System.out.println(" the working directory so filter.pl is found by perl. "
+ "Input files are all the");
System.out.println(" daily logs for days in month 2006-04");
}
public void fail(String message) {
System.err.println(message);
System.err.println("Try -help for more information");
throw new IllegalArgumentException(message);
}
// --------------------------------------------
protected String getHadoopClientHome() {
String h = env_.getProperty("HADOOP_PREFIX"); // standard Hadoop
if (h == null) {
//fail("Missing required environment variable: HADOOP_PREFIX");
h = "UNDEF";
}
return h;
}
protected boolean isLocalHadoop() {
return StreamUtil.isLocalJobTracker(jobConf_);
}
@Deprecated
protected String getClusterNick() {
return "default";
}
/** @return path to the created Jar file or null if no files are necessary.
*/
protected String packageJobJar() throws IOException {
ArrayList<String> unjarFiles = new ArrayList<String>();
// Runtime code: ship same version of code as self (job submitter code)
// usually found in: build/contrib or build/hadoop-<version>-dev-streaming.jar
// First try an explicit spec: it's too hard to find our own location in this case:
// $HADOOP_PREFIX/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_PREFIX
String runtimeClasses = config_.get("stream.shipped.hadoopstreaming"); // jar or class dir
if (runtimeClasses == null) {
runtimeClasses = StreamUtil.findInClasspath(StreamJob.class.getName());
}
if (runtimeClasses == null) {
throw new IOException("runtime classes not found: " + getClass().getPackage());
} else {
msg("Found runtime classes in: " + runtimeClasses);
}
if (isLocalHadoop()) {
// don't package class files (they might get unpackaged in "." and then
// hide the intended CLASSPATH entry)
// we still package everything else (so that scripts and executable are found in
// Task workdir like distributed Hadoop)
} else {
if (new File(runtimeClasses).isDirectory()) {
packageFiles_.add(runtimeClasses);
} else {
unjarFiles.add(runtimeClasses);
}
}
if (packageFiles_.size() + unjarFiles.size() == 0) {
return null;
}
String tmp = jobConf_.get("stream.tmpdir"); //, "/tmp/${mapreduce.job.user.name}/"
File tmpDir = (tmp == null) ? null : new File(tmp);
// tmpDir=null means OS default tmp dir
File jobJar = File.createTempFile("streamjob", ".jar", tmpDir);
System.out.println("packageJobJar: " + packageFiles_ + " " + unjarFiles + " " + jobJar
+ " tmpDir=" + tmpDir);
if (debug_ == 0) {
jobJar.deleteOnExit();
}
JarBuilder builder = new JarBuilder();
if (verbose_) {
builder.setVerbose(true);
}
String jobJarName = jobJar.getAbsolutePath();
builder.merge(packageFiles_, unjarFiles, jobJarName);
return jobJarName;
}
/**
* get the uris of all the files/caches
*/
protected void getURIs(String lcacheArchives, String lcacheFiles) {
String archives[] = StringUtils.getStrings(lcacheArchives);
String files[] = StringUtils.getStrings(lcacheFiles);
fileURIs = StringUtils.stringToURI(files);
archiveURIs = StringUtils.stringToURI(archives);
}
protected void setJobConf() throws IOException {
if (additionalConfSpec_ != null) {
LOG.warn("-additionalconfspec option is deprecated, please use -conf instead.");
config_.addResource(new Path(additionalConfSpec_));
}
// general MapRed job properties
jobConf_ = new JobConf(config_, StreamJob.class);
// All streaming jobs get the task timeout value
// from the configuration settings.
// The correct FS must be set before this is called!
// (to resolve local vs. dfs drive letter differences)
// (mapreduce.job.working.dir will be lazily initialized ONCE and depends on FS)
for (int i = 0; i < inputSpecs_.size(); i++) {
FileInputFormat.addInputPaths(jobConf_,
(String) inputSpecs_.get(i));
}
String defaultPackage = this.getClass().getPackage().getName();
Class c;
Class fmt = null;
if (inReaderSpec_ == null && inputFormatSpec_ == null) {
fmt = TextInputFormat.class;
} else if (inputFormatSpec_ != null) {
if (inputFormatSpec_.equals(TextInputFormat.class.getName())
|| inputFormatSpec_.equals(TextInputFormat.class.getCanonicalName())
|| inputFormatSpec_.equals(TextInputFormat.class.getSimpleName())) {
fmt = TextInputFormat.class;
} else if (inputFormatSpec_.equals(KeyValueTextInputFormat.class
.getName())
|| inputFormatSpec_.equals(KeyValueTextInputFormat.class
.getCanonicalName())
|| inputFormatSpec_.equals(KeyValueTextInputFormat.class.getSimpleName())) {
if (inReaderSpec_ == null) {
fmt = KeyValueTextInputFormat.class;
}
} else if (inputFormatSpec_.equals(SequenceFileInputFormat.class
.getName())
|| inputFormatSpec_
.equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class
.getCanonicalName())
|| inputFormatSpec_
.equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class.getSimpleName())) {
if (inReaderSpec_ == null) {
fmt = SequenceFileInputFormat.class;
}
} else if (inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class
.getName())
|| inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class
.getCanonicalName())
|| inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class.getSimpleName())) {
fmt = SequenceFileAsTextInputFormat.class;
} else {
c = StreamUtil.goodClassOrNull(jobConf_, inputFormatSpec_, defaultPackage);
if (c != null) {
fmt = c;
} else {
fail("-inputformat : class not found : " + inputFormatSpec_);
}
}
}
if (fmt == null) {
fmt = StreamInputFormat.class;
}
jobConf_.setInputFormat(fmt);
if (ioSpec_ != null) {
jobConf_.set("stream.map.input", ioSpec_);
jobConf_.set("stream.map.output", ioSpec_);
jobConf_.set("stream.reduce.input", ioSpec_);
jobConf_.set("stream.reduce.output", ioSpec_);
}
Class<? extends IdentifierResolver> idResolverClass =
jobConf_.getClass("stream.io.identifier.resolver.class",
IdentifierResolver.class, IdentifierResolver.class);
IdentifierResolver idResolver = ReflectionUtils.newInstance(idResolverClass, jobConf_);
idResolver.resolve(jobConf_.get("stream.map.input", IdentifierResolver.TEXT_ID));
jobConf_.setClass("stream.map.input.writer.class",
idResolver.getInputWriterClass(), InputWriter.class);
idResolver.resolve(jobConf_.get("stream.reduce.input", IdentifierResolver.TEXT_ID));
jobConf_.setClass("stream.reduce.input.writer.class",
idResolver.getInputWriterClass(), InputWriter.class);
jobConf_.set("stream.addenvironment", addTaskEnvironment_);
boolean isMapperACommand = false;
if (mapCmd_ != null) {
c = StreamUtil.goodClassOrNull(jobConf_, mapCmd_, defaultPackage);
if (c != null) {
jobConf_.setMapperClass(c);
} else {
isMapperACommand = true;
jobConf_.setMapperClass(PipeMapper.class);
jobConf_.setMapRunnerClass(PipeMapRunner.class);
jobConf_.set("stream.map.streamprocessor",
URLEncoder.encode(mapCmd_, "UTF-8"));
}
}
if (comCmd_ != null) {
c = StreamUtil.goodClassOrNull(jobConf_, comCmd_, defaultPackage);
if (c != null) {
jobConf_.setCombinerClass(c);
} else {
jobConf_.setCombinerClass(PipeCombiner.class);
jobConf_.set("stream.combine.streamprocessor", URLEncoder.encode(
comCmd_, "UTF-8"));
}
}
if (numReduceTasksSpec_!= null) {
int numReduceTasks = Integer.parseInt(numReduceTasksSpec_);
jobConf_.setNumReduceTasks(numReduceTasks);
}
boolean isReducerACommand = false;
if (redCmd_ != null) {
if (redCmd_.equals(REDUCE_NONE)) {
jobConf_.setNumReduceTasks(0);
}
if (jobConf_.getNumReduceTasks() != 0) {
if (redCmd_.compareToIgnoreCase("aggregate") == 0) {
jobConf_.setReducerClass(ValueAggregatorReducer.class);
jobConf_.setCombinerClass(ValueAggregatorCombiner.class);
} else {
c = StreamUtil.goodClassOrNull(jobConf_, redCmd_, defaultPackage);
if (c != null) {
jobConf_.setReducerClass(c);
} else {
isReducerACommand = true;
jobConf_.setReducerClass(PipeReducer.class);
jobConf_.set("stream.reduce.streamprocessor", URLEncoder.encode(
redCmd_, "UTF-8"));
}
}
}
}
idResolver.resolve(jobConf_.get("stream.map.output",
IdentifierResolver.TEXT_ID));
jobConf_.setClass("stream.map.output.reader.class",
idResolver.getOutputReaderClass(), OutputReader.class);
if (isMapperACommand || jobConf_.get("stream.map.output") != null) {
// if mapper is a command, then map output key/value classes come from the
// idResolver
jobConf_.setMapOutputKeyClass(idResolver.getOutputKeyClass());
jobConf_.setMapOutputValueClass(idResolver.getOutputValueClass());
if (jobConf_.getNumReduceTasks() == 0) {
jobConf_.setOutputKeyClass(idResolver.getOutputKeyClass());
jobConf_.setOutputValueClass(idResolver.getOutputValueClass());
}
}
idResolver.resolve(jobConf_.get("stream.reduce.output",
IdentifierResolver.TEXT_ID));
jobConf_.setClass("stream.reduce.output.reader.class",
idResolver.getOutputReaderClass(), OutputReader.class);
if (isReducerACommand || jobConf_.get("stream.reduce.output") != null) {
// if reducer is a command, then output key/value classes come from the
// idResolver
jobConf_.setOutputKeyClass(idResolver.getOutputKeyClass());
jobConf_.setOutputValueClass(idResolver.getOutputValueClass());
}
if (inReaderSpec_ != null) {
String[] args = inReaderSpec_.split(",");
String readerClass = args[0];
// this argument can only be a Java class
c = StreamUtil.goodClassOrNull(jobConf_, readerClass, defaultPackage);
if (c != null) {
jobConf_.set("stream.recordreader.class", c.getName());
} else {
fail("-inputreader: class not found: " + readerClass);
}
for (int i = 1; i < args.length; i++) {
String[] nv = args[i].split("=", 2);
String k = "stream.recordreader." + nv[0];
String v = (nv.length > 1) ? nv[1] : "";
jobConf_.set(k, v);
}
}
FileOutputFormat.setOutputPath(jobConf_, new Path(output_));
fmt = null;
if (outputFormatSpec_!= null) {
c = StreamUtil.goodClassOrNull(jobConf_, outputFormatSpec_, defaultPackage);
if (c != null) {
fmt = c;
} else {
fail("-outputformat : class not found : " + outputFormatSpec_);
}
}
if (fmt == null) {
fmt = TextOutputFormat.class;
}
if (lazyOutput_) {
LazyOutputFormat.setOutputFormatClass(jobConf_, fmt);
} else {
jobConf_.setOutputFormat(fmt);
}
if (partitionerSpec_!= null) {
c = StreamUtil.goodClassOrNull(jobConf_, partitionerSpec_, defaultPackage);
if (c != null) {
jobConf_.setPartitionerClass(c);
} else {
fail("-partitioner : class not found : " + partitionerSpec_);
}
}
if(mapDebugSpec_ != null){
jobConf_.setMapDebugScript(mapDebugSpec_);
}
if(reduceDebugSpec_ != null){
jobConf_.setReduceDebugScript(reduceDebugSpec_);
}
// last, allow user to override anything
// (although typically used with properties we didn't touch)
jar_ = packageJobJar();
if (jar_ != null) {
jobConf_.setJar(jar_);
}
if ((cacheArchives != null) || (cacheFiles != null)){
getURIs(cacheArchives, cacheFiles);
boolean b = DistributedCache.checkURIs(fileURIs, archiveURIs);
if (!b)
fail(LINK_URI);
}
// set the jobconf for the caching parameters
if (cacheArchives != null)
DistributedCache.setCacheArchives(archiveURIs, jobConf_);
if (cacheFiles != null)
DistributedCache.setCacheFiles(fileURIs, jobConf_);
if (verbose_) {
listJobConfProperties();
}
msg("submitting to jobconf: " + getJobTrackerHostPort());
}
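  // A typical invocation that exercises the options wired up above might look
  // like the following (illustrative only; paths and host names are made up):
  //
  //   hadoop jar hadoop-streaming.jar \
  //     -input /data/in -output /data/out \
  //     -mapper /bin/cat -reducer aggregate \
  //     -io typedbytes -numReduceTasks 4 \
  //     -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \
  //     -cacheFile hdfs://host:8020/dict.txt#dict
  //
  // Each flag feeds one of the specs consumed above.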
/**
* Prints out the jobconf properties on stdout
* when verbose is specified.
*/
protected void listJobConfProperties()
{
msg("==== JobConf properties:");
TreeMap<String,String> sorted = new TreeMap<String,String>();
for (final Map.Entry<String, String> en : jobConf_) {
sorted.put(en.getKey(), en.getValue());
}
for (final Map.Entry<String,String> en: sorted.entrySet()) {
msg(en.getKey() + "=" + en.getValue());
}
msg("====");
}
protected String getJobTrackerHostPort() {
return jobConf_.get(JTConfig.JT_IPC_ADDRESS);
}
// Based on JobClient
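  // Exit codes: 0 = success, 1 = job ran but did not succeed, 2 = bad input
  // path, 3 = invalid job conf, 4 = output path already exists, 5 = other
  // I/O error, 6 = interrupted while monitoring.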
public int submitAndMonitorJob() throws IOException {
if (jar_ != null && isLocalHadoop()) {
      // getAbsoluteFile became required when the shell and the sub-JVM have different working dirs...
File wd = new File(".").getAbsoluteFile();
RunJar.unJar(new File(jar_), wd);
}
// if jobConf_ changes must recreate a JobClient
jc_ = new JobClient(jobConf_);
running_ = null;
try {
running_ = jc_.submitJob(jobConf_);
jobId_ = running_.getID();
if (background_) {
LOG.info("Job is running in background.");
} else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
LOG.error("Job not successful!");
return 1;
}
LOG.info("Output directory: " + output_);
    } catch (FileNotFoundException fe) {
      LOG.error("Error launching job, bad input path: " + fe.getMessage());
      return 2;
    } catch (InvalidJobConfException je) {
      LOG.error("Error launching job, invalid job conf: " + je.getMessage());
      return 3;
    } catch (FileAlreadyExistsException fae) {
      LOG.error("Error launching job, output path already exists: "
                + fae.getMessage());
      return 4;
    } catch (IOException ioe) {
      LOG.error("Error launching job: " + ioe.getMessage());
      return 5;
    } catch (InterruptedException ie) {
      LOG.error("Error monitoring job: " + ie.getMessage());
      return 6;
} finally {
jc_.close();
}
return 0;
}
protected String[] argv_;
protected boolean background_;
protected boolean verbose_;
protected boolean detailedUsage_;
protected boolean printUsage = false;
protected int debug_;
protected Environment env_;
protected String jar_;
protected boolean localHadoop_;
protected Configuration config_;
protected JobConf jobConf_;
protected JobClient jc_;
// command-line arguments
protected ArrayList<String> inputSpecs_ = new ArrayList<String>();
protected TreeSet<String> seenPrimary_ = new TreeSet<String>();
protected boolean hasSimpleInputSpecs_;
protected ArrayList<String> packageFiles_ = new ArrayList<String>();
protected ArrayList<String> shippedCanonFiles_ = new ArrayList<String>();
//protected TreeMap<String, String> userJobConfProps_ = new TreeMap<String, String>();
protected String output_;
protected String mapCmd_;
protected String comCmd_;
protected String redCmd_;
protected String cacheFiles;
protected String cacheArchives;
protected URI[] fileURIs;
protected URI[] archiveURIs;
protected String inReaderSpec_;
protected String inputFormatSpec_;
protected String outputFormatSpec_;
protected String partitionerSpec_;
protected String numReduceTasksSpec_;
protected String additionalConfSpec_;
protected String mapDebugSpec_;
protected String reduceDebugSpec_;
protected String ioSpec_;
protected boolean lazyOutput_;
  // Used to communicate config to the external processes (e.g. the env var HADOOP_USER),
  // encoded as "a=b c=d"
protected String addTaskEnvironment_;
protected boolean outputSingleNode_;
protected long minRecWrittenToEnableSkip_;
protected RunningJob running_;
protected JobID jobId_;
  protected static final String LINK_URI = "You need to specify the URIs as scheme://path#linkname. " +
    "Please specify a different link name for each of your caching URIs";
}
| 41,551 | 36.705989 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.net.InetAddress;
import java.nio.charset.Charset;
import java.util.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
/**
* This is a class used to get the current environment
* on the host machines running the map/reduce. This class
* assumes that setting the environment in streaming is
 * allowed on windows/ix/linux/freebsd/sunos/solaris/hp-ux
*/
@InterfaceAudience.Private
public class Environment extends Properties {
private static final long serialVersionUID = 1L;
public Environment() throws IOException {
// Extend this code to fit all operating
// environments that you expect to run in
// http://lopica.sourceforge.net/os.html
String command = null;
String OS = System.getProperty("os.name");
String lowerOs = StringUtils.toLowerCase(OS);
if (OS.indexOf("Windows") > -1) {
command = "cmd /C set";
} else if (lowerOs.indexOf("ix") > -1 || lowerOs.indexOf("linux") > -1
|| lowerOs.indexOf("freebsd") > -1 || lowerOs.indexOf("sunos") > -1
|| lowerOs.indexOf("solaris") > -1 || lowerOs.indexOf("hp-ux") > -1) {
command = "env";
} else if (lowerOs.startsWith("mac os x") || lowerOs.startsWith("darwin")) {
command = "env";
} else {
// Add others here
}
if (command == null) {
throw new RuntimeException("Operating system " + OS + " not supported by this class");
}
// Read the environment variables
Process pid = Runtime.getRuntime().exec(command);
BufferedReader in = new BufferedReader(
new InputStreamReader(pid.getInputStream(), Charset.forName("UTF-8")));
try {
while (true) {
String line = in.readLine();
if (line == null)
break;
int p = line.indexOf("=");
if (p != -1) {
String name = line.substring(0, p);
String value = line.substring(p + 1);
setProperty(name, value);
}
}
in.close();
in = null;
} finally {
IOUtils.closeStream(in);
}
try {
pid.waitFor();
} catch (InterruptedException e) {
throw new IOException(e.getMessage());
}
}
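  // Illustrative use (not part of the original file): construct once and read
  // variables like any Properties object, e.g.
  //   Environment env = new Environment();
  //   String path = env.getProperty("PATH");
  // or pass env.toArray() as the envp argument of Runtime.exec().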
// to be used with Runtime.exec(String[] cmdarray, String[] envp)
String[] toArray() {
String[] arr = new String[super.size()];
Enumeration<Object> it = super.keys();
int i = -1;
while (it.hasMoreElements()) {
String key = (String) it.nextElement();
String val = (String) get(key);
i++;
arr[i] = key + "=" + val;
}
return arr;
}
public Map<String, String> toMap() {
Map<String, String> map = new HashMap<String, String>();
Enumeration<Object> it = super.keys();
while (it.hasMoreElements()) {
String key = (String) it.nextElement();
String val = (String) get(key);
map.put(key, val);
}
return map;
}
public String getHost() {
String host = getProperty("HOST");
if (host == null) {
// HOST isn't always in the environment
try {
host = InetAddress.getLocalHost().getHostName();
} catch (IOException io) {
io.printStackTrace();
}
}
return host;
}
}
| 4,126 | 30.030075 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeCombiner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import org.apache.hadoop.mapred.JobConf;
public class PipeCombiner extends PipeReducer {
String getPipeCommand(JobConf job) {
String str = job.get("stream.combine.streamprocessor");
try {
if (str != null) {
return URLDecoder.decode(str, "UTF-8");
}
} catch (UnsupportedEncodingException e) {
System.err.println("stream.combine.streamprocessor" +
" in jobconf not found");
}
return null;
}
boolean getDoPipe() {
return (getPipeCommand(job_) != null);
}
}
| 1,459 | 32.953488 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Iterator;
import java.net.URLDecoder;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.streaming.io.InputWriter;
import org.apache.hadoop.streaming.io.OutputReader;
import org.apache.hadoop.io.Writable;
/** A generic Reducer bridge.
* It delegates operations to an external program via stdin and stdout.
*/
public class PipeReducer extends PipeMapRed implements Reducer {
private byte[] reduceOutFieldSeparator;
private byte[] reduceInputFieldSeparator;
private int numOfReduceOutputKeyFields = 1;
private boolean skipping = false;
String getPipeCommand(JobConf job) {
String str = job.get("stream.reduce.streamprocessor");
if (str == null) {
return str;
}
try {
return URLDecoder.decode(str, "UTF-8");
} catch (UnsupportedEncodingException e) {
System.err.println("stream.reduce.streamprocessor in jobconf not found");
return null;
}
}
boolean getDoPipe() {
String argv = getPipeCommand(job_);
// Currently: null is identity reduce. REDUCE_NONE is no-map-outputs.
return (argv != null) && !StreamJob.REDUCE_NONE.equals(argv);
}
public void configure(JobConf job) {
super.configure(job);
    // Disable the auto-increment of the counter. For streaming, the number of
    // processed records could differ from (be equal to or less than) the
    // number of input records.
SkipBadRecords.setAutoIncrReducerProcCount(job, false);
skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
try {
reduceOutFieldSeparator = job_.get("stream.reduce.output.field.separator", "\t").getBytes("UTF-8");
reduceInputFieldSeparator = job_.get("stream.reduce.input.field.separator", "\t").getBytes("UTF-8");
this.numOfReduceOutputKeyFields = job_.getInt("stream.num.reduce.output.key.fields", 1);
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
}
}
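  // The separators and key-field count read above come from job settings;
  // illustrative example: -D stream.reduce.output.field.separator=, together
  // with -D stream.num.reduce.output.key.fields=2 makes the reducer output
  // comma-separated, with the first two fields treated as the key.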
public void reduce(Object key, Iterator values, OutputCollector output,
Reporter reporter) throws IOException {
// init
if (doPipe_ && outThread_ == null) {
startOutputThreads(output, reporter);
}
try {
while (values.hasNext()) {
Writable val = (Writable) values.next();
numRecRead_++;
maybeLogRecord();
if (doPipe_) {
if (outerrThreadsThrowable != null) {
mapRedFinished();
throw new IOException("MROutput/MRErrThread failed:",
outerrThreadsThrowable);
}
inWriter_.writeKey(key);
inWriter_.writeValue(val);
} else {
// "identity reduce"
output.collect(key, val);
}
}
if(doPipe_ && skipping) {
//flush the streams on every record input if running in skip mode
//so that we don't buffer other records surrounding a bad record.
clientOut_.flush();
}
} catch (IOException io) {
// a common reason to get here is failure of the subprocess.
// Document that fact, if possible.
String extraInfo = "";
try {
int exitVal = sim.exitValue();
if (exitVal == 0) {
extraInfo = "subprocess exited successfully\n";
} else {
extraInfo = "subprocess exited with error code " + exitVal + "\n";
        }
} catch (IllegalThreadStateException e) {
// hmm, but child is still running. go figure.
extraInfo = "subprocess still running\n";
      }
mapRedFinished();
throw new IOException(extraInfo + getContext() + io.getMessage());
}
}
public void close() {
mapRedFinished();
}
@Override
public byte[] getInputSeparator() {
return reduceInputFieldSeparator;
}
@Override
public byte[] getFieldSeparator() {
return reduceOutFieldSeparator;
}
@Override
public int getNumOfKeyFields() {
return numOfReduceOutputKeyFields;
}
@Override
InputWriter createInputWriter() throws IOException {
return super.createInputWriter(reduceInputWriterClass_);
}
@Override
OutputReader createOutputReader() throws IOException {
return super.createOutputReader(reduceOutputReaderClass_);
}
}
| 5,355 | 31.858896 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.commons.logging.*;
/**
* Shared functionality for hadoopStreaming formats.
 * A custom reader can be defined as a RecordReader with the constructor below
* and is selected with the option bin/hadoopStreaming -inputreader ...
* @see StreamXmlRecordReader
*/
public abstract class StreamBaseRecordReader implements RecordReader<Text, Text> {
protected static final Log LOG = LogFactory.getLog(StreamBaseRecordReader.class.getName());
// custom JobConf properties for this class are prefixed with this namespace
final static String CONF_NS = "stream.recordreader.";
public StreamBaseRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
JobConf job, FileSystem fs) throws IOException {
in_ = in;
split_ = split;
start_ = split_.getStart();
length_ = split_.getLength();
end_ = start_ + length_;
splitName_ = split_.getPath().getName();
reporter_ = reporter;
job_ = job;
fs_ = fs;
statusMaxRecordChars_ = job_.getInt(CONF_NS + "statuschars", 200);
}
/// RecordReader API
/** Read a record. Implementation should call numRecStats at the end
*/
public abstract boolean next(Text key, Text value) throws IOException;
/** Returns the current position in the input. */
public synchronized long getPos() throws IOException {
return in_.getPos();
}
/** Close this to future operations.*/
public synchronized void close() throws IOException {
in_.close();
}
public float getProgress() throws IOException {
if (end_ == start_) {
return 1.0f;
} else {
return ((float)(in_.getPos() - start_)) / ((float)(end_ - start_));
}
}
public Text createKey() {
return new Text();
}
public Text createValue() {
return new Text();
}
/// StreamBaseRecordReader API
/** Implementation should seek forward in_ to the first byte of the next record.
* The initial byte offset in the stream is arbitrary.
*/
public abstract void seekNextRecordBoundary() throws IOException;
void numRecStats(byte[] record, int start, int len) throws IOException {
numRec_++;
if (numRec_ == nextStatusRec_) {
String recordStr = new String(record, start, Math.min(len, statusMaxRecordChars_), "UTF-8");
nextStatusRec_ += 100;//*= 10;
String status = getStatus(recordStr);
LOG.info(status);
reporter_.setStatus(status);
}
}
long lastMem = 0;
String getStatus(CharSequence record) {
long pos = -1;
try {
pos = getPos();
} catch (IOException io) {
}
String recStr;
if (record.length() > statusMaxRecordChars_) {
recStr = record.subSequence(0, statusMaxRecordChars_) + "...";
} else {
recStr = record.toString();
}
String unqualSplit = split_.getPath().getName() + ":" +
split_.getStart() + "+" + split_.getLength();
String status = "HSTR " + StreamUtil.HOST + " " + numRec_ + ". pos=" + pos + " " + unqualSplit
+ " Processing record=" + recStr;
status += " " + splitName_;
return status;
}
FSDataInputStream in_;
FileSplit split_;
long start_;
long end_;
long length_;
String splitName_;
Reporter reporter_;
JobConf job_;
FileSystem fs_;
int numRec_ = 0;
int nextStatusRec_ = 1;
int statusMaxRecordChars_;
}
| 4,637 | 30.127517 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;
import java.io.*;
import java.util.regex.*;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
/** A way to interpret XML fragments as Mapper input records.
* Values are XML subtrees delimited by configurable tags.
* Keys could be the value of a certain attribute in the XML subtree,
* but this is left to the stream processor application.
*
* The name-value properties that StreamXmlRecordReader understands are:
* String begin (chars marking beginning of record)
* String end (chars marking end of record)
* int maxrec (maximum record size)
* int lookahead(maximum lookahead to sync CDATA)
* boolean slowmatch
*/
public class StreamXmlRecordReader extends StreamBaseRecordReader {
public StreamXmlRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
JobConf job, FileSystem fs) throws IOException {
super(in, split, reporter, job, fs);
beginMark_ = checkJobGet(CONF_NS + "begin");
endMark_ = checkJobGet(CONF_NS + "end");
maxRecSize_ = job_.getInt(CONF_NS + "maxrec", 50 * 1000);
lookAhead_ = job_.getInt(CONF_NS + "lookahead", 2 * maxRecSize_);
synched_ = false;
slowMatch_ = job_.getBoolean(CONF_NS + "slowmatch", false);
if (slowMatch_) {
beginPat_ = makePatternCDataOrMark(beginMark_);
endPat_ = makePatternCDataOrMark(endMark_);
}
init();
}
public void init() throws IOException {
LOG.info("StreamBaseRecordReader.init: " + " start_=" + start_ + " end_=" + end_ + " length_="
+ length_ + " start_ > in_.getPos() =" + (start_ > in_.getPos()) + " " + start_ + " > "
+ in_.getPos());
if (start_ > in_.getPos()) {
in_.seek(start_);
}
pos_ = start_;
bin_ = new BufferedInputStream(in_);
seekNextRecordBoundary();
}
int numNext = 0;
public synchronized boolean next(Text key, Text value) throws IOException {
numNext++;
if (pos_ >= end_) {
return false;
}
DataOutputBuffer buf = new DataOutputBuffer();
if (!readUntilMatchBegin()) {
return false;
}
if (pos_ >= end_ || !readUntilMatchEnd(buf)) {
return false;
}
    // There is only one element; key/value splitting is not done here.
byte[] record = new byte[buf.getLength()];
System.arraycopy(buf.getData(), 0, record, 0, record.length);
numRecStats(record, 0, record.length);
key.set(record);
value.set("");
return true;
}
public void seekNextRecordBoundary() throws IOException {
readUntilMatchBegin();
}
boolean readUntilMatchBegin() throws IOException {
if (slowMatch_) {
return slowReadUntilMatch(beginPat_, false, null);
} else {
return fastReadUntilMatch(beginMark_, false, null);
}
}
private boolean readUntilMatchEnd(DataOutputBuffer buf) throws IOException {
if (slowMatch_) {
return slowReadUntilMatch(endPat_, true, buf);
} else {
return fastReadUntilMatch(endMark_, true, buf);
}
}
private boolean slowReadUntilMatch(Pattern markPattern, boolean includePat,
DataOutputBuffer outBufOrNull) throws IOException {
byte[] buf = new byte[Math.max(lookAhead_, maxRecSize_)];
int read = 0;
bin_.mark(Math.max(lookAhead_, maxRecSize_) + 2); //mark to invalidate if we read more
read = bin_.read(buf);
if (read == -1) return false;
String sbuf = new String(buf, 0, read, "UTF-8");
Matcher match = markPattern.matcher(sbuf);
firstMatchStart_ = NA;
firstMatchEnd_ = NA;
int bufPos = 0;
int state = synched_ ? CDATA_OUT : CDATA_UNK;
int s = 0;
while (match.find(bufPos)) {
int input;
if (match.group(1) != null) {
input = CDATA_BEGIN;
} else if (match.group(2) != null) {
input = CDATA_END;
firstMatchStart_ = NA; // |<DOC CDATA[ </DOC> ]]> should keep it
} else {
input = RECORD_MAYBE;
}
if (input == RECORD_MAYBE) {
if (firstMatchStart_ == NA) {
firstMatchStart_ = match.start();
firstMatchEnd_ = match.end();
}
}
state = nextState(state, input, match.start());
if (state == RECORD_ACCEPT) {
break;
}
bufPos = match.end();
s++;
}
if (state != CDATA_UNK) {
synched_ = true;
}
boolean matched = (firstMatchStart_ != NA) && (state == RECORD_ACCEPT || state == CDATA_UNK);
if (matched) {
int endPos = includePat ? firstMatchEnd_ : firstMatchStart_;
bin_.reset();
for (long skiplen = endPos; skiplen > 0; ) {
skiplen -= bin_.skip(skiplen); // Skip succeeds as we have read this buffer
}
pos_ += endPos;
if (outBufOrNull != null) {
outBufOrNull.writeBytes(sbuf.substring(0,endPos));
}
}
return matched;
}
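  // The slow-match scanner is a small state machine: it tracks whether the
  // current position is inside a CDATA section (CDATA_IN), outside one
  // (CDATA_OUT), or not yet known (CDATA_UNK), so that begin/end marks that
  // appear inside CDATA are not mistaken for record boundaries; RECORD_ACCEPT
  // means a real record boundary was found.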
// states
final static int CDATA_IN = 10;
final static int CDATA_OUT = 11;
final static int CDATA_UNK = 12;
final static int RECORD_ACCEPT = 13;
// inputs
final static int CDATA_BEGIN = 20;
final static int CDATA_END = 21;
final static int RECORD_MAYBE = 22;
/* also updates firstMatchStart_;*/
int nextState(int state, int input, int bufPos) {
switch (state) {
case CDATA_UNK:
case CDATA_OUT:
switch (input) {
case CDATA_BEGIN:
return CDATA_IN;
case CDATA_END:
if (state == CDATA_OUT) {
//System.out.println("buggy XML " + bufPos);
}
return CDATA_OUT;
case RECORD_MAYBE:
return (state == CDATA_UNK) ? CDATA_UNK : RECORD_ACCEPT;
}
break;
case CDATA_IN:
return (input == CDATA_END) ? CDATA_OUT : CDATA_IN;
}
throw new IllegalStateException(state + " " + input + " " + bufPos + " " + splitName_);
}
Pattern makePatternCDataOrMark(String escapedMark) {
StringBuffer pat = new StringBuffer();
addGroup(pat, StreamUtil.regexpEscape("CDATA[")); // CDATA_BEGIN
addGroup(pat, StreamUtil.regexpEscape("]]>")); // CDATA_END
addGroup(pat, escapedMark); // RECORD_MAYBE
return Pattern.compile(pat.toString());
}
void addGroup(StringBuffer pat, String escapedGroup) {
if (pat.length() > 0) {
pat.append("|");
}
pat.append("(");
pat.append(escapedGroup);
pat.append(")");
}
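  // fastReadUntilMatch scans byte-by-byte for the marker text, using
  // mark()/reset() on the buffered stream: when the marker is found and
  // includePat is false, the stream is rewound to just before the marker so
  // the caller can re-read it; bytes preceding the marker are optionally
  // copied into outBufOrNull.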
boolean fastReadUntilMatch(String textPat, boolean includePat, DataOutputBuffer outBufOrNull) throws IOException {
byte[] cpat = textPat.getBytes("UTF-8");
int m = 0;
boolean match = false;
int msup = cpat.length;
int LL = 120000 * 10;
bin_.mark(LL); // large number to invalidate mark
while (true) {
int b = bin_.read();
if (b == -1) break;
byte c = (byte) b; // this assumes eight-bit matching. OK with UTF-8
if (c == cpat[m]) {
m++;
if (m == msup) {
match = true;
break;
}
} else {
        bin_.mark(LL); // reset mark so we can jump back if we find a match
if (outBufOrNull != null) {
outBufOrNull.write(cpat, 0, m);
outBufOrNull.write(c);
}
pos_ += m + 1; // skip m chars, +1 for 'c'
m = 0;
}
}
if (!includePat && match) {
bin_.reset();
} else if (outBufOrNull != null) {
outBufOrNull.write(cpat);
pos_ += msup;
}
return match;
}
String checkJobGet(String prop) throws IOException {
String val = job_.get(prop);
if (val == null) {
throw new IOException("JobConf: missing required property: " + prop);
}
return val;
}
String beginMark_;
String endMark_;
Pattern beginPat_;
Pattern endPat_;
boolean slowMatch_;
int lookAhead_; // bytes to read to try to synch CDATA/non-CDATA. Should be more than max record size
int maxRecSize_;
BufferedInputStream bin_; // Wrap FSDataInputStream for efficient backward seeks
  long pos_; // Tracks the position with respect to the encapsulated FSDataInputStream
final static int NA = -1;
int firstMatchStart_ = 0; // candidate record boundary. Might just be CDATA.
int firstMatchEnd_ = 0;
boolean synched_;
}
| 9,295 | 29.781457 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.DataInput;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.CharacterCodingException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.streaming.PipeMapRed;
import org.apache.hadoop.streaming.StreamKeyValUtil;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.UTF8ByteArrayUtils;
/**
* OutputReader that reads the client's output as text.
*/
public class TextOutputReader extends OutputReader<Text, Text> {
private LineReader lineReader;
private byte[] bytes;
private DataInput clientIn;
private Configuration conf;
private int numKeyFields;
private byte[] separator;
private Text key;
private Text value;
private Text line;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
clientIn = pipeMapRed.getClientInput();
conf = pipeMapRed.getConfiguration();
numKeyFields = pipeMapRed.getNumOfKeyFields();
separator = pipeMapRed.getFieldSeparator();
lineReader = new LineReader((InputStream)clientIn, conf);
key = new Text();
value = new Text();
line = new Text();
}
@Override
public boolean readKeyValue() throws IOException {
if (lineReader.readLine(line) <= 0) {
return false;
}
bytes = line.getBytes();
splitKeyVal(bytes, line.getLength(), key, value);
line.clear();
return true;
}
@Override
public Text getCurrentKey() throws IOException {
return key;
}
@Override
public Text getCurrentValue() throws IOException {
return value;
}
@Override
public String getLastOutput() {
if (bytes != null) {
try {
return new String(bytes, "UTF-8");
} catch (UnsupportedEncodingException e) {
return "<undecodable>";
}
} else {
return null;
}
}
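  // Output lines are split on the configured separator: the first
  // numKeyFields fields form the key and the remainder becomes the value; if
  // the line contains fewer separators than that, the whole line becomes the
  // key and the value is empty.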
// split a UTF-8 line into key and value
private void splitKeyVal(byte[] line, int length, Text key, Text val)
throws IOException {
// Need to find numKeyFields separators
int pos = UTF8ByteArrayUtils.findBytes(line, 0, length, separator);
for(int k=1; k<numKeyFields && pos!=-1; k++) {
pos = UTF8ByteArrayUtils.findBytes(line, pos + separator.length,
length, separator);
}
try {
if (pos == -1) {
key.set(line, 0, length);
val.set("");
} else {
StreamKeyValUtil.splitKeyVal(line, 0, length, key, val, pos,
separator.length);
}
} catch (CharacterCodingException e) {
throw new IOException(StringUtils.stringifyException(e));
}
}
}
| 3,583 | 28.866667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.streaming.PipeMapRed;
/**
* InputWriter that writes the client's input as raw bytes.
*/
public class RawBytesInputWriter extends InputWriter<Writable, Writable> {
private DataOutput clientOut;
private ByteArrayOutputStream bufferOut;
private DataOutputStream bufferDataOut;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
clientOut = pipeMapRed.getClientOutput();
bufferOut = new ByteArrayOutputStream();
bufferDataOut = new DataOutputStream(bufferOut);
}
@Override
public void writeKey(Writable key) throws IOException {
writeRawBytes(key);
}
@Override
public void writeValue(Writable value) throws IOException {
writeRawBytes(value);
}
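  // Keys and values are framed for the external process as a 4-byte length
  // followed by that many raw bytes; BytesWritable payloads are written
  // directly, anything else is serialized through its write() method first.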
private void writeRawBytes(Writable writable) throws IOException {
if (writable instanceof BytesWritable) {
BytesWritable bw = (BytesWritable) writable;
byte[] bytes = bw.getBytes();
int length = bw.getLength();
clientOut.writeInt(length);
clientOut.write(bytes, 0, length);
} else {
bufferOut.reset();
writable.write(bufferDataOut);
byte[] bytes = bufferOut.toByteArray();
clientOut.writeInt(bytes.length);
clientOut.write(bytes);
}
}
}
| 2,354 | 30.824324 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.streaming.PipeMapRed;
import org.apache.hadoop.typedbytes.TypedBytesOutput;
import org.apache.hadoop.typedbytes.TypedBytesWritableOutput;
/**
* InputWriter that writes the client's input as typed bytes.
*/
public class TypedBytesInputWriter extends InputWriter<Object, Object> {
private TypedBytesOutput tbOut;
private TypedBytesWritableOutput tbwOut;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
DataOutput clientOut = pipeMapRed.getClientOutput();
tbOut = new TypedBytesOutput(clientOut);
tbwOut = new TypedBytesWritableOutput(clientOut);
}
@Override
public void writeKey(Object key) throws IOException {
writeTypedBytes(key);
}
@Override
public void writeValue(Object value) throws IOException {
writeTypedBytes(value);
}
private void writeTypedBytes(Object value) throws IOException {
if (value instanceof Writable) {
tbwOut.write((Writable) value);
} else {
tbOut.write(value);
}
}
}
| 2,006 | 30.359375 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.streaming.PipeMapRed;
import org.apache.hadoop.typedbytes.TypedBytesInput;
import org.apache.hadoop.typedbytes.TypedBytesWritable;
/**
* OutputReader that reads the client's output as typed bytes.
*/
public class TypedBytesOutputReader extends
OutputReader<TypedBytesWritable, TypedBytesWritable> {
private byte[] bytes;
private DataInput clientIn;
private TypedBytesWritable key;
private TypedBytesWritable value;
private TypedBytesInput in;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
clientIn = pipeMapRed.getClientInput();
key = new TypedBytesWritable();
value = new TypedBytesWritable();
in = new TypedBytesInput(clientIn);
}
@Override
public boolean readKeyValue() throws IOException {
bytes = in.readRaw();
if (bytes == null) {
return false;
}
key.set(bytes, 0, bytes.length);
bytes = in.readRaw();
value.set(bytes, 0, bytes.length);
return true;
}
@Override
public TypedBytesWritable getCurrentKey() throws IOException {
return key;
}
@Override
public TypedBytesWritable getCurrentValue() throws IOException {
return value;
}
@Override
public String getLastOutput() {
if (bytes != null) {
return new TypedBytesWritable(bytes).toString();
} else {
return null;
}
}
}
| 2,313 | 27.567901 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/InputWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.IOException;
import org.apache.hadoop.streaming.PipeMapRed;
/**
* Abstract base for classes that write the client's input.
*/
public abstract class InputWriter<K, V> {
/**
* Initializes the InputWriter. This method has to be called before calling
* any of the other methods.
*/
public void initialize(PipeMapRed pipeMapRed) throws IOException {
// nothing here yet, but that might change in the future
}
/**
* Writes an input key.
*/
public abstract void writeKey(K key) throws IOException;
/**
* Writes an input value.
*/
public abstract void writeValue(V value) throws IOException;
}
| 1,500 | 30.270833 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.streaming.PipeMapRed;
/**
* InputWriter that writes the client's input as text.
*/
public class TextInputWriter extends InputWriter<Object, Object> {
protected DataOutput clientOut;
private byte[] inputSeparator;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
clientOut = pipeMapRed.getClientOutput();
inputSeparator = pipeMapRed.getInputSeparator();
}
@Override
public void writeKey(Object key) throws IOException {
writeUTF8(key);
clientOut.write(inputSeparator);
}
@Override
public void writeValue(Object value) throws IOException {
writeUTF8(value);
clientOut.write('\n');
}
// Write an object to the output stream using UTF-8 encoding
protected void writeUTF8(Object object) throws IOException {
byte[] bval;
int valSize;
if (object instanceof BytesWritable) {
BytesWritable val = (BytesWritable) object;
bval = val.getBytes();
valSize = val.getLength();
} else if (object instanceof Text) {
Text val = (Text) object;
bval = val.getBytes();
valSize = val.getLength();
} else {
String sval = object.toString();
bval = sval.getBytes("UTF-8");
valSize = bval.length;
}
clientOut.write(bval, 0, valSize);
}
}
| 2,334 | 29.723684 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.typedbytes.TypedBytesWritable;
/**
* This class is used to resolve a string identifier into the required IO
* classes. By extending this class and pointing the property
* <tt>stream.io.identifier.resolver.class</tt> to this extension, additional
* IO classes can be added by external code.
*/
public class IdentifierResolver {
// note that the identifiers are case insensitive
public static final String TEXT_ID = "text";
public static final String RAW_BYTES_ID = "rawbytes";
public static final String TYPED_BYTES_ID = "typedbytes";
public static final String KEY_ONLY_TEXT_ID = "keyonlytext";
private Class<? extends InputWriter> inputWriterClass = null;
private Class<? extends OutputReader> outputReaderClass = null;
private Class outputKeyClass = null;
private Class outputValueClass = null;
/**
* Resolves a given identifier. This method has to be called before calling
* any of the getters.
*/
public void resolve(String identifier) {
if (identifier.equalsIgnoreCase(RAW_BYTES_ID)) {
setInputWriterClass(RawBytesInputWriter.class);
setOutputReaderClass(RawBytesOutputReader.class);
setOutputKeyClass(BytesWritable.class);
setOutputValueClass(BytesWritable.class);
} else if (identifier.equalsIgnoreCase(TYPED_BYTES_ID)) {
setInputWriterClass(TypedBytesInputWriter.class);
setOutputReaderClass(TypedBytesOutputReader.class);
setOutputKeyClass(TypedBytesWritable.class);
setOutputValueClass(TypedBytesWritable.class);
} else if (identifier.equalsIgnoreCase(KEY_ONLY_TEXT_ID)) {
setInputWriterClass(KeyOnlyTextInputWriter.class);
setOutputReaderClass(KeyOnlyTextOutputReader.class);
setOutputKeyClass(Text.class);
setOutputValueClass(NullWritable.class);
} else { // assume TEXT_ID
setInputWriterClass(TextInputWriter.class);
setOutputReaderClass(TextOutputReader.class);
setOutputKeyClass(Text.class);
setOutputValueClass(Text.class);
}
}
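  // A custom resolver might look like the following sketch (illustrative, not
  // shipped with Hadoop): subclass IdentifierResolver, recognise an extra
  // identifier, and fall back to this implementation otherwise.
  //
  //   public class MyResolver extends IdentifierResolver {
  //     @Override
  //     public void resolve(String identifier) {
  //       if ("myformat".equalsIgnoreCase(identifier)) {
  //         setInputWriterClass(MyInputWriter.class);
  //         setOutputReaderClass(MyOutputReader.class);
  //         setOutputKeyClass(Text.class);
  //         setOutputValueClass(Text.class);
  //       } else {
  //         super.resolve(identifier);
  //       }
  //     }
  //   }
  //
  // It would then be selected with
  //   -D stream.io.identifier.resolver.class=MyResolver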
/**
* Returns the resolved {@link InputWriter} class.
*/
public Class<? extends InputWriter> getInputWriterClass() {
return inputWriterClass;
}
/**
* Returns the resolved {@link OutputReader} class.
*/
public Class<? extends OutputReader> getOutputReaderClass() {
return outputReaderClass;
}
/**
* Returns the resolved output key class.
*/
public Class getOutputKeyClass() {
return outputKeyClass;
}
/**
* Returns the resolved output value class.
*/
public Class getOutputValueClass() {
return outputValueClass;
}
/**
* Sets the {@link InputWriter} class.
*/
protected void setInputWriterClass(Class<? extends InputWriter>
inputWriterClass) {
this.inputWriterClass = inputWriterClass;
}
/**
* Sets the {@link OutputReader} class.
*/
protected void setOutputReaderClass(Class<? extends OutputReader>
outputReaderClass) {
this.outputReaderClass = outputReaderClass;
}
/**
   * Sets the output key class.
*/
protected void setOutputKeyClass(Class outputKeyClass) {
this.outputKeyClass = outputKeyClass;
}
/**
* Sets the output value class.
*/
protected void setOutputValueClass(Class outputValueClass) {
this.outputValueClass = outputValueClass;
}
}
| 4,309 | 31.406015 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.DataInput;
import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.streaming.PipeMapRed;
/**
* OutputReader that reads the client's output as raw bytes.
*/
public class RawBytesOutputReader
extends OutputReader<BytesWritable, BytesWritable> {
private DataInput clientIn;
private byte[] bytes;
private BytesWritable key;
private BytesWritable value;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
clientIn = pipeMapRed.getClientInput();
key = new BytesWritable();
value = new BytesWritable();
}
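  // The client writes keys and values back in the same framing: a 4-byte
  // length followed by that many bytes; EOF while reading the key's length
  // ends the stream.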
@Override
public boolean readKeyValue() throws IOException {
int length = readLength();
if (length < 0) {
return false;
}
key.set(readBytes(length), 0, length);
length = readLength();
value.set(readBytes(length), 0, length);
return true;
}
@Override
public BytesWritable getCurrentKey() throws IOException {
return key;
}
@Override
public BytesWritable getCurrentValue() throws IOException {
return value;
}
@Override
public String getLastOutput() {
if (bytes != null) {
return new BytesWritable(bytes).toString();
} else {
return null;
}
}
private int readLength() throws IOException {
try {
return clientIn.readInt();
} catch (EOFException eof) {
return -1;
}
}
private byte[] readBytes(int length) throws IOException {
bytes = new byte[length];
clientIn.readFully(bytes);
return bytes;
}
}
| 2,470 | 25.569892 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/OutputReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.IOException;
import org.apache.hadoop.streaming.PipeMapRed;
/**
* Abstract base for classes that read the client's output.
*/
public abstract class OutputReader<K, V> {
/**
* Initializes the OutputReader. This method has to be called before
* calling any of the other methods.
*/
public void initialize(PipeMapRed pipeMapRed) throws IOException {
// nothing here yet, but that might change in the future
}
/**
* Read the next key/value pair outputted by the client.
* @return true iff a key/value pair was read
*/
public abstract boolean readKeyValue() throws IOException;
/**
* Returns the current key.
*/
public abstract K getCurrentKey() throws IOException;
/**
* Returns the current value.
*/
public abstract V getCurrentValue() throws IOException;
/**
* Returns the last output from the client as a String.
*/
public abstract String getLastOutput();
}
| 1,805 | 29.1 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/KeyOnlyTextOutputReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.DataInput;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.streaming.PipeMapRed;
import org.apache.hadoop.util.LineReader;
/**
* OutputReader that reads the client's output as text, interpreting each line
* as a key and outputting NullWritables for values.
*/
public class KeyOnlyTextOutputReader extends OutputReader<Text, NullWritable> {
private LineReader lineReader;
private byte[] bytes;
private DataInput clientIn;
private Configuration conf;
private Text key;
private Text line;
@Override
public void initialize(PipeMapRed pipeMapRed) throws IOException {
super.initialize(pipeMapRed);
clientIn = pipeMapRed.getClientInput();
conf = pipeMapRed.getConfiguration();
lineReader = new LineReader((InputStream)clientIn, conf);
key = new Text();
line = new Text();
}
@Override
public boolean readKeyValue() throws IOException {
if (lineReader.readLine(line) <= 0) {
return false;
}
bytes = line.getBytes();
key.set(bytes, 0, line.getLength());
line.clear();
return true;
}
@Override
public Text getCurrentKey() throws IOException {
return key;
}
@Override
public NullWritable getCurrentValue() throws IOException {
return NullWritable.get();
}
@Override
public String getLastOutput() {
if (bytes != null) {
try {
return new String(bytes, "UTF-8");
} catch (UnsupportedEncodingException e) {
return "<undecodable>";
}
} else {
return null;
}
}
}
| 2,588 | 27.450549 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/KeyOnlyTextInputWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming.io;
import java.io.IOException;
public class KeyOnlyTextInputWriter extends TextInputWriter {
@Override
public void writeKey(Object key) throws IOException {
writeUTF8(key);
clientOut.write('\n');
}
@Override
public void writeValue(Object value) throws IOException {}
}
| 1,138 | 30.638889 | 75 |
java
|