repo (string, 1-191, ⌀) | file (string, 23-351) | code (string, 0-5.32M) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
/**
* This class is an MR2 replacement for the older MR1 MiniMRCluster that was used
* by tests prior to MR2. This replacement class uses the new MiniMRYarnCluster
* in MR2 but provides the same old MR1 interface, so tests can be migrated from
* MR1 to MR2 with minimal changes.
*
* Due to major differences between MR1 and MR2, a number of methods are either
* unimplemented/unsupported or were re-implemented to provide wrappers around
* MR2 functionality.
*
* @deprecated Use {@link org.apache.hadoop.mapred.MiniMRClientClusterFactory}
* instead
*/
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MiniMRCluster {
private static final Log LOG = LogFactory.getLog(MiniMRCluster.class);
private MiniMRClientCluster mrClientCluster;
public String getTaskTrackerLocalDir(int taskTracker) {
throw new UnsupportedOperationException();
}
public String[] getTaskTrackerLocalDirs(int taskTracker) {
throw new UnsupportedOperationException();
}
class JobTrackerRunner {
// Mock class
}
class TaskTrackerRunner {
// Mock class
}
public JobTrackerRunner getJobTrackerRunner() {
throw new UnsupportedOperationException();
}
TaskTrackerRunner getTaskTrackerRunner(int id) {
throw new UnsupportedOperationException();
}
public int getNumTaskTrackers() {
throw new UnsupportedOperationException();
}
public void setInlineCleanupThreads() {
throw new UnsupportedOperationException();
}
public void waitUntilIdle() {
throw new UnsupportedOperationException();
}
private void waitTaskTrackers() {
throw new UnsupportedOperationException();
}
public int getJobTrackerPort() {
throw new UnsupportedOperationException();
}
public JobConf createJobConf() {
JobConf jobConf = null;
try {
jobConf = new JobConf(mrClientCluster.getConfig());
} catch (IOException e) {
LOG.error(e);
}
return jobConf;
}
public JobConf createJobConf(JobConf conf) {
JobConf jobConf = null;
try {
jobConf = new JobConf(mrClientCluster.getConfig());
} catch (IOException e) {
LOG.error(e);
}
return jobConf;
}
static JobConf configureJobConf(JobConf conf, String namenode,
int jobTrackerPort, int jobTrackerInfoPort, UserGroupInformation ugi) {
throw new UnsupportedOperationException();
}
public MiniMRCluster(int numTaskTrackers, String namenode, int numDir,
String[] racks, String[] hosts) throws IOException {
this(0, 0, numTaskTrackers, namenode, numDir, racks, hosts);
}
public MiniMRCluster(int numTaskTrackers, String namenode, int numDir,
String[] racks, String[] hosts, JobConf conf) throws IOException {
this(0, 0, numTaskTrackers, namenode, numDir, racks, hosts, null, conf);
}
public MiniMRCluster(int numTaskTrackers, String namenode, int numDir)
throws IOException {
this(0, 0, numTaskTrackers, namenode, numDir);
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir) throws IOException {
this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
null);
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir, String[] racks)
throws IOException {
this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
racks, null);
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir, String[] racks,
String[] hosts) throws IOException {
this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
racks, hosts, null);
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir, String[] racks,
String[] hosts, UserGroupInformation ugi) throws IOException {
this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
racks, hosts, ugi, null);
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir, String[] racks,
String[] hosts, UserGroupInformation ugi, JobConf conf)
throws IOException {
this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
racks, hosts, ugi, conf, 0);
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir, String[] racks,
String[] hosts, UserGroupInformation ugi, JobConf conf,
int numTrackerToExclude) throws IOException {
this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir,
racks, hosts, ugi, conf, numTrackerToExclude, new Clock());
}
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
int numTaskTrackers, String namenode, int numDir, String[] racks,
String[] hosts, UserGroupInformation ugi, JobConf conf,
int numTrackerToExclude, Clock clock) throws IOException {
if (conf == null) conf = new JobConf();
FileSystem.setDefaultUri(conf, namenode);
String identifier = this.getClass().getSimpleName() + "_"
+ Integer.toString(new Random().nextInt(Integer.MAX_VALUE));
mrClientCluster = MiniMRClientClusterFactory.create(this.getClass(),
identifier, numTaskTrackers, conf);
}
public UserGroupInformation getUgi() {
throw new UnsupportedOperationException();
}
public TaskCompletionEvent[] getTaskCompletionEvents(JobID id, int from,
int max) throws IOException {
throw new UnsupportedOperationException();
}
public void setJobPriority(JobID jobId, JobPriority priority)
throws AccessControlException, IOException {
throw new UnsupportedOperationException();
}
public JobPriority getJobPriority(JobID jobId) {
throw new UnsupportedOperationException();
}
public long getJobFinishTime(JobID jobId) {
throw new UnsupportedOperationException();
}
public void initializeJob(JobID jobId) throws IOException {
throw new UnsupportedOperationException();
}
public MapTaskCompletionEventsUpdate getMapTaskCompletionEventsUpdates(
int index, JobID jobId, int max) throws IOException {
throw new UnsupportedOperationException();
}
public JobConf getJobTrackerConf() {
JobConf jobConf = null;
try {
jobConf = new JobConf(mrClientCluster.getConfig());
} catch (IOException e) {
LOG.error(e);
}
return jobConf;
}
public int getFaultCount(String hostName) {
throw new UnsupportedOperationException();
}
public void startJobTracker() {
// Do nothing
}
public void startJobTracker(boolean wait) {
// Do nothing
}
public void stopJobTracker() {
// Do nothing
}
public void stopTaskTracker(int id) {
// Do nothing
}
public void startTaskTracker(String host, String rack, int idx, int numDir)
throws IOException {
// Do nothing
}
void addTaskTracker(TaskTrackerRunner taskTracker) {
throw new UnsupportedOperationException();
}
int getTaskTrackerID(String trackerName) {
throw new UnsupportedOperationException();
}
public void shutdown() {
try {
mrClientCluster.stop();
} catch (IOException e) {
LOG.error(e);
}
}
}
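
For reference, the @deprecated note above points tests at MiniMRClientClusterFactory. Below is a minimal, hedged sketch of that direct usage; the class name MiniClusterUsageSketch, the identifier string, and the node count are illustrative values, not taken from the file above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class MiniClusterUsageSketch {
  public static void main(String[] args) throws IOException {
    // Same factory call the wrapper above makes in its constructor.
    MiniMRClientCluster cluster = MiniMRClientClusterFactory.create(
        MiniClusterUsageSketch.class, "MiniClusterUsageSketch", 1, new Configuration());
    try {
      // Equivalent of MiniMRCluster.createJobConf(): wrap the cluster's config.
      JobConf jobConf = new JobConf(cluster.getConfig());
      // ... submit test jobs against jobConf here ...
    } finally {
      cluster.stop(); // equivalent of MiniMRCluster.shutdown()
    }
  }
}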
| 8,632 | 30.507299 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.junit.Ignore;
import org.junit.Test;
import java.io.File;
import java.net.URLClassLoader;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.ClassUtil;
import static org.junit.Assert.*;
@Ignore
public class TestMRCJCJobConf {
private static final String JAR_RELATIVE_PATH =
"build/test/mapred/testjar/testjob.jar";
private static final String CLASSNAME = "testjar.ClassWordCount";
private static String TEST_DIR_WITH_SPECIAL_CHARS =
System.getProperty("test.build.data","/tmp") +
File.separator + "test jobconf with + and spaces";
@Test
public void testFindContainingJar() throws Exception {
testJarAtPath(JAR_RELATIVE_PATH);
}
/**
* Test that findContainingJar works correctly even if the
* path has a "+" sign or spaces in it
*/
@Test
public void testFindContainingJarWithPlus() throws Exception {
new File(TEST_DIR_WITH_SPECIAL_CHARS).mkdirs();
Configuration conf = new Configuration();
FileSystem localfs = FileSystem.getLocal(conf);
FileUtil.copy(localfs, new Path(JAR_RELATIVE_PATH),
localfs, new Path(TEST_DIR_WITH_SPECIAL_CHARS, "test.jar"),
false, true, conf);
testJarAtPath(TEST_DIR_WITH_SPECIAL_CHARS + File.separator + "test.jar");
}
/**
* Given a path with a jar, make a classloader with that jar on the
* classpath, and check that findContainingJar can correctly
* identify the path of the jar.
*/
private void testJarAtPath(String path) throws Exception {
File jar = new File(path).getAbsoluteFile();
assertTrue(jar.exists());
URL urls[] = new URL[] {
jar.toURI().toURL()
};
ClassLoader cl = new URLClassLoader(urls);
Class clazz = Class.forName(CLASSNAME, true, cl);
assertNotNull(clazz);
String containingJar = ClassUtil.findContainingJar(clazz);
assertEquals(jar.getAbsolutePath(), containingJar);
}
}
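
The helper pattern in testJarAtPath can also be exercised on its own. The following is a hedged sketch that assumes a jar path and the name of a class contained in it are passed as command-line arguments; both are placeholders, not values from the test.

import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;
import org.apache.hadoop.util.ClassUtil;

public class FindContainingJarSketch {
  public static void main(String[] args) throws Exception {
    File jar = new File(args[0]).getAbsoluteFile();              // path to some jar
    ClassLoader cl = new URLClassLoader(new URL[] { jar.toURI().toURL() });
    Class<?> clazz = Class.forName(args[1], true, cl);           // class known to be in that jar
    // findContainingJar should resolve back to the jar's absolute path,
    // even when the path contains '+' or spaces (what the test verifies).
    System.out.println(ClassUtil.findContainingJar(clazz));
  }
}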
| 2,903 | 32.37931 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.util.Shell;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Class to test mapred task's
* - temp directory
* - child env
*/
public class TestMiniMRChildTask {
private static final Log LOG =
LogFactory.getLog(TestMiniMRChildTask.class.getName());
private final static String OLD_CONFIGS = "test.old.configs";
private final static String TASK_OPTS_VAL = "-Xmx200m";
private final static String MAP_OPTS_VAL = "-Xmx200m";
private final static String REDUCE_OPTS_VAL = "-Xmx300m";
private static MiniMRYarnCluster mr;
private static MiniDFSCluster dfs;
private static FileSystem fileSys;
private static Configuration conf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR = new Path("target",
TestMiniMRChildTask.class.getName() + "-tmpDir").makeQualified(localFs);
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
/**
* Map class which checks whether the temp directory exists
* and checks the value of java.io.tmpdir.
* Creates a temp file and checks whether it is created in
* the specified temp directory.
*/
public static class MapClass extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
Path tmpDir;
public void map (LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
if (!localFs.exists(tmpDir)) {
fail("Temp directory " + tmpDir + " doesn't exist.");
}
File tmpFile = File.createTempFile("test", ".tmp");
}
public void configure(JobConf job) {
tmpDir = new Path(System.getProperty("java.io.tmpdir"));
try {
localFs = FileSystem.getLocal(job);
} catch (IOException ioe) {
ioe.printStackTrace();
fail("IOException in getting localFS");
}
}
}
/**
* Map class which checks if hadoop lib location
* is in the execution path
*/
public static class ExecutionEnvCheckMapClass extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
public void map (LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
}
public void configure(JobConf job) {
String executionEnvPathVariable = System.getenv(Shell.WINDOWS ? "PATH"
: "LD_LIBRARY_PATH");
String hadoopHome = System.getenv("HADOOP_COMMON_HOME");
if (hadoopHome == null) {
hadoopHome = "";
}
String hadoopLibLocation = hadoopHome
+ (Shell.WINDOWS ? "\\bin" : "/lib/native");
assertTrue(executionEnvPathVariable.contains(hadoopLibLocation));
}
}
// configure a job
private void configure(JobConf conf, Path inDir, Path outDir, String input,
Class<? extends Mapper> map,
Class<? extends Reducer> reduce)
throws IOException {
// set up the input file system and write input text.
FileSystem inFs = inDir.getFileSystem(conf);
FileSystem outFs = outDir.getFileSystem(conf);
outFs.delete(outDir, true);
if (!inFs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
// write input into input file
DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
// configure the mapred Job which creates a tempfile in map.
conf.setJobName("testmap");
conf.setMapperClass(map);
conf.setReducerClass(reduce);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(0);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data",
"/tmp")).toString().replace(' ', '+');
conf.set("test.build.data", TEST_ROOT_DIR);
}
/**
* Launch tests
* @param conf Configuration of the mapreduce job.
* @param inDir input path
* @param outDir output path
* @param input Input text
* @throws IOException
*/
public void launchTest(JobConf conf,
Path inDir,
Path outDir,
String input)
throws IOException, InterruptedException, ClassNotFoundException {
FileSystem outFs = outDir.getFileSystem(conf);
// Launch job with default option for temp dir.
// i.e. temp dir is ./tmp
Job job = Job.getInstance(conf);
job.addFileToClassPath(APP_JAR);
job.setJarByClass(TestMiniMRChildTask.class);
job.setMaxMapAttempts(1); // speed up failures
boolean succeeded = job.waitForCompletion(true);
assertTrue(succeeded);
outFs.delete(outDir, true);
}
private static void checkEnv(String envName, String expValue, String mode) {
// fetch first and trim only if non-null, so the null checks below are reachable
String envValue = System.getenv(envName);
if (envValue != null) {
envValue = envValue.trim();
}
if ("append".equals(mode)) {
if (envValue == null || !envValue.contains(File.pathSeparator)) {
throw new RuntimeException("Missing env variable");
} else {
String parts[] = envValue.split(File.pathSeparator);
// check if the value is appended
if (!parts[parts.length - 1].equals(expValue)) {
throw new RuntimeException("Wrong env variable in append mode");
}
}
} else {
if (envValue == null || !envValue.equals(expValue)) {
throw new RuntimeException("Wrong env variable in noappend mode");
}
}
}
// Mapper that simply checks whether the desired user env variables are present
static class EnvCheckMapper extends MapReduceBase implements
Mapper<WritableComparable, Writable, WritableComparable, Writable> {
public void configure(JobConf job) {
boolean oldConfigs = job.getBoolean(OLD_CONFIGS, false);
if (oldConfigs) {
String javaOpts = job.get(JobConf.MAPRED_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_TASK_JAVA_OPTS + " is null!",
javaOpts);
assertEquals(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: " +
javaOpts,
javaOpts, TASK_OPTS_VAL);
} else {
String mapJavaOpts = job.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_MAP_TASK_JAVA_OPTS + " is null!",
mapJavaOpts);
assertEquals(JobConf.MAPRED_MAP_TASK_JAVA_OPTS + " has value of: " +
mapJavaOpts,
mapJavaOpts, MAP_OPTS_VAL);
}
String path = job.get("path");
// check if the pwd is there in LD_LIBRARY_PATH
String pwd = System.getenv("PWD");
assertTrue("LD doesnt contain pwd",
System.getenv("LD_LIBRARY_PATH").contains(pwd));
// check if X=$X:/abc works for LD_LIBRARY_PATH
checkEnv("LD_LIBRARY_PATH", "/tmp", "append");
// check if X=y works for an already existing parameter
checkEnv("LANG", "en_us_8859_1", "noappend");
// check if X=/tmp for a new env variable
checkEnv("MY_PATH", "/tmp", "noappend");
// check if X=$X:/tmp works for a new env var and results into :/tmp
checkEnv("NEW_PATH", File.pathSeparator + "/tmp", "noappend");
// check if X=$(tt's X var):/tmp for an old env variable inherited from
// the tt
if (Shell.WINDOWS) {
// On Windows, PATH is replaced one more time as part of default config
// of "mapreduce.admin.user.env", i.e. on Windows,
// "mapreduce.admin.user.env" is set to
// "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin"
String hadoopHome = System.getenv("HADOOP_COMMON_HOME");
if (hadoopHome == null) {
hadoopHome = "";
}
String hadoopLibLocation = hadoopHome + "\\bin";
path += File.pathSeparator + hadoopLibLocation;
path += File.pathSeparator + path;
}
checkEnv("PATH", path + File.pathSeparator + "/tmp", "noappend");
String jobLocalDir = job.get(MRJobConfig.JOB_LOCAL_DIR);
assertNotNull(MRJobConfig.JOB_LOCAL_DIR + " is null",
jobLocalDir);
}
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out,
Reporter reporter)
throws IOException {
}
}
static class EnvCheckReducer extends MapReduceBase
implements Reducer<WritableComparable, Writable, WritableComparable, Writable> {
@Override
public void configure(JobConf job) {
boolean oldConfigs = job.getBoolean(OLD_CONFIGS, false);
if (oldConfigs) {
String javaOpts = job.get(JobConf.MAPRED_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_TASK_JAVA_OPTS + " is null!",
javaOpts);
assertEquals(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: " +
javaOpts,
javaOpts, TASK_OPTS_VAL);
} else {
String reduceJavaOpts = job.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " is null!",
reduceJavaOpts);
assertEquals(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " has value of: " +
reduceJavaOpts,
reduceJavaOpts, REDUCE_OPTS_VAL);
}
String path = job.get("path");
// check if the pwd is there in LD_LIBRARY_PATH
String pwd = System.getenv("PWD");
assertTrue("LD doesnt contain pwd",
System.getenv("LD_LIBRARY_PATH").contains(pwd));
// check if X=$X:/abc works for LD_LIBRARY_PATH
checkEnv("LD_LIBRARY_PATH", "/tmp", "append");
// check if X=y works for an already existing parameter
checkEnv("LANG", "en_us_8859_1", "noappend");
// check if X=/tmp for a new env variable
checkEnv("MY_PATH", "/tmp", "noappend");
// check if X=$X:/tmp works for a new env var and results into :/tmp
checkEnv("NEW_PATH", File.pathSeparator + "/tmp", "noappend");
// check if X=$(tt's X var):/tmp for an old env variable inherited from
// the tt
if (Shell.WINDOWS) {
// On Windows, PATH is replaced one more time as part of default config
// of "mapreduce.admin.user.env", i.e. on Windows,
// "mapreduce.admin.user.env"
// is set to "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin"
String hadoopHome = System.getenv("HADOOP_COMMON_HOME");
if (hadoopHome == null) {
hadoopHome = "";
}
String hadoopLibLocation = hadoopHome + "\\bin";
path += File.pathSeparator + hadoopLibLocation;
path += File.pathSeparator + path;
}
checkEnv("PATH", path + File.pathSeparator + "/tmp", "noappend");
}
@Override
public void reduce(WritableComparable key, Iterator<Writable> values,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
}
}
@BeforeClass
public static void setup() throws IOException {
// create configuration, dfs, file system and mapred cluster
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mr == null) {
mr = new MiniMRYarnCluster(TestMiniMRChildTask.class.getName());
Configuration conf = new Configuration();
mr.init(conf);
mr.start();
}
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
// work around the absent public distributed cache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
// close file system and shut down dfs and mapred cluster
try {
if (fileSys != null) {
fileSys.close();
}
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.stop();
mr = null;
}
} catch (IOException ioe) {
LOG.info("IO exception in closing file system)" );
ioe.printStackTrace();
}
}
/**
* Tests the OS-dependent setting of the default execution path for a MapRed
* task, i.e. that MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV causes:
* - on Windows: %HADOOP_COMMON_HOME%\bin to be included in PATH
* - on Linux: $HADOOP_COMMON_HOME/lib/native to be included in LD_LIBRARY_PATH
*/
@Test
public void testMapRedExecutionEnv() {
// test if the env variable can be set
try {
// Application environment
Map<String, String> environment = new HashMap<String, String>();
String setupHadoopHomeCommand = Shell.WINDOWS ?
"HADOOP_COMMON_HOME=C:\\fake\\PATH\\to\\hadoop\\common\\home" :
"HADOOP_COMMON_HOME=/fake/path/to/hadoop/common/home";
MRApps.setEnvFromInputString(environment, setupHadoopHomeCommand, conf);
// Add the env variables passed by the admin
MRApps.setEnvFromInputString(environment, conf.get(
MRJobConfig.MAPRED_ADMIN_USER_ENV,
MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV), conf);
String executionPaths = environment.get(
Shell.WINDOWS ? "PATH" : "LD_LIBRARY_PATH");
String toFind = Shell.WINDOWS ?
"C:\\fake\\PATH\\to\\hadoop\\common\\home\\bin" :
"/fake/path/to/hadoop/common/home/lib/native";
// Ensure execution PATH/LD_LIBRARY_PATH set up pointing to hadoop lib
assertTrue("execution path does not include the hadoop lib location "
+ toFind, executionPaths.contains(toFind));
} catch (Exception e) {
e.printStackTrace();
fail("Exception in testing execution environment for MapReduce task");
tearDown();
}
// now launch a mapreduce job to ensure that the child
// also gets the configured setting for hadoop lib
try {
JobConf conf = new JobConf(mr.getConfig());
// initialize input, output directories
Path inDir = new Path("input");
Path outDir = new Path("output");
String input = "The input";
// set config to use the ExecutionEnvCheckMapClass map class
configure(conf, inDir, outDir, input,
ExecutionEnvCheckMapClass.class, IdentityReducer.class);
launchTest(conf, inDir, outDir, input);
} catch(Exception e) {
e.printStackTrace();
fail("Exception in testing propagation of env setting to child task");
tearDown();
}
}
/**
* Tests whether user-set env variables are reflected in the child
* processes. Covers:
* - x=y (x can be an already existing env variable or a new variable)
* - x=$x:y (replace $x with the current value of x)
*/
@Test
public void testTaskEnv(){
try {
JobConf conf = new JobConf(mr.getConfig());
// initialize input, output directories
Path inDir = new Path("testing/wc/input1");
Path outDir = new Path("testing/wc/output1");
FileSystem outFs = outDir.getFileSystem(conf);
runTestTaskEnv(conf, inDir, outDir, false);
outFs.delete(outDir, true);
} catch(Exception e) {
e.printStackTrace();
fail("Exception in testing child env");
tearDown();
}
}
/**
* Tests whether user-set *old* (MR1-style) env variables are reflected in the
* child processes. Covers:
* - x=y (x can be an already existing env variable or a new variable)
* - x=$x:y (replace $x with the current value of x)
*/
@Test
public void testTaskOldEnv(){
try {
JobConf conf = new JobConf(mr.getConfig());
// initialize input, output directories
Path inDir = new Path("testing/wc/input1");
Path outDir = new Path("testing/wc/output1");
FileSystem outFs = outDir.getFileSystem(conf);
runTestTaskEnv(conf, inDir, outDir, true);
outFs.delete(outDir, true);
} catch(Exception e) {
e.printStackTrace();
fail("Exception in testing child env");
tearDown();
}
}
void runTestTaskEnv(JobConf conf, Path inDir, Path outDir, boolean oldConfigs)
throws IOException, InterruptedException, ClassNotFoundException {
String input = "The input";
configure(conf, inDir, outDir, input,
EnvCheckMapper.class, EnvCheckReducer.class);
// test
// - new SET of new var (MY_PATH)
// - set of old var (LANG)
// - append to an old var from modified env (LD_LIBRARY_PATH)
// - append to an old var from tt's env (PATH)
// - append to a new var (NEW_PATH)
String mapTaskEnvKey = JobConf.MAPRED_MAP_TASK_ENV;
String reduceTaskEnvKey = JobConf.MAPRED_REDUCE_TASK_ENV;
String mapTaskJavaOptsKey = JobConf.MAPRED_MAP_TASK_JAVA_OPTS;
String reduceTaskJavaOptsKey = JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS;
String mapTaskJavaOpts = MAP_OPTS_VAL;
String reduceTaskJavaOpts = REDUCE_OPTS_VAL;
conf.setBoolean(OLD_CONFIGS, oldConfigs);
if (oldConfigs) {
mapTaskEnvKey = reduceTaskEnvKey = JobConf.MAPRED_TASK_ENV;
mapTaskJavaOptsKey = reduceTaskJavaOptsKey = JobConf.MAPRED_TASK_JAVA_OPTS;
mapTaskJavaOpts = reduceTaskJavaOpts = TASK_OPTS_VAL;
}
conf.set(
mapTaskEnvKey,
Shell.WINDOWS ? "MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=%LD_LIBRARY_PATH%;/tmp,"
+ "PATH=%PATH%;/tmp,NEW_PATH=%NEW_PATH%;/tmp"
: "MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp,"
+ "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
conf.set(
reduceTaskEnvKey,
Shell.WINDOWS ? "MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=%LD_LIBRARY_PATH%;/tmp,"
+ "PATH=%PATH%;/tmp,NEW_PATH=%NEW_PATH%;/tmp"
: "MY_PATH=/tmp,LANG=en_us_8859_1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp,"
+ "PATH=$PATH:/tmp,NEW_PATH=$NEW_PATH:/tmp");
conf.set("path", System.getenv("PATH"));
conf.set(mapTaskJavaOptsKey, mapTaskJavaOpts);
conf.set(reduceTaskJavaOptsKey, reduceTaskJavaOpts);
Job job = Job.getInstance(conf);
job.addFileToClassPath(APP_JAR);
job.setJarByClass(TestMiniMRChildTask.class);
job.setMaxMapAttempts(1); // speed up failures
boolean succeeded = job.waitForCompletion(true);
assertTrue("The environment checker job failed.", succeeded);
}
}
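
As a standalone illustration of the env-expansion step exercised by testMapRedExecutionEnv, the following hedged sketch expands mapreduce.admin.user.env (or its default) into a task environment map; the class name and the printed output are illustrative and not part of the test above.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public class AdminUserEnvSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Map<String, String> environment = new HashMap<String, String>();
    // Expand the admin-configured env string (the default includes the hadoop
    // native lib dir on Linux and %HADOOP_COMMON_HOME%\bin on Windows).
    MRApps.setEnvFromInputString(environment,
        conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV,
            MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV), conf);
    System.out.println(environment);
  }
}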
| 20,968 | 37.125455 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTaskCommit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.mapred.SortedRanges.Range;
import org.apache.hadoop.mapreduce.TaskType;
public class TestTaskCommit extends HadoopTestCase {
Path rootDir =
new Path(System.getProperty("test.build.data", "/tmp"), "test");
static class CommitterWithCommitFail extends FileOutputCommitter {
public void commitTask(TaskAttemptContext context) throws IOException {
Path taskOutputPath = getTaskAttemptPath(context);
TaskAttemptID attemptId = context.getTaskAttemptID();
JobConf job = context.getJobConf();
if (taskOutputPath != null) {
FileSystem fs = taskOutputPath.getFileSystem(job);
if (fs.exists(taskOutputPath)) {
throw new IOException();
}
}
}
}
/**
* Special Committer that does not cleanup temporary files in
* abortTask
*
* The framework's FileOutputCommitter cleans up any temporary
* files left behind in abortTask. We want the test case to
* find these files and hence short-circuit abortTask.
*/
static class CommitterWithoutCleanup extends FileOutputCommitter {
@Override
public void abortTask(TaskAttemptContext context) throws IOException {
// does nothing
}
}
/**
* Special committer that always requires commit.
*/
static class CommitterThatAlwaysRequiresCommit extends FileOutputCommitter {
@Override
public boolean needsTaskCommit(TaskAttemptContext context)
throws IOException {
return true;
}
}
public TestTaskCommit() throws IOException {
super(LOCAL_MR, LOCAL_FS, 1, 1);
}
@Override
public void tearDown() throws Exception {
super.tearDown();
FileUtil.fullyDelete(new File(rootDir.toString()));
}
public void testCommitFail() throws IOException {
final Path inDir = new Path(rootDir, "./input");
final Path outDir = new Path(rootDir, "./output");
JobConf jobConf = createJobConf();
jobConf.setMaxMapAttempts(1);
jobConf.setOutputCommitter(CommitterWithCommitFail.class);
RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
rJob.waitForCompletion();
assertEquals(JobStatus.FAILED, rJob.getJobState());
}
private class MyUmbilical implements TaskUmbilicalProtocol {
boolean taskDone = false;
@Override
public boolean canCommit(TaskAttemptID taskid) throws IOException {
return false;
}
@Override
public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException {
fail("Task should not go to commit-pending");
}
@Override
public void done(TaskAttemptID taskid) throws IOException {
taskDone = true;
}
@Override
public void fatalError(TaskAttemptID taskId, String message)
throws IOException { }
@Override
public void fsError(TaskAttemptID taskId, String message)
throws IOException { }
@Override
public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId,
int fromIndex, int maxLocs, TaskAttemptID id) throws IOException {
return null;
}
@Override
public JvmTask getTask(JvmContext context) throws IOException {
return null;
}
@Override
public boolean ping(TaskAttemptID taskid) throws IOException {
return true;
}
@Override
public void reportDiagnosticInfo(TaskAttemptID taskid, String trace)
throws IOException {
}
@Override
public void reportNextRecordRange(TaskAttemptID taskid, Range range)
throws IOException {
}
@Override
public void shuffleError(TaskAttemptID taskId, String message)
throws IOException {
}
@Override
public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException {
return true;
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return 0;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return null;
}
}
/**
* A test that mimics a failed task to ensure that it does
* not get into the COMMIT_PENDING state, by using a fake
* UmbilicalProtocol implementation that fails if the commit
* protocol is played.
*
* The test mocks the various steps in a failed task's
* life-cycle using a special OutputCommitter and UmbilicalProtocol
* implementation.
*
* @throws Exception
*/
public void testTaskCleanupDoesNotCommit() throws Exception {
// Mimic a job with a special committer that does not cleanup
// files when a task fails.
JobConf job = new JobConf();
job.setOutputCommitter(CommitterWithoutCleanup.class);
Path outDir = new Path(rootDir, "output");
FileOutputFormat.setOutputPath(job, outDir);
// Mimic job setup
String dummyAttemptID = "attempt_200707121733_0001_m_000000_0";
TaskAttemptID attemptID = TaskAttemptID.forName(dummyAttemptID);
OutputCommitter committer = new CommitterWithoutCleanup();
JobContext jContext = new JobContextImpl(job, attemptID.getJobID());
committer.setupJob(jContext);
// Mimic a map task
dummyAttemptID = "attempt_200707121733_0001_m_000001_0";
attemptID = TaskAttemptID.forName(dummyAttemptID);
Task task = new MapTask(null, attemptID, 0, null, 1);
task.setConf(job);
task.localizeConfiguration(job);
task.initialize(job, attemptID.getJobID(), Reporter.NULL, false);
// Mimic the map task writing some output.
String file = "test.txt";
FileSystem localFs = FileSystem.getLocal(job);
TextOutputFormat<Text, Text> theOutputFormat
= new TextOutputFormat<Text, Text>();
RecordWriter<Text, Text> theRecordWriter =
theOutputFormat.getRecordWriter(localFs,
job, file, Reporter.NULL);
theRecordWriter.write(new Text("key"), new Text("value"));
theRecordWriter.close(Reporter.NULL);
// Mimic a task failure; setting up the task for cleanup simulates
// the abort protocol to be played.
// Without checks in the framework, this will fail
// as the committer will cause a COMMIT to happen for
// the cleanup task.
task.setTaskCleanupTask();
MyUmbilical umbilical = new MyUmbilical();
task.run(job, umbilical);
assertTrue("Task did not succeed", umbilical.taskDone);
}
public void testCommitRequiredForMapTask() throws Exception {
Task testTask = createDummyTask(TaskType.MAP);
assertTrue("MapTask should need commit", testTask.isCommitRequired());
}
public void testCommitRequiredForReduceTask() throws Exception {
Task testTask = createDummyTask(TaskType.REDUCE);
assertTrue("ReduceTask should need commit", testTask.isCommitRequired());
}
public void testCommitNotRequiredForJobSetup() throws Exception {
Task testTask = createDummyTask(TaskType.MAP);
testTask.setJobSetupTask();
assertFalse("Job setup task should not need commit",
testTask.isCommitRequired());
}
public void testCommitNotRequiredForJobCleanup() throws Exception {
Task testTask = createDummyTask(TaskType.MAP);
testTask.setJobCleanupTask();
assertFalse("Job cleanup task should not need commit",
testTask.isCommitRequired());
}
public void testCommitNotRequiredForTaskCleanup() throws Exception {
Task testTask = createDummyTask(TaskType.REDUCE);
testTask.setTaskCleanupTask();
assertFalse("Task cleanup task should not need commit",
testTask.isCommitRequired());
}
private Task createDummyTask(TaskType type) throws IOException, ClassNotFoundException,
InterruptedException {
JobConf conf = new JobConf();
conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class);
Path outDir = new Path(rootDir, "output");
FileOutputFormat.setOutputPath(conf, outDir);
JobID jobId = JobID.forName("job_201002121132_0001");
Task testTask;
if (type == TaskType.MAP) {
testTask = new MapTask();
} else {
testTask = new ReduceTask();
}
testTask.setConf(conf);
testTask.initialize(conf, jobId, Reporter.NULL, false);
return testTask;
}
public static void main(String[] argv) throws Exception {
TestTaskCommit td = new TestTaskCommit();
td.testCommitFail();
}
}
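
A hedged sketch of the committer wiring these tests rely on: a JobConf is pointed at a FileOutputCommitter subclass so the framework consults needsTaskCommit() during the task commit protocol. The class and method names below, other than the Hadoop APIs already used above, are illustrative.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskAttemptContext;

public class CommitterWiringSketch {
  // Same idea as CommitterThatAlwaysRequiresCommit above: force the commit path.
  static class AlwaysCommit extends FileOutputCommitter {
    @Override
    public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
      return true;
    }
  }

  // Mirrors how testCommitFail and createDummyTask configure their jobs.
  static JobConf configureJob(Path outDir) {
    JobConf conf = new JobConf();
    conf.setOutputCommitter(AlwaysCommit.class);
    FileOutputFormat.setOutputPath(conf, outDir);
    return conf;
  }
}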
| 9,524 | 32.188153 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.File;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
/**********************************************************
* MapredLoadTest generates a bunch of work that exercises
* a Hadoop Map-Reduce system (and DFS, too). It goes through
* the following steps:
*
* 1) Take inputs 'range' and 'counts'.
* 2) Generate 'counts' random integers between 0 and range-1.
* 3) Create a file that lists each integer between 0 and range-1,
* and lists the number of times that integer was generated.
* 4) Emit a (very large) file that contains all the integers
* in the order generated.
* 5) After the file has been generated, read it back and count
* how many times each int was generated.
* 6) Compare this big count-map against the original one. If
* they match, then SUCCESS! Otherwise, FAILURE!
*
* OK, that's how we can think about it. What are the map-reduce
* steps that get the job done?
*
* 1) In a non-mapred thread, take the inputs 'range' and 'counts'.
* 2) In a non-mapred thread, generate the answer-key and write to disk.
* 3) In a mapred job, divide the answer key into K jobs.
* 4) A mapred 'generator' task consists of K map jobs. Each reads
* an individual "sub-key", and generates integers according
* to it (though with a random ordering).
* 5) The generator's reduce task agglomerates all of those files
* into a single one.
* 6) A mapred 'reader' task consists of M map jobs. The output
* file is cut into M pieces. Each of the M jobs counts the
* individual ints in its chunk and creates a map of all seen ints.
* 7) A mapred job integrates all the count files into a single one.
*
**********************************************************/
public class TestMapRed extends Configured implements Tool {
/**
* Modified to make it a junit test.
* The RandomGen Job does the actual work of creating
* a huge file of assorted numbers. It receives instructions
* as to how many times each number should be counted. Then
* it emits those numbers in a crazy order.
*
* The map() function takes a key/val pair that describes
* a value-to-be-emitted (the key) and how many times it
* should be emitted (the value), aka "numtimes". map() then
* emits a series of intermediate key/val pairs. It emits
* 'numtimes' of these. The key is a random number and the
* value is the 'value-to-be-emitted'.
*
* The system collates and merges these pairs according to
* the random number. reduce() function takes in a key/value
* pair that consists of a crazy random number and a series
* of values that should be emitted. The random number key
* is now dropped, and reduce() emits a pair for every intermediate value.
* The emitted key is an intermediate value. The emitted value
* is just a blank string. Thus, we've created a huge file
* of numbers in random order, but where each number appears
* as many times as we were instructed.
*/
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestMapRed-mapred");
static class RandomGenMapper
implements Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void map(IntWritable key, IntWritable val,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
int randomVal = key.get();
int randomCount = val.get();
for (int i = 0; i < randomCount; i++) {
out.collect(new IntWritable(Math.abs(r.nextInt())), new IntWritable(randomVal));
}
}
public void close() {
}
}
/**
*/
static class RandomGenReducer
implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void reduce(IntWritable key, Iterator<IntWritable> it,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
while (it.hasNext()) {
out.collect(it.next(), null);
}
}
public void close() {
}
}
/**
* The RandomCheck Job does a lot of our work. It takes
* in a num/string keyspace, and transforms it into a
* key/count(int) keyspace.
*
* The map() function just emits a num/1 pair for every
* num/string input pair.
*
* The reduce() function sums up all the 1s that were
* emitted for a single key. It then emits the key/total
* pair.
*
* This is used to regenerate the random number "answer key".
* Each key here is a random number, and the count is the
* number of times the number was emitted.
*/
static class RandomCheckMapper
implements Mapper<WritableComparable, Text, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void map(WritableComparable key, Text val,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
out.collect(new IntWritable(Integer.parseInt(val.toString().trim())), new IntWritable(1));
}
public void close() {
}
}
/**
*/
static class RandomCheckReducer
implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void reduce(IntWritable key, Iterator<IntWritable> it,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
int keyint = key.get();
int count = 0;
while (it.hasNext()) {
it.next();
count++;
}
out.collect(new IntWritable(keyint), new IntWritable(count));
}
public void close() {
}
}
/**
* The Merge Job is a really simple one. It takes in
* an int/int key-value set, and emits the same set.
* But it merges identical keys by adding their values.
*
* Thus, the map() function is just the identity function
* and reduce() just sums. Nothing to see here!
*/
static class MergeMapper
implements Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void map(IntWritable key, IntWritable val,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
int keyint = key.get();
int valint = val.get();
out.collect(new IntWritable(keyint), new IntWritable(valint));
}
public void close() {
}
}
static class MergeReducer
implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void reduce(IntWritable key, Iterator<IntWritable> it,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
int keyint = key.get();
int total = 0;
while (it.hasNext()) {
total += it.next().get();
}
out.collect(new IntWritable(keyint), new IntWritable(total));
}
public void close() {
}
}
private static int range = 10;
private static int counts = 100;
private static Random r = new Random();
@After
public void cleanup() {
FileUtil.fullyDelete(TEST_DIR);
}
/**
public TestMapRed(int range, int counts, Configuration conf) throws IOException {
this.range = range;
this.counts = counts;
this.conf = conf;
}
**/
@Test
public void testMapred() throws Exception {
launch();
}
private static class MyMap
implements Mapper<WritableComparable, Text, Text, Text> {
public void configure(JobConf conf) {
}
public void map(WritableComparable key, Text value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
String str = StringUtils.toLowerCase(value.toString());
output.collect(new Text(str), value);
}
public void close() throws IOException {
}
}
private static class MyReduce extends IdentityReducer {
private JobConf conf;
private boolean compressInput;
private boolean first = true;
@Override
public void configure(JobConf conf) {
this.conf = conf;
compressInput = conf.getCompressMapOutput();
}
public void reduce(WritableComparable key, Iterator values,
OutputCollector output, Reporter reporter
) throws IOException {
if (first) {
first = false;
MapOutputFile mapOutputFile = new MROutputFiles();
mapOutputFile.setConf(conf);
Path input = mapOutputFile.getInputFile(0);
FileSystem fs = FileSystem.get(conf);
assertTrue("reduce input exists " + input, fs.exists(input));
SequenceFile.Reader rdr =
new SequenceFile.Reader(fs, input, conf);
assertEquals("is reduce input compressed " + input,
compressInput,
rdr.isCompressed());
rdr.close();
}
}
}
public static class NullMapper
implements Mapper<NullWritable,Text,NullWritable,Text> {
public void map(NullWritable key, Text val,
OutputCollector<NullWritable,Text> output, Reporter reporter)
throws IOException {
output.collect(NullWritable.get(), val);
}
public void configure(JobConf conf) { }
public void close() { }
}
@Test
public void testNullKeys() throws Exception {
JobConf conf = new JobConf(TestMapRed.class);
FileSystem fs = FileSystem.getLocal(conf);
HashSet<String> values = new HashSet<String>();
String m = "AAAAAAAAAAAAAA";
for (int i = 1; i < 11; ++i) {
values.add(m);
m = m.replace((char)('A' + i - 1), (char)('A' + i));
}
Path testdir = new Path(
System.getProperty("test.build.data","/tmp")).makeQualified(fs);
fs.delete(testdir, true);
Path inFile = new Path(testdir, "nullin/blah");
SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, inFile,
NullWritable.class, Text.class, SequenceFile.CompressionType.NONE);
Text t = new Text();
for (String s : values) {
t.set(s);
w.append(NullWritable.get(), t);
}
w.close();
FileInputFormat.setInputPaths(conf, inFile);
FileOutputFormat.setOutputPath(conf, new Path(testdir, "nullout"));
conf.setMapperClass(NullMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setOutputKeyClass(NullWritable.class);
conf.setOutputValueClass(Text.class);
conf.setInputFormat(SequenceFileInputFormat.class);
conf.setOutputFormat(SequenceFileOutputFormat.class);
conf.setNumReduceTasks(1);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
JobClient.runJob(conf);
// Since null keys are all equal, allow any ordering
SequenceFile.Reader r = new SequenceFile.Reader(fs,
new Path(testdir, "nullout/part-00000"), conf);
m = "AAAAAAAAAAAAAA";
for (int i = 1; r.next(NullWritable.get(), t); ++i) {
assertTrue("Unexpected value: " + t, values.remove(t.toString()));
m = m.replace((char)('A' + i - 1), (char)('A' + i));
}
assertTrue("Missing values: " + values.toString(), values.isEmpty());
}
private void checkCompression(boolean compressMapOutputs,
CompressionType redCompression,
boolean includeCombine
) throws Exception {
JobConf conf = new JobConf(TestMapRed.class);
Path testdir = new Path(TEST_DIR.getAbsolutePath());
Path inDir = new Path(testdir, "in");
Path outDir = new Path(testdir, "out");
FileSystem fs = FileSystem.get(conf);
fs.delete(testdir, true);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setMapperClass(MyMap.class);
conf.setReducerClass(MyReduce.class);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
conf.setOutputFormat(SequenceFileOutputFormat.class);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
if (includeCombine) {
conf.setCombinerClass(IdentityReducer.class);
}
conf.setCompressMapOutput(compressMapOutputs);
SequenceFileOutputFormat.setOutputCompressionType(conf, redCompression);
try {
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
Path inFile = new Path(inDir, "part0");
DataOutputStream f = fs.create(inFile);
f.writeBytes("Owen was here\n");
f.writeBytes("Hadoop is fun\n");
f.writeBytes("Is this done, yet?\n");
f.close();
RunningJob rj = JobClient.runJob(conf);
assertTrue("job was complete", rj.isComplete());
assertTrue("job was successful", rj.isSuccessful());
Path output = new Path(outDir,
Task.getOutputName(0));
assertTrue("reduce output exists " + output, fs.exists(output));
SequenceFile.Reader rdr =
new SequenceFile.Reader(fs, output, conf);
assertEquals("is reduce output compressed " + output,
redCompression != CompressionType.NONE,
rdr.isCompressed());
rdr.close();
} finally {
fs.delete(testdir, true);
}
}
@Test
public void testCompression() throws Exception {
EnumSet<SequenceFile.CompressionType> seq =
EnumSet.allOf(SequenceFile.CompressionType.class);
for (CompressionType redCompression : seq) {
for(int combine=0; combine < 2; ++combine) {
checkCompression(false, redCompression, combine == 1);
checkCompression(true, redCompression, combine == 1);
}
}
}
/**
*
*/
public void launch() throws Exception {
//
// Generate distribution of ints. This is the answer key.
//
JobConf conf;
//Check whether a configuration was provided through the Configured
//interface. This would happen when running the testcase through the command line.
if(getConf() == null) {
conf = new JobConf();
} else {
conf = new JobConf(getConf());
}
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.setJarByClass(TestMapRed.class);
int countsToGo = counts;
int dist[] = new int[range];
for (int i = 0; i < range; i++) {
double avgInts = (1.0 * countsToGo) / (range - i);
dist[i] = (int) Math.max(0, Math.round(avgInts + (Math.sqrt(avgInts) * r.nextGaussian())));
countsToGo -= dist[i];
}
if (countsToGo > 0) {
dist[dist.length-1] += countsToGo;
}
//
// Write the answer key to a file.
//
FileSystem fs = FileSystem.get(conf);
Path testdir = new Path(TEST_DIR.getAbsolutePath(), "mapred.loadtest");
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
Path randomIns = new Path(testdir, "genins");
if (!fs.mkdirs(randomIns)) {
throw new IOException("Mkdirs failed to create " + randomIns.toString());
}
Path answerkey = new Path(randomIns, "answer.key");
SequenceFile.Writer out =
SequenceFile.createWriter(fs, conf, answerkey, IntWritable.class,
IntWritable.class,
SequenceFile.CompressionType.NONE);
try {
for (int i = 0; i < range; i++) {
out.append(new IntWritable(i), new IntWritable(dist[i]));
}
} finally {
out.close();
}
//printFiles(randomIns, conf);
//
// Now we need to generate the random numbers according to
// the above distribution.
//
// We create a lot of map tasks, each of which takes at least
// one "line" of the distribution. (That is, a certain number
// X is to be generated Y number of times.)
//
// A map task emits Y key/val pairs. The val is X. The key
// is a randomly-generated number.
//
// The reduce task gets its input sorted by key. That is, sorted
// in random order. It then emits a single line of text
// for the given values. It does not emit the key.
//
// Because there's just one reduce task, we emit a single big
// file of random numbers.
//
Path randomOuts = new Path(testdir, "genouts");
fs.delete(randomOuts, true);
JobConf genJob = new JobConf(conf, TestMapRed.class);
FileInputFormat.setInputPaths(genJob, randomIns);
genJob.setInputFormat(SequenceFileInputFormat.class);
genJob.setMapperClass(RandomGenMapper.class);
FileOutputFormat.setOutputPath(genJob, randomOuts);
genJob.setOutputKeyClass(IntWritable.class);
genJob.setOutputValueClass(IntWritable.class);
genJob.setOutputFormat(TextOutputFormat.class);
genJob.setReducerClass(RandomGenReducer.class);
genJob.setNumReduceTasks(1);
JobClient.runJob(genJob);
//printFiles(randomOuts, conf);
//
// Next, we read the big file in and regenerate the
// original map. It's split into a number of parts.
// (That number is 'intermediateReduces'.)
//
// We have many map tasks, each of which read at least one
// of the output numbers. For each number read in, the
// map task emits a key/value pair where the key is the
// number and the value is "1".
//
// We have a single reduce task, which receives its input
// sorted by the key emitted above. For each key, there will
// be a certain number of "1" values. The reduce task sums
// these values to compute how many times the given key was
// emitted.
//
// The reduce task then emits a key/val pair where the key
// is the number in question, and the value is the number of
// times the key was emitted. This is the same format as the
// original answer key (except that numbers emitted zero times
// will not appear in the regenerated key.) The answer set
// is split into a number of pieces. A final MapReduce job
// will merge them.
//
// There's not really a need to go to 10 reduces here
// instead of 1. But we want to test what happens when
// you have multiple reduces at once.
//
int intermediateReduces = 10;
Path intermediateOuts = new Path(testdir, "intermediateouts");
fs.delete(intermediateOuts, true);
JobConf checkJob = new JobConf(conf, TestMapRed.class);
FileInputFormat.setInputPaths(checkJob, randomOuts);
checkJob.setInputFormat(TextInputFormat.class);
checkJob.setMapperClass(RandomCheckMapper.class);
FileOutputFormat.setOutputPath(checkJob, intermediateOuts);
checkJob.setOutputKeyClass(IntWritable.class);
checkJob.setOutputValueClass(IntWritable.class);
checkJob.setOutputFormat(MapFileOutputFormat.class);
checkJob.setReducerClass(RandomCheckReducer.class);
checkJob.setNumReduceTasks(intermediateReduces);
JobClient.runJob(checkJob);
//printFiles(intermediateOuts, conf);
//
// OK, now we take the output from the last job and
// merge it down to a single file. The map() and reduce()
// functions don't really do anything except reemit tuples.
// But by having a single reduce task here, we end up merging
// all the files.
//
Path finalOuts = new Path(testdir, "finalouts");
fs.delete(finalOuts, true);
JobConf mergeJob = new JobConf(conf, TestMapRed.class);
FileInputFormat.setInputPaths(mergeJob, intermediateOuts);
mergeJob.setInputFormat(SequenceFileInputFormat.class);
mergeJob.setMapperClass(MergeMapper.class);
FileOutputFormat.setOutputPath(mergeJob, finalOuts);
mergeJob.setOutputKeyClass(IntWritable.class);
mergeJob.setOutputValueClass(IntWritable.class);
mergeJob.setOutputFormat(SequenceFileOutputFormat.class);
mergeJob.setReducerClass(MergeReducer.class);
mergeJob.setNumReduceTasks(1);
JobClient.runJob(mergeJob);
//printFiles(finalOuts, conf);
//
// Finally, we compare the reconstructed answer key with the
// original one. Remember, we need to ignore zero-count items
// in the original key.
//
boolean success = true;
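    // the merge job ran with a single reducer, so its entire output is the
    // one file part-00000; read it back as the recomputed answer key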
Path recomputedkey = new Path(finalOuts, "part-00000");
SequenceFile.Reader in = new SequenceFile.Reader(fs, recomputedkey, conf);
int totalseen = 0;
try {
IntWritable key = new IntWritable();
IntWritable val = new IntWritable();
for (int i = 0; i < range; i++) {
if (dist[i] == 0) {
continue;
}
if (!in.next(key, val)) {
System.err.println("Cannot read entry " + i);
success = false;
break;
} else {
if (!((key.get() == i) && (val.get() == dist[i]))) {
System.err.println("Mismatch! Pos=" + key.get() + ", i=" + i +
", val=" + val.get() + ", dist[i]=" + dist[i]);
success = false;
}
totalseen += val.get();
}
}
if (success) {
if (in.next(key, val)) {
System.err.println("Unnecessary lines in recomputed key!");
success = false;
}
}
} finally {
in.close();
}
int originalTotal = 0;
for (int aDist : dist) {
originalTotal += aDist;
}
System.out.println("Original sum: " + originalTotal);
System.out.println("Recomputed sum: " + totalseen);
//
// Write to "results" whether the test succeeded or not.
//
Path resultFile = new Path(testdir, "results");
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(resultFile)));
try {
bw.write("Success=" + success + "\n");
System.out.println("Success=" + success);
} finally {
bw.close();
}
assertTrue("testMapRed failed", success);
fs.delete(testdir, true);
}
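  //
  // Illustrative only -- this helper is not called by the tests above. It is
  // a minimal sketch of the answer-key comparison performed at the end of
  // launch(), assuming the recomputed counts have been collected into an
  // in-memory map of value -> count. The name and signature are hypothetical.
  //
  private static boolean matchesAnswerKey(int[] dist,
      java.util.Map<Integer, Integer> recomputed) {
    int nonZero = 0;
    for (int i = 0; i < dist.length; i++) {
      if (dist[i] == 0) {
        continue;                 // zero-count values never appear in the output
      }
      nonZero++;
      Integer count = recomputed.get(i);
      if (count == null || count.intValue() != dist[i]) {
        return false;             // missing value or mismatched count
      }
    }
    // no extra values may appear either
    return recomputed.size() == nonZero;
  }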
private static void printTextFile(FileSystem fs, Path p) throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(p)));
String line;
while ((line = in.readLine()) != null) {
System.out.println(" Row: " + line);
}
in.close();
}
private static void printSequenceFile(FileSystem fs, Path p,
Configuration conf) throws IOException {
SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf);
Object key = null;
Object value = null;
while ((key = r.next(key)) != null) {
value = r.getCurrentValue(value);
System.out.println(" Row: " + key + ", " + value);
}
r.close();
}
private static boolean isSequenceFile(FileSystem fs,
Path f) throws IOException {
    // a SequenceFile always starts with the 3-byte magic header "SEQ"
    DataInputStream in = fs.open(f);
    try {
      byte[] seq = "SEQ".getBytes();
      for (int i = 0; i < seq.length; ++i) {
        if (seq[i] != in.read()) {
          return false;
        }
      }
    } finally {
      in.close();
    }
    return true;
}
private static void printFiles(Path dir,
Configuration conf) throws IOException {
FileSystem fs = dir.getFileSystem(conf);
for(FileStatus f: fs.listStatus(dir)) {
System.out.println("Reading " + f.getPath() + ": ");
if (f.isDirectory()) {
System.out.println(" it is a map file.");
printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
} else if (isSequenceFile(fs, f.getPath())) {
System.out.println(" it is a sequence file.");
printSequenceFile(fs, f.getPath(), conf);
} else {
System.out.println(" it is a text file.");
printTextFile(fs, f.getPath());
}
}
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new TestMapRed(), argv);
System.exit(res);
}
@Test
public void testSmallInput(){
runJob(100);
}
@Test
public void testBiggerInput(){
runJob(1000);
}
public void runJob(int items) {
try {
JobConf conf = new JobConf(TestMapRed.class);
Path testdir = new Path(TEST_DIR.getAbsolutePath());
Path inDir = new Path(testdir, "in");
Path outDir = new Path(testdir, "out");
FileSystem fs = FileSystem.get(conf);
fs.delete(testdir, true);
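      // shrink the map-side sort buffer to 1 MB, presumably to exercise the
      // spill path with the large record values written below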
conf.setInt(JobContext.IO_SORT_MB, 1);
conf.setInputFormat(SequenceFileInputFormat.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
conf.setOutputFormat(SequenceFileOutputFormat.class);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
Path inFile = new Path(inDir, "part0");
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile,
Text.class, Text.class);
StringBuffer content = new StringBuffer();
for (int i = 0; i < 1000; i++) {
content.append(i).append(": This is one more line of content\n");
}
Text text = new Text(content.toString());
for (int i = 0; i < items; i++) {
writer.append(new Text("rec:" + i), text);
}
writer.close();
JobClient.runJob(conf);
} catch (Exception e) {
assertTrue("Threw exception:" + e,false);
}
}
@Override
public int run(String[] argv) throws Exception {
if (argv.length < 2) {
System.err.println("Usage: TestMapRed <range> <counts>");
System.err.println();
System.err.println("Note: a good test will have a " +
"<counts> value that is substantially larger than the <range>");
return -1;
}
int i = 0;
range = Integer.parseInt(argv[i++]);
counts = Integer.parseInt(argv[i++]);
launch();
return 0;
}
}
| 28,632 | 34.701995 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalMRNotification.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
/**
* Tests Job end notification in local mode.
*/
public class TestLocalMRNotification extends NotificationTestCase {
public TestLocalMRNotification() throws IOException {
super(HadoopTestCase.LOCAL_MR);
}
}
| 1,094 | 31.205882 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCommandLineJobSubmission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Ignore;
/**
* check for the job submission options of
* -libjars -files -archives
*/
@Ignore
public class TestCommandLineJobSubmission extends TestCase {
  // Input/output paths for this test.
  // These are all dummies and do not exercise much in MapReduce
  // beyond the command-line params.
static final Path input = new Path("/test/input/");
static final Path output = new Path("/test/output");
File buildDir = new File(System.getProperty("test.build.data", "/tmp"));
public void testJobShell() throws Exception {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fs = null;
Path testFile = new Path(input, "testfile");
try {
Configuration conf = new Configuration();
//start the mini mr and dfs cluster.
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = dfs.getFileSystem();
FSDataOutputStream stream = fs.create(testFile);
stream.write("teststring".getBytes());
stream.close();
mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
File thisbuildDir = new File(buildDir, "jobCommand");
assertTrue("create build dir", thisbuildDir.mkdirs());
File f = new File(thisbuildDir, "files_tmp");
FileOutputStream fstream = new FileOutputStream(f);
fstream.write("somestrings".getBytes());
fstream.close();
File f1 = new File(thisbuildDir, "files_tmp1");
fstream = new FileOutputStream(f1);
fstream.write("somestrings".getBytes());
fstream.close();
// copy files to dfs
Path cachePath = new Path("/cacheDir");
if (!fs.mkdirs(cachePath)) {
throw new IOException(
"Mkdirs failed to create " + cachePath.toString());
}
Path localCachePath = new Path(System.getProperty("test.cache.data"));
Path txtPath = new Path(localCachePath, new Path("test.txt"));
Path jarPath = new Path(localCachePath, new Path("test.jar"));
Path zipPath = new Path(localCachePath, new Path("test.zip"));
Path tarPath = new Path(localCachePath, new Path("test.tar"));
Path tgzPath = new Path(localCachePath, new Path("test.tgz"));
fs.copyFromLocalFile(txtPath, cachePath);
fs.copyFromLocalFile(jarPath, cachePath);
fs.copyFromLocalFile(zipPath, cachePath);
// construct options for -files
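      // each -files entry may carry a "#name" fragment, which creates a
      // symlink with that name in the task's working directory (here: a plain
      // local file, a local file linked as "localfilelink", and an HDFS file
      // linked as "dfsfilelink")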
String[] files = new String[3];
files[0] = f.toString();
files[1] = f1.toString() + "#localfilelink";
files[2] =
fs.getUri().resolve(cachePath + "/test.txt#dfsfilelink").toString();
// construct options for -libjars
String[] libjars = new String[2];
libjars[0] = "build/test/mapred/testjar/testjob.jar";
libjars[1] = fs.getUri().resolve(cachePath + "/test.jar").toString();
// construct options for archives
String[] archives = new String[3];
archives[0] = tgzPath.toString();
archives[1] = tarPath + "#tarlink";
archives[2] =
fs.getUri().resolve(cachePath + "/test.zip#ziplink").toString();
String[] args = new String[10];
args[0] = "-files";
args[1] = StringUtils.arrayToString(files);
args[2] = "-libjars";
      // reuse the prebuilt testjob.jar as a -libjars entry rather than
      // creating a temporary jar here
args[3] = StringUtils.arrayToString(libjars);
args[4] = "-archives";
args[5] = StringUtils.arrayToString(archives);
args[6] = "-D";
args[7] = "mapred.output.committer.class=testjar.CustomOutputCommitter";
args[8] = input.toString();
args[9] = output.toString();
JobConf jobConf = mr.createJobConf();
//before running the job, verify that libjar is not in client classpath
assertTrue("libjar not in client classpath", loadLibJar(jobConf)==null);
int ret = ToolRunner.run(jobConf,
new testshell.ExternalMapReduce(), args);
//after running the job, verify that libjar is in the client classpath
assertTrue("libjar added to client classpath", loadLibJar(jobConf)!=null);
assertTrue("not failed ", ret != -1);
f.delete();
thisbuildDir.delete();
} finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
}
}
@SuppressWarnings("unchecked")
private Class loadLibJar(JobConf jobConf) {
try {
return jobConf.getClassByName("testjar.ClassWordCount");
} catch (ClassNotFoundException e) {
return null;
}
}
}
| 5,679 | 37.90411 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
public class TestMultiFileInputFormat extends TestCase{
private static JobConf job = new JobConf();
private static final Log LOG = LogFactory.getLog(TestMultiFileInputFormat.class);
private static final int MAX_SPLIT_COUNT = 10000;
private static final int SPLIT_COUNT_INCR = 6000;
private static final int MAX_BYTES = 1024;
private static final int MAX_NUM_FILES = 10000;
private static final int NUM_FILES_INCR = 8000;
private Random rand = new Random(System.currentTimeMillis());
private HashMap<String, Long> lengths = new HashMap<String, Long>();
/** Dummy class to extend MultiFileInputFormat*/
private class DummyMultiFileInputFormat extends MultiFileInputFormat<Text, Text> {
@Override
public RecordReader<Text,Text> getRecordReader(InputSplit split, JobConf job
, Reporter reporter) throws IOException {
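      // the record reader is never used by these tests; only getSplits() is exercised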
return null;
}
}
private Path initFiles(FileSystem fs, int numFiles, int numBytes) throws IOException{
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path multiFileDir = new Path(dir, "test.multifile");
fs.delete(multiFileDir, true);
fs.mkdirs(multiFileDir);
LOG.info("Creating " + numFiles + " file(s) in " + multiFileDir);
for(int i=0; i<numFiles ;i++) {
Path path = new Path(multiFileDir, "file_" + i);
FSDataOutputStream out = fs.create(path);
if (numBytes == -1) {
numBytes = rand.nextInt(MAX_BYTES);
}
for(int j=0; j< numBytes; j++) {
out.write(rand.nextInt());
}
out.close();
if(LOG.isDebugEnabled()) {
LOG.debug("Created file " + path + " with length " + numBytes);
}
      lengths.put(path.getName(), Long.valueOf(numBytes));
}
FileInputFormat.setInputPaths(job, multiFileDir);
return multiFileDir;
}
public void testFormat() throws IOException {
LOG.info("Test started");
LOG.info("Max split count = " + MAX_SPLIT_COUNT);
LOG.info("Split count increment = " + SPLIT_COUNT_INCR);
LOG.info("Max bytes per file = " + MAX_BYTES);
LOG.info("Max number of files = " + MAX_NUM_FILES);
LOG.info("Number of files increment = " + NUM_FILES_INCR);
MultiFileInputFormat<Text,Text> format = new DummyMultiFileInputFormat();
FileSystem fs = FileSystem.getLocal(job);
for(int numFiles = 1; numFiles< MAX_NUM_FILES ;
numFiles+= (NUM_FILES_INCR / 2) + rand.nextInt(NUM_FILES_INCR / 2)) {
Path dir = initFiles(fs, numFiles, -1);
BitSet bits = new BitSet(numFiles);
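      // each bit marks a file (by index) that has appeared in some split;
      // a file must never appear twice, and ultimately every file must be
      // covered (checked via bits.cardinality() below)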
for(int i=1;i< MAX_SPLIT_COUNT ;i+= rand.nextInt(SPLIT_COUNT_INCR) + 1) {
LOG.info("Running for Num Files=" + numFiles + ", split count=" + i);
MultiFileSplit[] splits = (MultiFileSplit[])format.getSplits(job, i);
bits.clear();
for(MultiFileSplit split : splits) {
long splitLength = 0;
for(Path p : split.getPaths()) {
long length = fs.getContentSummary(p).getLength();
assertEquals(length, lengths.get(p.getName()).longValue());
splitLength += length;
String name = p.getName();
int index = Integer.parseInt(
name.substring(name.lastIndexOf("file_") + 5));
assertFalse(bits.get(index));
bits.set(index);
}
assertEquals(splitLength, split.getLength());
}
}
assertEquals(bits.cardinality(), numFiles);
fs.delete(dir, true);
}
LOG.info("Test Finished");
}
public void testFormatWithLessPathsThanSplits() throws Exception {
MultiFileInputFormat<Text,Text> format = new DummyMultiFileInputFormat();
FileSystem fs = FileSystem.getLocal(job);
// Test with no path
initFiles(fs, 0, -1);
assertEquals(0, format.getSplits(job, 2).length);
    // Test with 2 paths and 4 splits
initFiles(fs, 2, 500);
assertEquals(2, format.getSplits(job, 4).length);
}
public static void main(String[] args) throws Exception{
TestMultiFileInputFormat test = new TestMultiFileInputFormat();
test.testFormat();
}
}
| 5,405 | 36.541667 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestIFile {
  /**
   * Create an IFile.Writer using GzipCodec since this code does not
   * have a compressor when run via the tests (i.e. no native libraries).
   */
  @Test
public void testIFileWriterWithCodec() throws Exception {
Configuration conf = new Configuration();
FileSystem localFs = FileSystem.getLocal(conf);
FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
Path path = new Path(new Path("build/test.ifile"), "data");
DefaultCodec codec = new GzipCodec();
codec.setConf(conf);
IFile.Writer<Text, Text> writer =
new IFile.Writer<Text, Text>(conf, rfs.create(path), Text.class, Text.class,
codec, null);
writer.close();
}
  /** Same as above, but also create a reader. */
  @Test
public void testIFileReaderWithCodec() throws Exception {
Configuration conf = new Configuration();
FileSystem localFs = FileSystem.getLocal(conf);
FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
Path path = new Path(new Path("build/test.ifile"), "data");
DefaultCodec codec = new GzipCodec();
codec.setConf(conf);
FSDataOutputStream out = rfs.create(path);
IFile.Writer<Text, Text> writer =
new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class,
codec, null);
writer.close();
FSDataInputStream in = rfs.open(path);
IFile.Reader<Text, Text> reader =
new IFile.Reader<Text, Text>(conf, in, rfs.getFileStatus(path).getLen(),
codec, null);
reader.close();
    // test checksum
    byte[] ab = new byte[100];
    int bytesRead = reader.checksumIn.readWithChecksum(ab, 0, ab.length);
    assertEquals(reader.checksumIn.getChecksum().length, bytesRead);
}
}
| 3,057 | 37.708861 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.UtilsForTests.RandomInputFormat;
import org.apache.hadoop.mapreduce.MRConfig;
import junit.framework.TestCase;
import java.io.*;
import java.util.*;
/**
* TestCollect checks if the collect can handle simultaneous invocations.
*/
public class TestCollect extends TestCase
{
final static Path OUTPUT_DIR = new Path("build/test/test.collect.output");
static final int NUM_FEEDERS = 10;
static final int NUM_COLLECTS_PER_THREAD = 1000;
/**
* Map is a Mapper that spawns threads which simultaneously call collect.
* Each thread has a specific range to write to the buffer and is unique to
* the thread. This is a synchronization test for the map's collect.
*/
static class Map
implements Mapper<Text, Text, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void map(Text key, Text val,
final OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
// Class for calling collect in separate threads
class CollectFeeder extends Thread {
int id; // id for the thread
public CollectFeeder(int id) {
this.id = id;
}
public void run() {
for (int j = 1; j <= NUM_COLLECTS_PER_THREAD; j++) {
try {
out.collect(new IntWritable((id * NUM_COLLECTS_PER_THREAD) + j),
new IntWritable(0));
} catch (IOException ioe) { }
}
}
}
CollectFeeder [] feeders = new CollectFeeder[NUM_FEEDERS];
// start the feeders
for (int i = 0; i < NUM_FEEDERS; i++) {
feeders[i] = new CollectFeeder(i);
feeders[i].start();
}
// wait for them to finish
for (int i = 0; i < NUM_FEEDERS; i++) {
try {
feeders[i].join();
} catch (InterruptedException ie) {
throw new IOException(ie.toString());
}
}
}
public void close() {
}
}
static class Reduce
implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
static int numSeen;
static int actualSum;
public void configure(JobConf job) { }
public void reduce(IntWritable key, Iterator<IntWritable> val,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
actualSum += key.get(); // keep the running count of the seen values
numSeen++; // number of values seen so far
// using '1+2+3+...n = n*(n+1)/2' to validate
int expectedSum = numSeen * (numSeen + 1) / 2;
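      // e.g. after the 3rd key the reducer must have seen 1, 2 and 3, so the
      // running sum must be 6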
if (expectedSum != actualSum) {
throw new IOException("Collect test failed!! Ordering mismatch.");
}
}
public void close() { }
}
public void configure(JobConf conf) throws IOException {
conf.setJobName("TestCollect");
conf.setJarByClass(TestCollect.class);
conf.setInputFormat(RandomInputFormat.class); // for self data generation
conf.setOutputKeyClass(IntWritable.class);
conf.setOutputValueClass(IntWritable.class);
FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.setMapperClass(Map.class);
conf.setReducerClass(Reduce.class);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
}
public void testCollect() throws IOException {
JobConf conf = new JobConf();
configure(conf);
try {
JobClient.runJob(conf);
// check if all the values were seen by the reducer
if (Reduce.numSeen != (NUM_COLLECTS_PER_THREAD * NUM_FEEDERS)) {
throw new IOException("Collect test failed!! Total does not match.");
}
    } finally {
FileSystem fs = FileSystem.get(conf);
fs.delete(OUTPUT_DIR, true);
}
}
public static void main(String[] args) throws IOException {
new TestCollect().testCollect();
}
}
| 5,005 | 31.718954 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapProgress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapreduce.split.JobSplitWriter;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * Validates map phase progress.
 * The test case uses the new API.
 * We extend the Task.TaskReporter class and override setProgress()
 * to validate the map phase progress as it is set.
 * We extend MapTask and override its startReporter() method so that it
 * creates a TestTaskReporter instead of a TaskReporter, and then call
 * mapTask.run(). Similar to LocalJobRunner, we set up splits and call
 * mapTask.run() directly. No job is run; only the map task is run.
 * As the reporter's setProgress() validates progress after
 * every record is read, we are done with the validation of map phase progress
 * once mapTask.run() is finished. Sort phase progress in the map task is not
 * validated here.
*/
public class TestMapProgress extends TestCase {
public static final Log LOG = LogFactory.getLog(TestMapProgress.class);
private static String TEST_ROOT_DIR;
static {
String root = new File(System.getProperty("test.build.data", "/tmp"))
.getAbsolutePath();
TEST_ROOT_DIR = new Path(root, "mapPhaseprogress").toString();
}
static class FakeUmbilical implements TaskUmbilicalProtocol {
public long getProtocolVersion(String protocol, long clientVersion) {
return TaskUmbilicalProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(
this, protocol, clientVersion, clientMethodsHash);
}
public void done(TaskAttemptID taskid) throws IOException {
LOG.info("Task " + taskid + " reporting done.");
}
public void fsError(TaskAttemptID taskId, String message) throws IOException {
LOG.info("Task " + taskId + " reporting file system error: " + message);
}
public void shuffleError(TaskAttemptID taskId, String message) throws IOException {
LOG.info("Task " + taskId + " reporting shuffle error: " + message);
}
public void fatalError(TaskAttemptID taskId, String msg) throws IOException {
LOG.info("Task " + taskId + " reporting fatal error: " + msg);
}
public JvmTask getTask(JvmContext context) throws IOException {
return null;
}
public boolean ping(TaskAttemptID taskid) throws IOException {
return true;
}
public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException {
statusUpdate(taskId, taskStatus);
}
public boolean canCommit(TaskAttemptID taskid) throws IOException {
return true;
}
public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException {
StringBuffer buf = new StringBuffer("Task ");
buf.append(taskId);
if (taskStatus != null) {
buf.append(" making progress to ");
buf.append(taskStatus.getProgress());
String state = taskStatus.getStateString();
if (state != null) {
buf.append(" and state of ");
buf.append(state);
}
}
LOG.info(buf.toString());
// ignore phase
// ignore counters
return true;
}
public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) throws IOException {
LOG.info("Task " + taskid + " has problem " + trace);
}
public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId,
int fromEventId, int maxLocs, TaskAttemptID id) throws IOException {
return new MapTaskCompletionEventsUpdate(TaskCompletionEvent.EMPTY_ARRAY,
false);
}
public void reportNextRecordRange(TaskAttemptID taskid,
SortedRanges.Range range) throws IOException {
LOG.info("Task " + taskid + " reportedNextRecordRange " + range);
}
}
private FileSystem fs = null;
private TestMapTask map = null;
private JobID jobId = null;
private FakeUmbilical fakeUmbilical = new FakeUmbilical();
/**
* Task Reporter that validates map phase progress after each record is
* processed by map task
*/
public class TestTaskReporter extends Task.TaskReporter {
private int recordNum = 0; // number of records processed
TestTaskReporter(Task task) {
task.super(task.getProgress(), fakeUmbilical);
}
@Override
public void setProgress(float progress) {
super.setProgress(progress);
float mapTaskProgress = map.getProgress().getProgress();
LOG.info("Map task progress is " + mapTaskProgress);
if (recordNum < 3) {
// only 3 records are there; Ignore validating progress after 3 times
recordNum++;
}
else {
return;
}
// validate map task progress when the map task is in map phase
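      // the input file has exactly 3 records, so after the k-th record the
      // reported map progress should be k/3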
assertTrue("Map progress is not the expected value.",
Math.abs(mapTaskProgress - ((float)recordNum/3)) < 0.001);
}
}
/**
* Map Task that overrides run method and uses TestTaskReporter instead of
* TaskReporter and uses FakeUmbilical.
*/
class TestMapTask extends MapTask {
public TestMapTask(String jobFile, TaskAttemptID taskId,
int partition, TaskSplitIndex splitIndex,
int numSlotsRequired) {
super(jobFile, taskId, partition, splitIndex, numSlotsRequired);
}
/**
* Create a TestTaskReporter and use it for validating map phase progress
*/
@Override
TaskReporter startReporter(final TaskUmbilicalProtocol umbilical) {
// start thread that will handle communication with parent
TaskReporter reporter = new TestTaskReporter(map);
return reporter;
}
}
// In the given dir, creates part-0 file with 3 records of same size
private void createInputFile(Path rootDir) throws IOException {
if(fs.exists(rootDir)){
fs.delete(rootDir, true);
}
String str = "The quick brown fox\n" + "The brown quick fox\n"
+ "The fox brown quick\n";
DataOutputStream inpFile = fs.create(new Path(rootDir, "part-0"));
inpFile.writeBytes(str);
inpFile.close();
}
/**
* Validates map phase progress after each record is processed by map task
* using custom task reporter.
*/
public void testMapProgress() throws Exception {
JobConf job = new JobConf();
fs = FileSystem.getLocal(job);
Path rootDir = new Path(TEST_ROOT_DIR);
createInputFile(rootDir);
job.setNumReduceTasks(0);
TaskAttemptID taskId = TaskAttemptID.forName(
"attempt_200907082313_0424_m_000000_0");
job.setClass("mapreduce.job.outputformat.class",
NullOutputFormat.class, OutputFormat.class);
job.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
TEST_ROOT_DIR);
jobId = taskId.getJobID();
JobContext jContext = new JobContextImpl(job, jobId);
InputFormat<?, ?> input =
ReflectionUtils.newInstance(jContext.getInputFormatClass(), job);
List<InputSplit> splits = input.getSplits(jContext);
JobSplitWriter.createSplitFiles(new Path(TEST_ROOT_DIR), job,
new Path(TEST_ROOT_DIR).getFileSystem(job),
splits);
TaskSplitMetaInfo[] splitMetaInfo =
SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, job, new Path(TEST_ROOT_DIR));
job.setUseNewMapper(true); // use new api
for (int i = 0; i < splitMetaInfo.length; i++) {// rawSplits.length is 1
map = new TestMapTask(
job.get(JTConfig.JT_SYSTEM_DIR, "/tmp/hadoop/mapred/system") +
jobId + "job.xml",
taskId, i,
splitMetaInfo[i].getSplitIndex(), 1);
JobConf localConf = new JobConf(job);
map.localizeConfiguration(localConf);
map.setConf(localConf);
map.run(localConf, fakeUmbilical);
}
// clean up
fs.delete(rootDir, true);
}
}
| 9,885 | 36.165414 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
public class TestSequenceFileInputFilter extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static final int MAX_LENGTH = 15000;
private static final Configuration conf = new Configuration();
private static final JobConf job = new JobConf(conf);
private static final FileSystem fs;
private static final Path inDir = new Path(System.getProperty("test.build.data",".") + "/mapred");
private static final Path inFile = new Path(inDir, "test.seq");
private static final Random random = new Random(1);
private static final Reporter reporter = Reporter.NULL;
static {
FileInputFormat.setInputPaths(job, inDir);
try {
fs = FileSystem.getLocal(conf);
} catch (IOException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private static void createSequenceFile(int numRecords) throws Exception {
    // create a file with numRecords entries
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, inFile,
Text.class, BytesWritable.class);
try {
for (int i = 1; i <= numRecords; i++) {
Text key = new Text(Integer.toString(i));
byte[] data = new byte[random.nextInt(10)];
random.nextBytes(data);
BytesWritable value = new BytesWritable(data);
writer.append(key, value);
}
} finally {
writer.close();
}
}
private int countRecords(int numSplits) throws IOException {
InputFormat<Text, BytesWritable> format =
new SequenceFileInputFilter<Text, BytesWritable>();
Text key = new Text();
BytesWritable value = new BytesWritable();
if (numSplits==0) {
numSplits =
random.nextInt(MAX_LENGTH/(SequenceFile.SYNC_INTERVAL/20))+1;
}
InputSplit[] splits = format.getSplits(job, numSplits);
// check each split
int count = 0;
LOG.info("Generated " + splits.length + " splits.");
for (int j = 0; j < splits.length; j++) {
RecordReader<Text, BytesWritable> reader =
format.getRecordReader(splits[j], job, reporter);
try {
while (reader.next(key, value)) {
LOG.info("Accept record "+key.toString());
count++;
}
} finally {
reader.close();
}
}
return count;
}
public void testRegexFilter() throws Exception {
// set the filter class
LOG.info("Testing Regex Filter with patter: \\A10*");
SequenceFileInputFilter.setFilterClass(job,
SequenceFileInputFilter.RegexFilter.class);
SequenceFileInputFilter.RegexFilter.setPattern(job, "\\A10*");
// clean input dir
fs.delete(inDir, true);
// for a variety of lengths
for (int length = 1; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
LOG.info("******Number of records: "+length);
createSequenceFile(length);
int count = countRecords(0);
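      // keys are "1".."length" and the pattern \A10* only accepts 1, 10,
      // 100, ..., so floor(log10(length)) + 1 records should be accepted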
assertEquals(count, length==0?0:(int)Math.log10(length)+1);
}
// clean up
fs.delete(inDir, true);
}
public void testPercentFilter() throws Exception {
LOG.info("Testing Percent Filter with frequency: 1000");
// set the filter class
SequenceFileInputFilter.setFilterClass(job,
SequenceFileInputFilter.PercentFilter.class);
SequenceFileInputFilter.PercentFilter.setFrequency(job, 1000);
// clean input dir
fs.delete(inDir, true);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
LOG.info("******Number of records: "+length);
createSequenceFile(length);
int count = countRecords(1);
LOG.info("Accepted "+count+" records");
int expectedCount = length/1000;
if (expectedCount*1000!=length)
expectedCount++;
assertEquals(count, expectedCount);
}
// clean up
fs.delete(inDir, true);
}
public void testMD5Filter() throws Exception {
// set the filter class
LOG.info("Testing MD5 Filter with frequency: 1000");
SequenceFileInputFilter.setFilterClass(job,
SequenceFileInputFilter.MD5Filter.class);
SequenceFileInputFilter.MD5Filter.setFrequency(job, 1000);
// clean input dir
fs.delete(inDir, true);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
LOG.info("******Number of records: "+length);
createSequenceFile(length);
LOG.info("Accepted "+countRecords(0)+" records");
}
// clean up
fs.delete(inDir, true);
}
public static void main(String[] args) throws Exception {
TestSequenceFileInputFilter filter = new TestSequenceFileInputFilter();
filter.testRegexFilter();
}
}
| 5,950 | 32.621469 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
/**
 * Test case to run a MapReduce job.
 * <p>
 * It runs a 2-node Hadoop MR cluster with a 2-node DFS.
 * <p>
 * The JobConf to use must be obtained via the createJobConf() method.
 * <p>
 * It creates a temporary directory (accessible via getTestRootDir())
 * for both input and output.
 * <p>
 * The input directory is accessible via getInputDir() and the output
 * directory via getOutputDir().
 * <p>
 * The DFS filesystem is formatted before the testcase starts and after it ends.
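 * <p>
 * A minimal, illustrative subclass (hypothetical names, not part of this
 * class) might look like:
 * <pre>
 *   public class MyJobTestCase extends ClusterMapReduceTestCase {
 *     public void testMyJob() throws Exception {
 *       JobConf conf = createJobConf();
 *       // write input under getInputDir() using getFileSystem(), point the
 *       // job at getInputDir()/getOutputDir(), then run it with
 *       // JobClient.runJob(conf) and inspect the output
 *     }
 *   }
 * </pre>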
*/
public abstract class ClusterMapReduceTestCase extends TestCase {
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
/**
* Creates Hadoop Cluster and DFS before a test case is run.
*
* @throws Exception
*/
protected void setUp() throws Exception {
super.setUp();
startCluster(true, null);
}
/**
* Starts the cluster within a testcase.
* <p/>
* Note that the cluster is already started when the testcase method
   * is invoked. This method is useful if, as part of the testcase, the
   * cluster has to be shut down and restarted.
* <p/>
* If the cluster is already running this method does nothing.
*
* @param reformatDFS indicates if DFS has to be reformated
* @param props configuration properties to inject to the mini cluster
* @throws Exception if the cluster could not be started
*/
protected synchronized void startCluster(boolean reformatDFS, Properties props)
throws Exception {
if (dfsCluster == null) {
JobConf conf = new JobConf();
if (props != null) {
for (Map.Entry entry : props.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
}
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(reformatDFS).racks(null).build();
ConfigurableMiniMRCluster.setConfiguration(props);
//noinspection deprecation
mrCluster = new ConfigurableMiniMRCluster(2,
getFileSystem().getUri().toString(), 1, conf);
}
}
private static class ConfigurableMiniMRCluster extends MiniMRCluster {
private static Properties config;
public static void setConfiguration(Properties props) {
config = props;
}
public ConfigurableMiniMRCluster(int numTaskTrackers, String namenode,
int numDir, JobConf conf)
throws Exception {
super(0,0, numTaskTrackers, namenode, numDir, null, null, null, conf);
}
public JobConf createJobConf() {
JobConf conf = super.createJobConf();
if (config != null) {
for (Map.Entry entry : config.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
}
return conf;
}
}
/**
* Stops the cluster within a testcase.
* <p/>
* Note that the cluster is already started when the testcase method
   * is invoked. This method is useful if, as part of the testcase, the
   * cluster has to be shut down.
* <p/>
* If the cluster is already stopped this method does nothing.
*
* @throws Exception if the cluster could not be stopped
*/
protected void stopCluster() throws Exception {
if (mrCluster != null) {
mrCluster.shutdown();
mrCluster = null;
}
if (dfsCluster != null) {
dfsCluster.shutdown();
dfsCluster = null;
}
}
/**
* Destroys Hadoop Cluster and DFS after a test case is run.
*
* @throws Exception
*/
protected void tearDown() throws Exception {
stopCluster();
super.tearDown();
}
/**
   * Returns a preconfigured FileSystem instance that test cases can use to
   * read and write files.
* <p/>
* TestCases should use this Filesystem instance.
*
* @return the filesystem used by Hadoop.
* @throws IOException
*/
protected FileSystem getFileSystem() throws IOException {
return dfsCluster.getFileSystem();
}
protected MiniMRCluster getMRCluster() {
return mrCluster;
}
/**
* Returns the path to the root directory for the testcase.
*
* @return path to the root directory for the testcase.
*/
protected Path getTestRootDir() {
return new Path("x").getParent();
}
/**
* Returns a path to the input directory for the testcase.
*
   * @return path to the input directory for the testcase.
*/
protected Path getInputDir() {
return new Path("target/input");
}
/**
* Returns a path to the output directory for the testcase.
*
   * @return path to the output directory for the testcase.
*/
protected Path getOutputDir() {
return new Path("target/output");
}
/**
   * Returns a job configuration preconfigured to run against the Hadoop
   * cluster managed by the testcase.
*
* @return configuration that works on the testcase Hadoop instance
*/
protected JobConf createJobConf() {
return mrCluster.createJobConf();
}
}
| 5,999 | 28.70297 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
import org.apache.hadoop.mapred.JobClient.NetworkedJob;
import org.apache.hadoop.mapred.JobClient.TaskStatusFilter;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.junit.Test;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
public class TestNetworkedJob {
private static String TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp")).toURI().toString().replace(' ', '+');
private static Path testDir = new Path(TEST_ROOT_DIR + "/test_mini_mr_local");
private static Path inFile = new Path(testDir, "in");
private static Path outDir = new Path(testDir, "out");
@Test (timeout=5000)
public void testGetNullCounters() throws Exception {
//mock creation
Job mockJob = mock(Job.class);
RunningJob underTest = new JobClient.NetworkedJob(mockJob);
when(mockJob.getCounters()).thenReturn(null);
assertNull(underTest.getCounters());
//verification
verify(mockJob).getCounters();
}
@Test (timeout=500000)
public void testGetJobStatus() throws IOException, InterruptedException,
ClassNotFoundException {
MiniMRClientCluster mr = null;
FileSystem fileSys = null;
try {
mr = createMiniClusterWithCapacityScheduler();
JobConf job = new JobConf(mr.getConfig());
fileSys = FileSystem.get(job);
fileSys.delete(testDir, true);
FSDataOutputStream out = fileSys.create(inFile, true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job, inFile);
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
RunningJob rj = client.submitJob(job);
JobID jobId = rj.getID();
// The following asserts read JobStatus twice and ensure the returned
// JobStatus objects correspond to the same Job.
assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId)
.getJobStatus().getJobID());
assertEquals("Expected matching startTimes", rj.getJobStatus()
.getStartTime(), client.getJob(jobId).getJobStatus()
.getStartTime());
} finally {
if (fileSys != null) {
fileSys.delete(testDir, true);
}
if (mr != null) {
mr.stop();
}
}
}
/**
* test JobConf
* @throws Exception
*/
@SuppressWarnings( "deprecation" )
@Test (timeout=500000)
public void testNetworkedJob() throws Exception {
// mock creation
MiniMRClientCluster mr = null;
FileSystem fileSys = null;
try {
mr = createMiniClusterWithCapacityScheduler();
JobConf job = new JobConf(mr.getConfig());
fileSys = FileSystem.get(job);
fileSys.delete(testDir, true);
FSDataOutputStream out = fileSys.create(inFile, true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job, inFile);
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
RunningJob rj = client.submitJob(job);
JobID jobId = rj.getID();
NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
runningJob.setJobPriority(JobPriority.HIGH.name());
// test getters
assertTrue(runningJob.getConfiguration().toString()
.endsWith("0001/job.xml"));
assertEquals(runningJob.getID(), jobId);
assertEquals(runningJob.getJobID(), jobId.toString());
assertEquals(runningJob.getJobName(), "N/A");
assertTrue(runningJob.getJobFile().endsWith(
".staging/" + runningJob.getJobID() + "/job.xml"));
assertTrue(runningJob.getTrackingURL().length() > 0);
assertTrue(runningJob.mapProgress() == 0.0f);
assertTrue(runningJob.reduceProgress() == 0.0f);
assertTrue(runningJob.cleanupProgress() == 0.0f);
assertTrue(runningJob.setupProgress() == 0.0f);
TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
assertEquals(tce.length, 0);
assertEquals(runningJob.getHistoryUrl(),"");
assertFalse(runningJob.isRetired());
assertEquals( runningJob.getFailureInfo(),"");
assertEquals(runningJob.getJobStatus().getJobName(), "N/A");
assertEquals(client.getMapTaskReports(jobId).length, 0);
try {
client.getSetupTaskReports(jobId);
} catch (YarnRuntimeException e) {
assertEquals(e.getMessage(), "Unrecognized task type: JOB_SETUP");
}
try {
client.getCleanupTaskReports(jobId);
} catch (YarnRuntimeException e) {
assertEquals(e.getMessage(), "Unrecognized task type: JOB_CLEANUP");
}
assertEquals(client.getReduceTaskReports(jobId).length, 0);
// test ClusterStatus
ClusterStatus status = client.getClusterStatus(true);
assertEquals(status.getActiveTrackerNames().size(), 2);
      // these methods are not implemented and always return an empty array or null
assertEquals(status.getBlacklistedTrackers(), 0);
assertEquals(status.getBlacklistedTrackerNames().size(), 0);
assertEquals(status.getBlackListedTrackersInfo().size(), 0);
assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
assertEquals(status.getMapTasks(), 1);
assertEquals(status.getMaxMapTasks(), 20);
assertEquals(status.getMaxReduceTasks(), 4);
assertEquals(status.getNumExcludedNodes(), 0);
assertEquals(status.getReduceTasks(), 1);
assertEquals(status.getTaskTrackers(), 2);
assertEquals(status.getTTExpiryInterval(), 0);
assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
assertEquals(status.getGraylistedTrackers(), 0);
// test read and write
ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
status.write(new DataOutputStream(dataOut));
ClusterStatus status2 = new ClusterStatus();
status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut
.toByteArray())));
assertEquals(status.getActiveTrackerNames(),
status2.getActiveTrackerNames());
assertEquals(status.getBlackListedTrackersInfo(),
status2.getBlackListedTrackersInfo());
assertEquals(status.getMapTasks(), status2.getMapTasks());
// test taskStatusfilter
JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
assertEquals(JobClient.getTaskOutputFilter(job), TaskStatusFilter.ALL);
// runningJob.setJobPriority(JobPriority.HIGH.name());
// test default map
assertEquals(client.getDefaultMaps(), 20);
assertEquals(client.getDefaultReduces(), 4);
assertEquals(client.getSystemDir().getName(), "jobSubmitDir");
// test queue information
JobQueueInfo[] rootQueueInfo = client.getRootQueues();
assertEquals(rootQueueInfo.length, 1);
assertEquals(rootQueueInfo[0].getQueueName(), "default");
JobQueueInfo[] qinfo = client.getQueues();
assertEquals(qinfo.length, 1);
assertEquals(qinfo[0].getQueueName(), "default");
assertEquals(client.getChildQueues("default").length, 0);
assertEquals(client.getJobsFromQueue("default").length, 1);
assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith(
"/job.xml"));
JobQueueInfo qi = client.getQueueInfo("default");
assertEquals(qi.getQueueName(), "default");
assertEquals(qi.getQueueState(), "running");
QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
assertEquals(aai.length, 2);
assertEquals(aai[0].getQueueName(), "root");
assertEquals(aai[1].getQueueName(), "default");
// test token
Token<DelegationTokenIdentifier> token = client
.getDelegationToken(new Text(UserGroupInformation.getCurrentUser()
.getShortUserName()));
assertEquals(token.getKind().toString(), "RM_DELEGATION_TOKEN");
// test JobClient
// The following asserts read JobStatus twice and ensure the returned
// JobStatus objects correspond to the same Job.
assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId)
.getJobStatus().getJobID());
assertEquals("Expected matching startTimes", rj.getJobStatus()
.getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
} finally {
if (fileSys != null) {
fileSys.delete(testDir, true);
}
if (mr != null) {
mr.stop();
}
}
}
/**
* test BlackListInfo class
*
* @throws IOException
*/
@Test (timeout=5000)
public void testBlackListInfo() throws IOException {
BlackListInfo info = new BlackListInfo();
info.setBlackListReport("blackListInfo");
info.setReasonForBlackListing("reasonForBlackListing");
info.setTrackerName("trackerName");
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
DataOutput out = new DataOutputStream(byteOut);
info.write(out);
BlackListInfo info2 = new BlackListInfo();
info2.readFields(new DataInputStream(new ByteArrayInputStream(byteOut
.toByteArray())));
    assertEquals(info, info2);
    assertEquals(info.toString(), info2.toString());
assertEquals(info.getTrackerName(), "trackerName");
assertEquals(info.getReasonForBlackListing(), "reasonForBlackListing");
assertEquals(info.getBlackListReport(), "blackListInfo");
}
/**
* test run from command line JobQueueClient
* @throws Exception
*/
@Test (timeout=500000)
public void testJobQueueClient() throws Exception {
MiniMRClientCluster mr = null;
FileSystem fileSys = null;
PrintStream oldOut = System.out;
try {
mr = createMiniClusterWithCapacityScheduler();
JobConf job = new JobConf(mr.getConfig());
fileSys = FileSystem.get(job);
fileSys.delete(testDir, true);
FSDataOutputStream out = fileSys.create(inFile, true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job, inFile);
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
client.submitJob(job);
JobQueueClient jobClient = new JobQueueClient(job);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg = { "-list" };
jobClient.run(arg);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg1 = { "-showacls" };
jobClient.run(arg1);
assertTrue(bytes.toString().contains("Queue acls for user :"));
assertTrue(bytes.toString().contains(
"root ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"));
assertTrue(bytes.toString().contains(
"default ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"));
// test for info and default queue
bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg2 = { "-info", "default" };
jobClient.run(arg2);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
assertTrue(bytes.toString().contains("Scheduling Info"));
// test for info , default queue and jobs
bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg3 = { "-info", "default", "-showJobs" };
jobClient.run(arg3);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
assertTrue(bytes.toString().contains("Scheduling Info"));
assertTrue(bytes.toString().contains("job_1"));
String[] arg4 = {};
jobClient.run(arg4);
} finally {
System.setOut(oldOut);
if (fileSys != null) {
fileSys.delete(testDir, true);
}
if (mr != null) {
mr.stop();
}
}
}
private MiniMRClientCluster createMiniClusterWithCapacityScheduler()
throws IOException {
Configuration conf = new Configuration();
// Expected queue names depending on Capacity Scheduler queue naming
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
CapacityScheduler.class);
return MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
}
}
| 15,295 | 36.861386 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestAuditLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.TestRPC.TestImpl;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.mapred.AuditLogger.Keys;
import org.apache.hadoop.net.NetUtils;
import junit.framework.TestCase;
/**
* Tests {@link AuditLogger}.
*/
public class TestAuditLogger extends TestCase {
private static final String USER = "test";
private static final String OPERATION = "oper";
private static final String TARGET = "tgt";
private static final String PERM = "admin group";
private static final String DESC = "description of an audit log";
/**
* Test the AuditLog format with key-val pair.
*/
public void testKeyValLogFormat() {
StringBuilder actLog = new StringBuilder();
StringBuilder expLog = new StringBuilder();
// add the first k=v pair and check
AuditLogger.start(Keys.USER, USER, actLog);
expLog.append("USER=test");
assertEquals(expLog.toString(), actLog.toString());
// append another k1=v1 pair to already added k=v and test
AuditLogger.add(Keys.OPERATION, OPERATION, actLog);
expLog.append("\tOPERATION=oper");
assertEquals(expLog.toString(), actLog.toString());
// append another k1=null pair and test
AuditLogger.add(Keys.PERMISSIONS, (String)null, actLog);
expLog.append("\tPERMISSIONS=null");
assertEquals(expLog.toString(), actLog.toString());
// now add the target and check of the final string
AuditLogger.add(Keys.TARGET, TARGET, actLog);
expLog.append("\tTARGET=tgt");
assertEquals(expLog.toString(), actLog.toString());
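    // The accumulated log line now reads:
    // USER=test\tOPERATION=oper\tPERMISSIONS=null\tTARGET=tgt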
}
/**
* Test the AuditLog format for successful events.
*/
private void testSuccessLogFormat(boolean checkIP) {
// check without the IP
String sLog = AuditLogger.createSuccessLog(USER, OPERATION, TARGET);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=test\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t");
}
expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=SUCCESS");
assertEquals(expLog.toString(), sLog);
}
/**
* Test the AuditLog format for failure events.
*/
private void testFailureLogFormat(boolean checkIP, String perm) {
String fLog =
AuditLogger.createFailureLog(USER, OPERATION, perm, TARGET, DESC);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=test\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t");
}
expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=FAILURE\t");
expLog.append("DESCRIPTION=description of an audit log\t");
expLog.append("PERMISSIONS=" + perm);
assertEquals(expLog.toString(), fLog);
}
/**
* Test the AuditLog format for failure events.
*/
private void testFailureLogFormat(boolean checkIP) {
testFailureLogFormat(checkIP, PERM);
testFailureLogFormat(checkIP, null);
}
/**
* Test {@link AuditLogger} without IP set.
*/
public void testAuditLoggerWithoutIP() throws Exception {
// test without ip
testSuccessLogFormat(false);
testFailureLogFormat(false);
}
/**
* A special extension of {@link TestImpl} RPC server with
* {@link TestImpl#ping()} testing the audit logs.
*/
@ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRPC$TestProtocol")
private class MyTestRPCServer extends TestImpl {
@Override
public void ping() {
// test with ip set
testSuccessLogFormat(true);
testFailureLogFormat(true);
}
}
/**
* Test {@link AuditLogger} with IP set.
*/
public void testAuditLoggerWithIP() throws Exception {
Configuration conf = new Configuration();
// start the IPC server
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
.setPort(0).build();
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
// Make a client connection and test the audit log
TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
// Start the testcase
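    // ping() runs server-side in MyTestRPCServer, where Server.getRemoteIp() is
    // non-null, so the IP-including log formats are exercised there.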
proxy.ping();
server.stop();
}
}
| 5,382 | 32.855346 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Random;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.MRConfig;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Two different types of comparators can be used in MapReduce. One is used
* during the Map and Reduce phases, to sort/merge key-value pairs. Another
* is used to group values for a particular key, when calling the user's
 * reducer. A user can override both of these.
* This class has tests for making sure we use the right comparators at the
* right places. See Hadoop issues 485 and 1535. Our tests:
* 1. Test that the same comparator is used for all sort/merge operations
* during the Map and Reduce phases.
* 2. Test the common use case where values are grouped by keys but values
* within each key are grouped by a secondary key (a timestamp, for example).
*/
public class TestComparators {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestComparators-mapred");
JobConf conf = new JobConf(TestMapOutputType.class);
JobClient jc;
static Random rng = new Random();
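  // For reference, the two hooks under test are set on JobConf roughly as follows
  // (a minimal sketch; each test below configures them explicitly for its case):
  //   conf.setOutputKeyComparatorClass(...);      // sort/merge order of keys
  //   conf.setOutputValueGroupingComparator(...); // grouping of values per reduce() call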
/**
* RandomGen is a mapper that generates 5 random values for each key
* in the input. The values are in the range [0-4]. The mapper also
* generates a composite key. If the input key is x and the generated
* value is y, the composite key is x0y (x-zero-y). Therefore, the inter-
* mediate key value pairs are ordered by {input key, value}.
* Think of the random value as a timestamp associated with the record.
*/
static class RandomGenMapper
implements Mapper<IntWritable, Writable, IntWritable, IntWritable> {
public void configure(JobConf job) {
}
public void map(IntWritable key, Writable value,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
int num_values = 5;
for(int i = 0; i < num_values; ++i) {
int val = rng.nextInt(num_values);
int compositeKey = key.get() * 100 + val;
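        // e.g. an input key of 23 with generated value 4 yields composite key 2304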
out.collect(new IntWritable(compositeKey), new IntWritable(val));
}
}
public void close() {
}
}
/**
* Your basic identity mapper.
*/
static class IdentityMapper
implements Mapper<WritableComparable, Writable,
WritableComparable, Writable> {
public void configure(JobConf job) {
}
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out,
Reporter reporter) throws IOException {
out.collect(key, value);
}
public void close() {
}
}
/**
* Checks whether keys are in ascending order.
*/
static class AscendingKeysReducer
implements Reducer<IntWritable, Writable, IntWritable, Text> {
public void configure(JobConf job) {}
// keep track of the last key we've seen
private int lastKey = Integer.MIN_VALUE;
public void reduce(IntWritable key, Iterator<Writable> values,
OutputCollector<IntWritable, Text> out,
Reporter reporter) throws IOException {
int currentKey = key.get();
// keys should be in ascending order
if (currentKey < lastKey) {
fail("Keys not in sorted ascending order");
}
lastKey = currentKey;
out.collect(key, new Text("success"));
}
public void close() {}
}
/**
   * Checks whether keys are in descending order.
*/
static class DescendingKeysReducer
implements Reducer<IntWritable, Writable, IntWritable, Text> {
public void configure(JobConf job) {}
// keep track of the last key we've seen
private int lastKey = Integer.MAX_VALUE;
public void reduce(IntWritable key, Iterator<Writable> values,
OutputCollector<IntWritable, Text> out,
Reporter reporter) throws IOException {
int currentKey = key.get();
// keys should be in descending order
if (currentKey > lastKey) {
fail("Keys not in sorted descending order");
}
lastKey = currentKey;
out.collect(key, new Text("success"));
}
public void close() {}
}
/** The reducer checks whether the input values are in ascending order and
* whether they are correctly grouped by key (i.e. each call to reduce
* should have 5 values if the grouping is correct). It also checks whether
* the keys themselves are in ascending order.
*/
static class AscendingGroupReducer
implements Reducer<IntWritable, IntWritable, IntWritable, Text> {
public void configure(JobConf job) {
}
// keep track of the last key we've seen
private int lastKey = Integer.MIN_VALUE;
public void reduce(IntWritable key,
Iterator<IntWritable> values,
OutputCollector<IntWritable, Text> out,
Reporter reporter) throws IOException {
// check key order
int currentKey = key.get();
if (currentKey < lastKey) {
fail("Keys not in sorted ascending order");
}
lastKey = currentKey;
// check order of values
IntWritable previous = new IntWritable(Integer.MIN_VALUE);
int valueCount = 0;
while (values.hasNext()) {
IntWritable current = values.next();
// Check that the values are sorted
if (current.compareTo(previous) < 0)
fail("Values generated by Mapper not in order");
previous = current;
++valueCount;
}
if (valueCount != 5) {
fail("Values not grouped by primary key");
}
out.collect(key, new Text("success"));
}
public void close() {
}
}
/** The reducer checks whether the input values are in descending order and
* whether they are correctly grouped by key (i.e. each call to reduce
* should have 5 values if the grouping is correct).
*/
static class DescendingGroupReducer
implements Reducer<IntWritable, IntWritable, IntWritable, Text> {
public void configure(JobConf job) {
}
// keep track of the last key we've seen
private int lastKey = Integer.MAX_VALUE;
public void reduce(IntWritable key,
Iterator<IntWritable> values,
OutputCollector<IntWritable, Text> out,
Reporter reporter) throws IOException {
// check key order
int currentKey = key.get();
if (currentKey > lastKey) {
fail("Keys not in sorted descending order");
}
lastKey = currentKey;
// check order of values
IntWritable previous = new IntWritable(Integer.MAX_VALUE);
int valueCount = 0;
while (values.hasNext()) {
IntWritable current = values.next();
// Check that the values are sorted
if (current.compareTo(previous) > 0)
fail("Values generated by Mapper not in order");
previous = current;
++valueCount;
}
if (valueCount != 5) {
fail("Values not grouped by primary key");
}
out.collect(key, new Text("success"));
}
public void close() {
}
}
/**
* A decreasing Comparator for IntWritable
*/
public static class DecreasingIntComparator extends IntWritable.Comparator {
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return -super.compare(b1, s1, l1, b2, s2, l2);
}
static { // register this comparator
WritableComparator.define(DecreasingIntComparator.class,
new IntWritable.Comparator());
}
}
/** Grouping function for values based on the composite key. This
* comparator strips off the secondary key part from the x0y composite
* and only compares the primary key value (x).
*/
public static class CompositeIntGroupFn extends WritableComparator {
public CompositeIntGroupFn() {
super(IntWritable.class);
}
public int compare (WritableComparable v1, WritableComparable v2) {
int val1 = ((IntWritable)(v1)).get() / 100;
int val2 = ((IntWritable)(v2)).get() / 100;
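      // e.g. composite keys 2304 and 2399 both map to primary key 23 and compare as equal here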
if (val1 < val2)
return 1;
else if (val1 > val2)
return -1;
else
return 0;
}
public boolean equals (IntWritable v1, IntWritable v2) {
int val1 = v1.get();
int val2 = v2.get();
return (val1/100) == (val2/100);
}
static {
WritableComparator.define(CompositeIntGroupFn.class,
new IntWritable.Comparator());
}
}
/** Reverse grouping function for values based on the composite key.
*/
public static class CompositeIntReverseGroupFn extends CompositeIntGroupFn {
public int compare (WritableComparable v1, WritableComparable v2) {
return -super.compare(v1, v2);
}
public boolean equals (IntWritable v1, IntWritable v2) {
return !(super.equals(v1, v2));
}
static {
WritableComparator.define(CompositeIntReverseGroupFn.class,
new IntWritable.Comparator());
}
}
@Before
public void configure() throws Exception {
Path testdir = new Path(TEST_DIR.getAbsolutePath());
Path inDir = new Path(testdir, "in");
Path outDir = new Path(testdir, "out");
FileSystem fs = FileSystem.get(conf);
fs.delete(testdir, true);
conf.setInputFormat(SequenceFileInputFormat.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setOutputKeyClass(IntWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapOutputValueClass(IntWritable.class);
// set up two map jobs, so we can test merge phase in Reduce also
conf.setNumMapTasks(2);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.setOutputFormat(SequenceFileOutputFormat.class);
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
// set up input data in 2 files
Path inFile = new Path(inDir, "part0");
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile,
IntWritable.class, IntWritable.class);
writer.append(new IntWritable(11), new IntWritable(999));
writer.append(new IntWritable(23), new IntWritable(456));
writer.append(new IntWritable(10), new IntWritable(780));
writer.close();
inFile = new Path(inDir, "part1");
writer = SequenceFile.createWriter(fs, conf, inFile,
IntWritable.class, IntWritable.class);
writer.append(new IntWritable(45), new IntWritable(100));
writer.append(new IntWritable(18), new IntWritable(200));
writer.append(new IntWritable(27), new IntWritable(300));
writer.close();
jc = new JobClient(conf);
}
@After
public void cleanup() {
FileUtil.fullyDelete(TEST_DIR);
}
/**
* Test the default comparator for Map/Reduce.
* Use the identity mapper and see if the keys are sorted at the end
* @throws Exception
*/
@Test
public void testDefaultMRComparator() throws Exception {
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(AscendingKeysReducer.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
}
/**
* Test user-defined comparator for Map/Reduce.
* We provide our own comparator that is the reverse of the default int
* comparator. Keys should be sorted in reverse order in the reducer.
* @throws Exception
*/
@Test
public void testUserMRComparator() throws Exception {
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(DescendingKeysReducer.class);
conf.setOutputKeyComparatorClass(DecreasingIntComparator.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
}
/**
* Test user-defined grouping comparator for grouping values in Reduce.
* We generate composite keys that contain a random number, which acts
* as a timestamp associated with the record. In our Reduce function,
* values for a key should be sorted by the 'timestamp'.
* @throws Exception
*/
@Test
public void testUserValueGroupingComparator() throws Exception {
conf.setMapperClass(RandomGenMapper.class);
conf.setReducerClass(AscendingGroupReducer.class);
conf.setOutputValueGroupingComparator(CompositeIntGroupFn.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
}
/**
* Test all user comparators. Super-test of all tests here.
* We generate composite keys that contain a random number, which acts
* as a timestamp associated with the record. In our Reduce function,
* values for a key should be sorted by the 'timestamp'.
* We also provide our own comparators that reverse the default sorting
* order. This lets us make sure that the right comparators are used.
* @throws Exception
*/
@Test
public void testAllUserComparators() throws Exception {
conf.setMapperClass(RandomGenMapper.class);
// use a decreasing comparator so keys are sorted in reverse order
conf.setOutputKeyComparatorClass(DecreasingIntComparator.class);
conf.setReducerClass(DescendingGroupReducer.class);
conf.setOutputValueGroupingComparator(CompositeIntReverseGroupFn.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
}
/**
* Test a user comparator that relies on deserializing both arguments
* for each compare.
*/
@Test
public void testBakedUserComparator() throws Exception {
MyWritable a = new MyWritable(8, 8);
MyWritable b = new MyWritable(7, 9);
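    // compareTo() orders by field i (8 > 7), while the registered raw comparator
    // MyCmp orders by field j (8 < 9), hence the opposite signs asserted below.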
assertTrue(a.compareTo(b) > 0);
assertTrue(WritableComparator.get(MyWritable.class).compare(a, b) < 0);
}
public static class MyWritable implements WritableComparable<MyWritable> {
int i, j;
public MyWritable() { }
public MyWritable(int i, int j) {
this.i = i;
this.j = j;
}
public void readFields(DataInput in) throws IOException {
i = in.readInt();
j = in.readInt();
}
public void write(DataOutput out) throws IOException {
out.writeInt(i);
out.writeInt(j);
}
public int compareTo(MyWritable b) {
return this.i - b.i;
}
static {
WritableComparator.define(MyWritable.class, new MyCmp());
}
}
public static class MyCmp extends WritableComparator {
public MyCmp() { super(MyWritable.class, true); }
public int compare(WritableComparable a, WritableComparable b) {
MyWritable aa = (MyWritable)a;
MyWritable bb = (MyWritable)b;
return aa.j - bb.j;
}
}
}
| 16,937 | 33.012048 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This class tests reliability of the framework in the face of failures of
* both tasks and tasktrackers. Steps:
* 1) Get the cluster status
* 2) Get the number of slots in the cluster
* 3) Spawn a sleepjob that occupies the entire cluster (with two waves of maps)
* 4) Get the list of running attempts for the job
* 5) Fail a few of them
* 6) Now fail a few trackers (ssh)
* 7) Job should run to completion
* 8) The above is repeated for the Sort suite of job (randomwriter, sort,
* validator). All jobs must complete, and finally, the sort validation
* should succeed.
* To run the test:
* ./bin/hadoop --config <config> jar
* build/hadoop-<version>-test.jar MRReliabilityTest -libjars
 * build/hadoop-<version>-examples.jar [-scratchdir <dir>]
*
* The scratchdir is optional and by default the current directory on the client
* will be used as the scratch space. Note that password-less SSH must be set up
* between the client machine from where the test is submitted, and the cluster
* nodes where the test runs.
*
* The test should be run on a <b>free</b> cluster where there is no other parallel
* job submission going on. Submission of other jobs while the test runs can cause
* the tests/jobs submitted to fail.
*/
public class ReliabilityTest extends Configured implements Tool {
private String dir;
private static final Log LOG = LogFactory.getLog(ReliabilityTest.class);
private void displayUsage() {
LOG.info("This must be run in only the distributed mode " +
"(LocalJobRunner not supported).\n\tUsage: MRReliabilityTest " +
"-libjars <path to hadoop-examples.jar> [-scratchdir <dir>]" +
"\n[-scratchdir] points to a scratch space on this host where temp" +
" files for this test will be created. Defaults to current working" +
" dir. \nPasswordless SSH must be set up between this host and the" +
" nodes which the test is going to use.\n"+
"The test should be run on a free cluster with no parallel job submission" +
" going on, as the test requires to restart TaskTrackers and kill tasks" +
" any job submission while the tests are running can cause jobs/tests to fail");
System.exit(-1);
}
public int run(String[] args) throws Exception {
Configuration conf = getConf();
if ("local".equals(conf.get(JTConfig.JT_IPC_ADDRESS, "local"))) {
displayUsage();
}
String[] otherArgs =
new GenericOptionsParser(conf, args).getRemainingArgs();
if (otherArgs.length == 2) {
if (otherArgs[0].equals("-scratchdir")) {
dir = otherArgs[1];
} else {
displayUsage();
}
}
else if (otherArgs.length == 0) {
dir = System.getProperty("user.dir");
} else {
displayUsage();
}
//to protect against the case of jobs failing even when multiple attempts
//fail, set some high values for the max attempts
conf.setInt(JobContext.MAP_MAX_ATTEMPTS, 10);
conf.setInt(JobContext.REDUCE_MAX_ATTEMPTS, 10);
runSleepJobTest(new JobClient(new JobConf(conf)), conf);
runSortJobTests(new JobClient(new JobConf(conf)), conf);
return 0;
}
private void runSleepJobTest(final JobClient jc, final Configuration conf)
throws Exception {
ClusterStatus c = jc.getClusterStatus();
int maxMaps = c.getMaxMapTasks() * 2;
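    // twice the map slots, so the sleep job runs in two waves of maps (see class javadoc)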
int maxReduces = maxMaps;
int mapSleepTime = (int)c.getTTExpiryInterval();
int reduceSleepTime = mapSleepTime;
String[] sleepJobArgs = new String[] {
"-m", Integer.toString(maxMaps),
"-r", Integer.toString(maxReduces),
"-mt", Integer.toString(mapSleepTime),
"-rt", Integer.toString(reduceSleepTime)};
runTest(jc, conf, "org.apache.hadoop.mapreduce.SleepJob", sleepJobArgs,
new KillTaskThread(jc, 2, 0.2f, false, 2),
new KillTrackerThread(jc, 2, 0.4f, false, 1));
LOG.info("SleepJob done");
}
private void runSortJobTests(final JobClient jc, final Configuration conf)
throws Exception {
String inputPath = "my_reliability_test_input";
String outputPath = "my_reliability_test_output";
FileSystem fs = jc.getFs();
fs.delete(new Path(inputPath), true);
fs.delete(new Path(outputPath), true);
runRandomWriterTest(jc, conf, inputPath);
runSortTest(jc, conf, inputPath, outputPath);
runSortValidatorTest(jc, conf, inputPath, outputPath);
}
private void runRandomWriterTest(final JobClient jc,
final Configuration conf, final String inputPath)
throws Exception {
runTest(jc, conf, "org.apache.hadoop.examples.RandomWriter",
new String[]{inputPath},
null, new KillTrackerThread(jc, 0, 0.4f, false, 1));
LOG.info("RandomWriter job done");
}
private void runSortTest(final JobClient jc, final Configuration conf,
final String inputPath, final String outputPath)
throws Exception {
runTest(jc, conf, "org.apache.hadoop.examples.Sort",
new String[]{inputPath, outputPath},
new KillTaskThread(jc, 2, 0.2f, false, 2),
new KillTrackerThread(jc, 2, 0.8f, false, 1));
LOG.info("Sort job done");
}
private void runSortValidatorTest(final JobClient jc,
final Configuration conf, final String inputPath, final String outputPath)
throws Exception {
runTest(jc, conf, "org.apache.hadoop.mapred.SortValidator", new String[] {
"-sortInput", inputPath, "-sortOutput", outputPath},
new KillTaskThread(jc, 2, 0.2f, false, 1),
new KillTrackerThread(jc, 2, 0.8f, false, 1));
LOG.info("SortValidator job done");
}
private String normalizeCommandPath(String command) {
final String hadoopHome;
if ((hadoopHome = System.getenv("HADOOP_PREFIX")) != null) {
command = hadoopHome + "/" + command;
}
return command;
}
private void checkJobExitStatus(int status, String jobName) {
if (status != 0) {
LOG.info(jobName + " job failed with status: " + status);
System.exit(status);
} else {
LOG.info(jobName + " done.");
}
}
//Starts the job in a thread. It also starts the taskKill/tasktrackerKill
//threads.
private void runTest(final JobClient jc, final Configuration conf,
final String jobClass, final String[] args, KillTaskThread killTaskThread,
KillTrackerThread killTrackerThread) throws Exception {
Thread t = new Thread("Job Test") {
public void run() {
try {
Class<?> jobClassObj = conf.getClassByName(jobClass);
int status = ToolRunner.run(conf, (Tool)(jobClassObj.newInstance()),
args);
checkJobExitStatus(status, jobClass);
} catch (Exception e) {
LOG.fatal("JOB " + jobClass + " failed to run");
System.exit(-1);
}
}
};
t.setDaemon(true);
t.start();
JobStatus[] jobs;
//get the job ID. This is the job that we just submitted
while ((jobs = jc.jobsToComplete()).length == 0) {
LOG.info("Waiting for the job " + jobClass +" to start");
Thread.sleep(1000);
}
JobID jobId = jobs[jobs.length - 1].getJobID();
RunningJob rJob = jc.getJob(jobId);
if(rJob.isComplete()) {
LOG.error("The last job returned by the querying JobTracker is complete :" +
rJob.getJobID() + " .Exiting the test");
System.exit(-1);
}
while (rJob.getJobState() == JobStatus.PREP) {
LOG.info("JobID : " + jobId + " not started RUNNING yet");
Thread.sleep(1000);
rJob = jc.getJob(jobId);
}
if (killTaskThread != null) {
killTaskThread.setRunningJob(rJob);
killTaskThread.start();
killTaskThread.join();
LOG.info("DONE WITH THE TASK KILL/FAIL TESTS");
}
if (killTrackerThread != null) {
killTrackerThread.setRunningJob(rJob);
killTrackerThread.start();
killTrackerThread.join();
LOG.info("DONE WITH THE TESTS TO DO WITH LOST TASKTRACKERS");
}
t.join();
}
private class KillTrackerThread extends Thread {
private volatile boolean killed = false;
private JobClient jc;
private RunningJob rJob;
final private int thresholdMultiplier;
private float threshold = 0.2f;
private boolean onlyMapsProgress;
private int numIterations;
final private String slavesFile = dir + "/_reliability_test_slaves_file_";
final String shellCommand = normalizeCommandPath("bin/slaves.sh");
final private String STOP_COMMAND = "ps uwwx | grep java | grep " +
"org.apache.hadoop.mapred.TaskTracker"+ " |" +
" grep -v grep | tr -s ' ' | cut -d ' ' -f2 | xargs kill -s STOP";
final private String RESUME_COMMAND = "ps uwwx | grep java | grep " +
"org.apache.hadoop.mapred.TaskTracker"+ " |" +
" grep -v grep | tr -s ' ' | cut -d ' ' -f2 | xargs kill -s CONT";
//Only one instance must be active at any point
    public KillTrackerThread(JobClient jc, int thresholdMultiplier,
        float threshold, boolean onlyMapsProgress, int numIterations) {
      this.jc = jc;
      this.thresholdMultiplier = thresholdMultiplier;
this.threshold = threshold;
this.onlyMapsProgress = onlyMapsProgress;
this.numIterations = numIterations;
setDaemon(true);
}
public void setRunningJob(RunningJob rJob) {
this.rJob = rJob;
}
public void kill() {
killed = true;
}
public void run() {
stopStartTrackers(true);
if (!onlyMapsProgress) {
stopStartTrackers(false);
}
}
private void stopStartTrackers(boolean considerMaps) {
if (considerMaps) {
LOG.info("Will STOP/RESUME tasktrackers based on Maps'" +
" progress");
} else {
LOG.info("Will STOP/RESUME tasktrackers based on " +
"Reduces' progress");
}
LOG.info("Initial progress threshold: " + threshold +
". Threshold Multiplier: " + thresholdMultiplier +
". Number of iterations: " + numIterations);
float thresholdVal = threshold;
int numIterationsDone = 0;
while (!killed) {
try {
float progress;
if (jc.getJob(rJob.getID()).isComplete() ||
numIterationsDone == numIterations) {
break;
}
if (considerMaps) {
progress = jc.getJob(rJob.getID()).mapProgress();
} else {
progress = jc.getJob(rJob.getID()).reduceProgress();
}
if (progress >= thresholdVal) {
numIterationsDone++;
ClusterStatus c;
stopTaskTrackers((c = jc.getClusterStatus(true)));
Thread.sleep((int)Math.ceil(1.5 * c.getTTExpiryInterval()));
startTaskTrackers();
thresholdVal = thresholdVal * thresholdMultiplier;
}
Thread.sleep(5000);
} catch (InterruptedException ie) {
killed = true;
return;
} catch (Exception e) {
LOG.fatal(StringUtils.stringifyException(e));
}
}
}
private void stopTaskTrackers(ClusterStatus c) throws Exception {
Collection <String> trackerNames = c.getActiveTrackerNames();
ArrayList<String> trackerNamesList = new ArrayList<String>(trackerNames);
Collections.shuffle(trackerNamesList);
int count = 0;
FileOutputStream fos = new FileOutputStream(new File(slavesFile));
LOG.info(new Date() + " Stopping a few trackers");
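      // roughly half of the active trackers are written to the slaves file and then
      // suspended via the slaves.sh STOP command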
for (String tracker : trackerNamesList) {
String host = convertTrackerNameToHostName(tracker);
LOG.info(new Date() + " Marking tracker on host: " + host);
fos.write((host + "\n").getBytes());
if (count++ >= trackerNamesList.size()/2) {
break;
}
}
fos.close();
runOperationOnTT("suspend");
}
private void startTaskTrackers() throws Exception {
LOG.info(new Date() + " Resuming the stopped trackers");
runOperationOnTT("resume");
new File(slavesFile).delete();
}
private void runOperationOnTT(String operation) throws IOException {
Map<String,String> hMap = new HashMap<String,String>();
hMap.put("HADOOP_SLAVES", slavesFile);
StringTokenizer strToken;
if (operation.equals("suspend")) {
strToken = new StringTokenizer(STOP_COMMAND, " ");
} else {
strToken = new StringTokenizer(RESUME_COMMAND, " ");
}
String commandArgs[] = new String[strToken.countTokens() + 1];
int i = 0;
commandArgs[i++] = shellCommand;
while (strToken.hasMoreTokens()) {
commandArgs[i++] = strToken.nextToken();
}
String output = Shell.execCommand(hMap, commandArgs);
if (output != null && !output.equals("")) {
LOG.info(output);
}
}
private String convertTrackerNameToHostName(String trackerName) {
      // Convert the trackerName to its host name
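      // e.g. (illustrative) "tracker_node1:127.0.0.1:45454" becomes "node1"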
int indexOfColon = trackerName.indexOf(":");
String trackerHostName = (indexOfColon == -1) ?
trackerName :
trackerName.substring(0, indexOfColon);
return trackerHostName.substring("tracker_".length());
}
}
private class KillTaskThread extends Thread {
private volatile boolean killed = false;
private RunningJob rJob;
private JobClient jc;
final private int thresholdMultiplier;
private float threshold = 0.2f;
private boolean onlyMapsProgress;
private int numIterations;
public KillTaskThread(JobClient jc, int thresholdMultiplier,
float threshold, boolean onlyMapsProgress, int numIterations) {
this.jc = jc;
this.thresholdMultiplier = thresholdMultiplier;
this.threshold = threshold;
this.onlyMapsProgress = onlyMapsProgress;
this.numIterations = numIterations;
setDaemon(true);
}
public void setRunningJob(RunningJob rJob) {
this.rJob = rJob;
}
public void kill() {
killed = true;
}
public void run() {
killBasedOnProgress(true);
if (!onlyMapsProgress) {
killBasedOnProgress(false);
}
}
private void killBasedOnProgress(boolean considerMaps) {
boolean fail = false;
if (considerMaps) {
LOG.info("Will kill tasks based on Maps' progress");
} else {
LOG.info("Will kill tasks based on Reduces' progress");
}
LOG.info("Initial progress threshold: " + threshold +
". Threshold Multiplier: " + thresholdMultiplier +
". Number of iterations: " + numIterations);
float thresholdVal = threshold;
int numIterationsDone = 0;
while (!killed) {
try {
float progress;
if (jc.getJob(rJob.getID()).isComplete() ||
numIterationsDone == numIterations) {
break;
}
if (considerMaps) {
progress = jc.getJob(rJob.getID()).mapProgress();
} else {
progress = jc.getJob(rJob.getID()).reduceProgress();
}
if (progress >= thresholdVal) {
numIterationsDone++;
if (numIterationsDone > 0 && numIterationsDone % 2 == 0) {
fail = true; //fail tasks instead of kill
}
ClusterStatus c = jc.getClusterStatus();
LOG.info(new Date() + " Killing a few tasks");
Collection<TaskAttemptID> runningTasks =
new ArrayList<TaskAttemptID>();
TaskReport mapReports[] = jc.getMapTaskReports(rJob.getID());
for (TaskReport mapReport : mapReports) {
if (mapReport.getCurrentStatus() == TIPStatus.RUNNING) {
runningTasks.addAll(mapReport.getRunningTaskAttempts());
}
}
if (runningTasks.size() > c.getTaskTrackers()/2) {
int count = 0;
for (TaskAttemptID t : runningTasks) {
LOG.info(new Date() + " Killed task : " + t);
rJob.killTask(t, fail);
if (count++ > runningTasks.size()/2) { //kill 50%
break;
}
}
}
runningTasks.clear();
TaskReport reduceReports[] = jc.getReduceTaskReports(rJob.getID());
for (TaskReport reduceReport : reduceReports) {
if (reduceReport.getCurrentStatus() == TIPStatus.RUNNING) {
runningTasks.addAll(reduceReport.getRunningTaskAttempts());
}
}
if (runningTasks.size() > c.getTaskTrackers()/2) {
int count = 0;
for (TaskAttemptID t : runningTasks) {
LOG.info(new Date() + " Killed task : " + t);
rJob.killTask(t, fail);
if (count++ > runningTasks.size()/2) { //kill 50%
break;
}
}
}
thresholdVal = thresholdVal * thresholdMultiplier;
}
Thread.sleep(5000);
} catch (InterruptedException ie) {
killed = true;
} catch (Exception e) {
LOG.fatal(StringUtils.stringifyException(e));
}
}
}
}
public static void main(String args[]) throws Exception {
int res = ToolRunner.run(new Configuration(), new ReliabilityTest(), args);
System.exit(res);
}
}
| 18,984 | 36.372047 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Iterator;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
import org.apache.hadoop.mapreduce.MRConfig;
public class TestJavaSerialization extends TestCase {
private static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data", "/tmp")).toURI()
.toString().replace(' ', '+');
private final Path INPUT_DIR = new Path(TEST_ROOT_DIR + "/input");
private final Path OUTPUT_DIR = new Path(TEST_ROOT_DIR + "/out");
private final Path INPUT_FILE = new Path(INPUT_DIR , "inp");
static class WordCountMapper extends MapReduceBase implements
Mapper<LongWritable, Text, String, Long> {
public void map(LongWritable key, Text value,
OutputCollector<String, Long> output, Reporter reporter)
throws IOException {
StringTokenizer st = new StringTokenizer(value.toString());
while (st.hasMoreTokens()) {
String token = st.nextToken();
assertTrue("Invalid token; expected 'a' or 'b', got " + token,
token.equals("a") || token.equals("b"));
output.collect(token, 1L);
}
}
}
static class SumReducer<K> extends MapReduceBase implements
Reducer<K, Long, K, Long> {
public void reduce(K key, Iterator<Long> values,
OutputCollector<K, Long> output, Reporter reporter)
throws IOException {
long sum = 0;
while (values.hasNext()) {
sum += values.next();
}
output.collect(key, sum);
}
}
private void cleanAndCreateInput(FileSystem fs) throws IOException {
fs.delete(INPUT_FILE, true);
fs.delete(OUTPUT_DIR, true);
OutputStream os = fs.create(INPUT_FILE);
Writer wr = new OutputStreamWriter(os);
wr.write("b a\n");
wr.close();
}
public void testMapReduceJob() throws Exception {
JobConf conf = new JobConf(TestJavaSerialization.class);
conf.setJobName("JavaSerialization");
FileSystem fs = FileSystem.get(conf);
cleanAndCreateInput(fs);
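    // Register JavaSerialization (alongside WritableSerialization) so plain String
    // keys and Long values can be used as the job's output types below.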
conf.set("io.serializations",
"org.apache.hadoop.io.serializer.JavaSerialization," +
"org.apache.hadoop.io.serializer.WritableSerialization");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(String.class);
conf.setOutputValueClass(Long.class);
conf.setOutputKeyComparatorClass(JavaSerializationComparator.class);
conf.setMapperClass(WordCountMapper.class);
conf.setReducerClass(SumReducer.class);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
FileInputFormat.setInputPaths(conf, INPUT_DIR);
FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
String inputFileContents =
FileUtils.readFileToString(new File(INPUT_FILE.toUri().getPath()));
assertTrue("Input file contents not as expected; contents are '"
+ inputFileContents + "', expected \"b a\n\" ",
inputFileContents.equals("b a\n"));
JobClient.runJob(conf);
Path[] outputFiles =
FileUtil.stat2Paths(fs.listStatus(OUTPUT_DIR,
new Utils.OutputFileUtils.OutputFilesFilter()));
assertEquals(1, outputFiles.length);
InputStream is = fs.open(outputFiles[0]);
String reduceOutput = org.apache.commons.io.IOUtils.toString(is);
String[] lines = reduceOutput.split(System.getProperty("line.separator"));
assertEquals("Unexpected output; received output '" + reduceOutput + "'",
"a\t1", lines[0]);
assertEquals("Unexpected output; received output '" + reduceOutput + "'",
"b\t1", lines[1]);
assertEquals("Reduce output has extra lines; output is '" + reduceOutput
+ "'", 2, lines.length);
is.close();
}
/**
* HADOOP-4466:
   * This test verifies that the JavaSerialization implementation can write to
   * SequenceFiles, since SequenceFileOutputFormat is not coupled to Writable
   * types; if it were, the job would fail.
*
*/
public void testWriteToSequencefile() throws Exception {
JobConf conf = new JobConf(TestJavaSerialization.class);
conf.setJobName("JavaSerialization");
FileSystem fs = FileSystem.get(conf);
cleanAndCreateInput(fs);
conf.set("io.serializations",
"org.apache.hadoop.io.serializer.JavaSerialization," +
"org.apache.hadoop.io.serializer.WritableSerialization");
conf.setInputFormat(TextInputFormat.class);
// test we can write to sequence files
conf.setOutputFormat(SequenceFileOutputFormat.class);
conf.setOutputKeyClass(String.class);
conf.setOutputValueClass(Long.class);
conf.setOutputKeyComparatorClass(JavaSerializationComparator.class);
conf.setMapperClass(WordCountMapper.class);
conf.setReducerClass(SumReducer.class);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
FileInputFormat.setInputPaths(conf, INPUT_DIR);
FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
JobClient.runJob(conf);
Path[] outputFiles = FileUtil.stat2Paths(
fs.listStatus(OUTPUT_DIR,
new Utils.OutputFileUtils.OutputFilesFilter()));
assertEquals(1, outputFiles.length);
}
}
| 6,384 | 32.962766 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/**
* Tests for ClientServiceDelegate.java
*/
@RunWith(value = Parameterized.class)
public class TestClientServiceDelegate {
private JobID oldJobId = JobID.forName("job_1315895242400_2");
private org.apache.hadoop.mapreduce.v2.api.records.JobId jobId = TypeConverter
.toYarn(oldJobId);
private boolean isAMReachableFromClient;
public TestClientServiceDelegate(boolean isAMReachableFromClient) {
this.isAMReachableFromClient = isAMReachableFromClient;
}
@Parameters
public static Collection<Object[]> data() {
Object[][] data = new Object[][] { { true }, { false } };
return Arrays.asList(data);
}
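  // Each test below therefore runs twice: once with the AM reachable from the
  // client and once with AM access disabled.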
@Test
public void testUnknownAppInRM() throws Exception {
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(
getJobReportResponse());
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
historyServerProxy, getRMDelegate());
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
}
@Test
public void testRemoteExceptionFromHistoryServer() throws Exception {
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(getJobReportRequest())).thenThrow(
new IOException("Job ID doesnot Exist"));
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
.thenReturn(null);
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
historyServerProxy, rm);
try {
clientServiceDelegate.getJobStatus(oldJobId);
Assert.fail("Invoke should throw exception after retries.");
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains(
"Job ID doesnot Exist"));
}
}
@Test
public void testRetriesOnConnectionFailure() throws Exception {
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(getJobReportRequest())).thenThrow(
new RuntimeException("1")).thenThrow(new RuntimeException("2"))
.thenReturn(getJobReportResponse());
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
.thenReturn(null);
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
historyServerProxy, rm);
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
verify(historyServerProxy, times(3)).getJobReport(
any(GetJobReportRequest.class));
}
@Test
public void testRetriesOnAMConnectionFailures() throws Exception {
if (!isAMReachableFromClient) {
return;
}
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
.thenReturn(getRunningApplicationReport("am1", 78));
// throw exception in 1st, 2nd, 3rd and 4th call of getJobReport, and
// succeed in the 5th call.
final MRClientProtocol amProxy = mock(MRClientProtocol.class);
when(amProxy.getJobReport(any(GetJobReportRequest.class)))
.thenThrow(new RuntimeException("11"))
.thenThrow(new RuntimeException("22"))
.thenThrow(new RuntimeException("33"))
.thenThrow(new RuntimeException("44")).thenReturn(getJobReportResponse());
Configuration conf = new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,
!isAMReachableFromClient);
ClientServiceDelegate clientServiceDelegate =
new ClientServiceDelegate(conf, rm, oldJobId, null) {
@Override
MRClientProtocol instantiateAMProxy(
final InetSocketAddress serviceAddr) throws IOException {
super.instantiateAMProxy(serviceAddr);
return amProxy;
}
};
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
// assert maxClientRetry is not decremented.
Assert.assertEquals(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES), clientServiceDelegate
.getMaxClientRetry());
verify(amProxy, times(5)).getJobReport(any(GetJobReportRequest.class));
}
@Test
public void testNoRetryOnAMAuthorizationException() throws Exception {
if (!isAMReachableFromClient) {
return;
}
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
.thenReturn(getRunningApplicationReport("am1", 78));
// throw authorization exception on first invocation
final MRClientProtocol amProxy = mock(MRClientProtocol.class);
when(amProxy.getJobReport(any(GetJobReportRequest.class)))
.thenThrow(new AuthorizationException("Denied"));
Configuration conf = new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,
!isAMReachableFromClient);
ClientServiceDelegate clientServiceDelegate =
new ClientServiceDelegate(conf, rm, oldJobId, null) {
@Override
MRClientProtocol instantiateAMProxy(
final InetSocketAddress serviceAddr) throws IOException {
super.instantiateAMProxy(serviceAddr);
return amProxy;
}
};
try {
clientServiceDelegate.getJobStatus(oldJobId);
Assert.fail("Exception should be thrown upon AuthorizationException");
} catch (IOException e) {
Assert.assertEquals(AuthorizationException.class.getName() + ": Denied",
e.getMessage());
}
// assert maxClientRetry is not decremented.
Assert.assertEquals(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES), clientServiceDelegate
.getMaxClientRetry());
verify(amProxy, times(1)).getJobReport(any(GetJobReportRequest.class));
}
@Test
public void testHistoryServerNotConfigured() throws Exception {
//RM doesn't have app report and job History Server is not configured
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
null, getRMDelegate());
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertEquals("N/A", jobStatus.getUsername());
Assert.assertEquals(JobStatus.State.PREP, jobStatus.getState());
//RM has app report and job History Server is not configured
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
ApplicationReport applicationReport = getFinishedApplicationReport();
when(rm.getApplicationReport(jobId.getAppId())).thenReturn(
applicationReport);
clientServiceDelegate = getClientServiceDelegate(null, rm);
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertEquals(applicationReport.getUser(), jobStatus.getUsername());
Assert.assertEquals(JobStatus.State.SUCCEEDED, jobStatus.getState());
}
@Test
public void testJobReportFromHistoryServer() throws Exception {
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(
getJobReportResponseFromHistoryServer());
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
.thenReturn(null);
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
historyServerProxy, rm);
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("TestJobFilePath", jobStatus.getJobFile());
Assert.assertEquals("http://TestTrackingUrl", jobStatus.getTrackingUrl());
Assert.assertEquals(1.0f, jobStatus.getMapProgress(), 0.0f);
Assert.assertEquals(1.0f, jobStatus.getReduceProgress(), 0.0f);
}
@Test
public void testCountersFromHistoryServer() throws Exception {
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getCounters(getCountersRequest())).thenReturn(
getCountersResponseFromHistoryServer());
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
.thenReturn(null);
ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
historyServerProxy, rm);
Counters counters = TypeConverter.toYarn(clientServiceDelegate.getJobCounters(oldJobId));
Assert.assertNotNull(counters);
Assert.assertEquals(1001, counters.getCounterGroup("dummyCounters").getCounter("dummyCounter").getValue());
}
@Test
public void testReconnectOnAMRestart() throws IOException {
//test not applicable when AM not reachable
//as instantiateAMProxy is not called at all
if(!isAMReachableFromClient) {
return;
}
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
// RM returns AM1 url, null, null and AM2 url on invocations.
// Nulls simulate the time when AM2 is in the process of restarting.
ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
try {
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(
getRunningApplicationReport("am1", 78)).thenReturn(
getRunningApplicationReport(null, 0)).thenReturn(
getRunningApplicationReport(null, 0)).thenReturn(
getRunningApplicationReport("am2", 90));
} catch (YarnException e) {
throw new IOException(e);
}
GetJobReportResponse jobReportResponse1 = mock(GetJobReportResponse.class);
when(jobReportResponse1.getJobReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "jobName-firstGen", "user",
JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null,
false, ""));
// First AM returns a report with jobName firstGen and simulates AM shutdown
// on second invocation.
MRClientProtocol firstGenAMProxy = mock(MRClientProtocol.class);
when(firstGenAMProxy.getJobReport(any(GetJobReportRequest.class)))
.thenReturn(jobReportResponse1).thenThrow(
new RuntimeException("AM is down!"));
GetJobReportResponse jobReportResponse2 = mock(GetJobReportResponse.class);
when(jobReportResponse2.getJobReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "jobName-secondGen", "user",
JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null,
false, ""));
// Second AM generation returns a report with jobName secondGen
MRClientProtocol secondGenAMProxy = mock(MRClientProtocol.class);
when(secondGenAMProxy.getJobReport(any(GetJobReportRequest.class)))
.thenReturn(jobReportResponse2);
ClientServiceDelegate clientServiceDelegate = spy(getClientServiceDelegate(
historyServerProxy, rmDelegate));
// First time, connection should be to AM1, then to AM2. Further requests
// should use the same proxy to AM2 and so instantiateProxy shouldn't be
// called.
doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(
clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class));
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-firstGen", jobStatus.getJobName());
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-secondGen", jobStatus.getJobName());
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-secondGen", jobStatus.getJobName());
verify(clientServiceDelegate, times(2)).instantiateAMProxy(
any(InetSocketAddress.class));
}
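  // Verifies that with AM access disabled the client never builds an AM proxy
  // while the job is running (the job name falls back to "N/A") and switches
  // to the history server once the application finishes.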
@Test
public void testAMAccessDisabled() throws IOException {
//test only applicable when AM not reachable
if(isAMReachableFromClient) {
return;
}
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(
getJobReportResponseFromHistoryServer());
ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
try {
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(
getRunningApplicationReport("am1", 78)).thenReturn(
getRunningApplicationReport("am1", 78)).thenReturn(
getRunningApplicationReport("am1", 78)).thenReturn(
getFinishedApplicationReport());
} catch (YarnException e) {
throw new IOException(e);
}
ClientServiceDelegate clientServiceDelegate = spy(getClientServiceDelegate(
historyServerProxy, rmDelegate));
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A", jobStatus.getJobName());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
any(InetSocketAddress.class));
// Should not reach AM even for second and third times too.
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A", jobStatus.getJobName());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
any(InetSocketAddress.class));
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A", jobStatus.getJobName());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
any(InetSocketAddress.class));
// The third time around, app is completed, so should go to JHS
JobStatus jobStatus1 = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus1);
Assert.assertEquals("TestJobFilePath", jobStatus1.getJobFile());
Assert.assertEquals("http://TestTrackingUrl", jobStatus1.getTrackingUrl());
Assert.assertEquals(1.0f, jobStatus1.getMapProgress(), 0.0f);
Assert.assertEquals(1.0f, jobStatus1.getReduceProgress(), 0.0f);
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
any(InetSocketAddress.class));
}
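  // Verifies that getJobStatus retries getApplicationReport the default number
  // of times when the RM is unreachable.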
@Test
public void testRMDownForJobStatusBeforeGetAMReport() throws IOException {
Configuration conf = new YarnConfiguration();
testRMDownForJobStatusBeforeGetAMReport(conf,
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES);
}
@Test
public void testRMDownForJobStatusBeforeGetAMReportWithRetryTimes()
throws IOException {
Configuration conf = new YarnConfiguration();
conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 2);
testRMDownForJobStatusBeforeGetAMReport(conf, conf.getInt(
MRJobConfig.MR_CLIENT_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES));
}
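  // Verifies that getJobStatus survives transient RM failures: two refused
  // connections followed by a successful report should still yield a status.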
@Test
public void testRMDownRestoreForJobStatusBeforeGetAMReport()
throws IOException {
Configuration conf = new YarnConfiguration();
conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 3);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,
!isAMReachableFromClient);
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(any(GetJobReportRequest.class)))
.thenReturn(getJobReportResponse());
ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
try {
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced1"))).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced2")))
.thenReturn(getFinishedApplicationReport());
ClientServiceDelegate clientServiceDelegate = new ClientServiceDelegate(
conf, rmDelegate, oldJobId, historyServerProxy);
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
verify(rmDelegate, times(3)).getApplicationReport(
any(ApplicationId.class));
Assert.assertNotNull(jobStatus);
} catch (YarnException e) {
throw new IOException(e);
}
}
private void testRMDownForJobStatusBeforeGetAMReport(Configuration conf,
int noOfRetries) throws IOException {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,
!isAMReachableFromClient);
MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
try {
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced1"))).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced2"))).thenThrow(
new java.lang.reflect.UndeclaredThrowableException(new IOException(
"Connection refuced3")));
ClientServiceDelegate clientServiceDelegate = new ClientServiceDelegate(
conf, rmDelegate, oldJobId, historyServerProxy);
try {
clientServiceDelegate.getJobStatus(oldJobId);
Assert.fail("It should throw exception after retries");
} catch (IOException e) {
System.out.println("fail to get job status,and e=" + e.toString());
}
verify(rmDelegate, times(noOfRetries)).getApplicationReport(
any(ApplicationId.class));
} catch (YarnException e) {
throw new IOException(e);
}
}
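  // The helpers below build the mock requests, reports and delegates shared by
  // the tests above.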
private GetJobReportRequest getJobReportRequest() {
GetJobReportRequest request = Records.newRecord(GetJobReportRequest.class);
request.setJobId(jobId);
return request;
}
private GetJobReportResponse getJobReportResponse() {
GetJobReportResponse jobReportResponse = Records
.newRecord(GetJobReportResponse.class);
JobReport jobReport = Records.newRecord(JobReport.class);
jobReport.setJobId(jobId);
jobReport.setJobState(JobState.SUCCEEDED);
jobReportResponse.setJobReport(jobReport);
return jobReportResponse;
}
private GetCountersRequest getCountersRequest() {
GetCountersRequest request = Records.newRecord(GetCountersRequest.class);
request.setJobId(jobId);
return request;
}
private ApplicationReport getFinishedApplicationReport() {
ApplicationId appId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
appId, 0);
return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
"appname", "host", 124, null, YarnApplicationState.FINISHED,
"diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
"N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
}
private ApplicationReport getRunningApplicationReport(String host, int port) {
ApplicationId appId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
appId, 0);
return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
"appname", host, port, null, YarnApplicationState.RUNNING, "diagnostics",
"url", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
}
private ResourceMgrDelegate getRMDelegate() throws IOException {
ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
try {
ApplicationId appId = jobId.getAppId();
when(rm.getApplicationReport(appId)).
thenThrow(new ApplicationNotFoundException(appId + " not found"));
} catch (YarnException e) {
throw new IOException(e);
}
return rm;
}
private ClientServiceDelegate getClientServiceDelegate(
MRClientProtocol historyServerProxy, ResourceMgrDelegate rm) {
Configuration conf = new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, !isAMReachableFromClient);
ClientServiceDelegate clientServiceDelegate = new ClientServiceDelegate(
conf, rm, oldJobId, historyServerProxy);
return clientServiceDelegate;
}
private GetJobReportResponse getJobReportResponseFromHistoryServer() {
GetJobReportResponse jobReportResponse = Records
.newRecord(GetJobReportResponse.class);
JobReport jobReport = Records.newRecord(JobReport.class);
jobReport.setJobId(jobId);
jobReport.setJobState(JobState.SUCCEEDED);
jobReport.setMapProgress(1.0f);
jobReport.setReduceProgress(1.0f);
jobReport.setJobFile("TestJobFilePath");
jobReport.setTrackingUrl("http://TestTrackingUrl");
jobReportResponse.setJobReport(jobReport);
return jobReportResponse;
}
private GetCountersResponse getCountersResponseFromHistoryServer() {
GetCountersResponse countersResponse = Records
.newRecord(GetCountersResponse.class);
Counter counter = Records.newRecord(Counter.class);
CounterGroup counterGroup = Records.newRecord(CounterGroup.class);
Counters counters = Records.newRecord(Counters.class);
counter.setDisplayName("dummyCounter");
counter.setName("dummyCounter");
counter.setValue(1001);
counterGroup.setName("dummyCounters");
counterGroup.setDisplayName("dummyCounters");
counterGroup.setCounter("dummyCounter", counter);
counters.setCounterGroup("dummyCounters", counterGroup);
countersResponse.setCounters(counters);
return countersResponse;
}
}
| 26,089 | 43.598291 | 142 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceFetch.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.mapreduce.TaskCounter;
public class TestReduceFetch extends TestReduceFetchFromPartialMem {
static {
setSuite(TestReduceFetch.class);
}
/**
* Verify that all segments are read from disk
* @throws Exception might be thrown
*/
public void testReduceFromDisk() throws Exception {
final int MAP_TASKS = 8;
JobConf job = mrCluster.createJobConf();
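    // Force every segment to disk: keep nothing in the reduce-side input
    // buffer, shrink the shuffle buffer, and use a tiny merge factor and
    // in-memory threshold so intermediate on-disk merges are triggered.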
job.set(JobContext.REDUCE_INPUT_BUFFER_PERCENT, "0.0");
job.setNumMapTasks(MAP_TASKS);
job.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx128m");
job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
job.set(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, "0.05");
job.setInt(JobContext.IO_SORT_FACTOR, 2);
job.setInt(JobContext.REDUCE_MERGE_INMEM_THRESHOLD, 4);
Counters c = runJob(job);
final long spill = c.findCounter(TaskCounter.SPILLED_RECORDS).getCounter();
final long out = c.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getCounter();
assertTrue("Expected all records spilled during reduce (" + spill + ")",
spill >= 2 * out); // all records spill at map, reduce
assertTrue("Expected intermediate merges (" + spill + ")",
spill >= 2 * out + (out / MAP_TASKS)); // some records hit twice
}
/**
* Verify that no segment hits disk.
* @throws Exception might be thrown
*/
public void testReduceFromMem() throws Exception {
final int MAP_TASKS = 3;
JobConf job = mrCluster.createJobConf();
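    // Keep the entire shuffle in memory: both buffers are sized at 100%, so no
    // map output should be spilled on the reduce side.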
job.set(JobContext.REDUCE_INPUT_BUFFER_PERCENT, "1.0");
job.set(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, "1.0");
job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
job.setNumMapTasks(MAP_TASKS);
Counters c = runJob(job);
final long spill = c.findCounter(TaskCounter.SPILLED_RECORDS).getCounter();
final long out = c.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getCounter();
assertEquals("Spilled records: " + spill, out, spill); // no reduce spill
}
}
| 2,821 | 39.898551 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
public class TestKeyValueTextInputFormat extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestKeyValueTextInputFormat.class.getName());
private static int MAX_LENGTH = 10000;
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestKeyValueTextInputFormat");
public void testFormat() throws Exception {
JobConf job = new JobConf();
Path file = new Path(workDir, "test.txt");
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
int seed = new Random().nextInt();
LOG.info("seed = "+seed);
Random random = new Random(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
LOG.debug("creating; entries = " + length);
// create a file with length entries
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i*2));
writer.write("\t");
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
KeyValueTextInputFormat format = new KeyValueTextInputFormat();
format.configure(job);
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(MAX_LENGTH/20)+1;
LOG.debug("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(job, numSplits);
LOG.debug("splitting: got = " + splits.length);
// check each split
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.length; j++) {
LOG.debug("split["+j+"]= " + splits[j]);
RecordReader<Text, Text> reader =
format.getRecordReader(splits[j], job, reporter);
Class readerClass = reader.getClass();
assertEquals("reader class is KeyValueLineRecordReader.", KeyValueLineRecordReader.class, readerClass);
Text key = reader.createKey();
Class keyClass = key.getClass();
Text value = reader.createValue();
Class valueClass = value.getClass();
assertEquals("Key class is Text.", Text.class, keyClass);
assertEquals("Value class is Text.", Text.class, valueClass);
try {
int count = 0;
while (reader.next(key, value)) {
int v = Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v +
" in split " + j +
" at position "+reader.getPos());
}
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits["+j+"]="+splits[j]+" count=" + count);
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
private LineReader makeStream(String str) throws IOException {
return new LineReader(new ByteArrayInputStream
(str.getBytes("UTF-8")),
defaultConf);
}
public void testUTF8() throws Exception {
LineReader in = null;
try {
in = makeStream("abcd\u20acbdcd\u20ac");
Text line = new Text();
in.readLine(line);
assertEquals("readLine changed utf8 characters",
"abcd\u20acbdcd\u20ac", line.toString());
in = makeStream("abc\u200axyz");
in.readLine(line);
assertEquals("split on fake newline", "abc\u200axyz", line.toString());
} finally {
if (in != null) {
in.close();
}
}
}
public void testNewLines() throws Exception {
LineReader in = null;
try {
in = makeStream("a\nbb\n\nccc\rdddd\r\neeeee");
Text out = new Text();
in.readLine(out);
assertEquals("line1 length", 1, out.getLength());
in.readLine(out);
assertEquals("line2 length", 2, out.getLength());
in.readLine(out);
assertEquals("line3 length", 0, out.getLength());
in.readLine(out);
assertEquals("line4 length", 3, out.getLength());
in.readLine(out);
assertEquals("line5 length", 4, out.getLength());
in.readLine(out);
assertEquals("line5 length", 5, out.getLength());
assertEquals("end of file", 0, in.readLine(out));
} finally {
if (in != null) {
in.close();
}
}
}
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static final Reporter voidReporter = Reporter.NULL;
private static List<Text> readSplit(KeyValueTextInputFormat format,
InputSplit split,
JobConf job) throws IOException {
List<Text> result = new ArrayList<Text>();
RecordReader<Text, Text> reader = null;
try {
reader = format.getRecordReader(split, job, voidReporter);
Text key = reader.createKey();
Text value = reader.createValue();
while (reader.next(key, value)) {
result.add(value);
value = (Text) reader.createValue();
}
} finally {
if (reader != null) {
reader.close();
}
}
return result;
}
/**
* Test using the gzip codec for reading
*/
public static void testGzip() throws IOException {
JobConf job = new JobConf();
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, job);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"line-1\tthe quick\nline-2\tbrown\nline-3\tfox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"line-1\tthis is a test\nline-1\tof gzip\n");
FileInputFormat.setInputPaths(job, workDir);
KeyValueTextInputFormat format = new KeyValueTextInputFormat();
format.configure(job);
InputSplit[] splits = format.getSplits(job, 100);
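    // Gzip is not splittable, so each .gz input file maps to exactly one split
    // regardless of the requested split count.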
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0] = splits[1];
splits[1] = tmp;
}
List<Text> results = readSplit(format, splits[0], job);
assertEquals("splits[0] length", 6, results.size());
assertEquals("splits[0][5]", " dog", results.get(5).toString());
results = readSplit(format, splits[1], job);
assertEquals("splits[1] length", 2, results.size());
assertEquals("splits[1][0]", "this is a test",
results.get(0).toString());
assertEquals("splits[1][1]", "of gzip",
results.get(1).toString());
}
public static void main(String[] args) throws Exception {
new TestKeyValueTextInputFormat().testFormat();
}
}
| 8,986 | 34.105469 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
/*
 * A simple interface for a client MR cluster used for testing. This interface
 * provides basic methods that are independent of the underlying mini cluster
 * implementation (MR1 or MR2).
 */
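/*
 * A minimal usage sketch (assuming the MiniMRClientClusterFactory entry point
 * referenced by the deprecated MiniMRCluster wrapper; names here are
 * illustrative, not a definitive API):
 *
 *   MiniMRClientCluster cluster =
 *       MiniMRClientClusterFactory.create(MyTest.class, 1, new Configuration());
 *   cluster.start();
 *   JobConf conf = new JobConf(cluster.getConfig());
 *   // ... submit jobs against conf ...
 *   cluster.stop();
 */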
public interface MiniMRClientCluster {
public void start() throws IOException;
/**
   * Stop and restart the cluster using the same configuration.
*/
public void restart() throws IOException;
public void stop() throws IOException;
public Configuration getConfig() throws IOException;
}
| 1,423 | 31.363636 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
public class TestJobName extends ClusterMapReduceTestCase {
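  // These tests submit jobs whose names contain brackets, escapes and other
  // characters that are awkward to display, and verify the jobs still run and
  // produce the expected output.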
public void testComplexName() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));
Writer wr = new OutputStreamWriter(os);
wr.write("b a\n");
wr.close();
JobConf conf = createJobConf();
conf.setJobName("[name][some other value that gets truncated internally that this test attempts to aggravate]");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(IdentityMapper.class);
FileInputFormat.setInputPaths(conf, getInputDir());
FileOutputFormat.setOutputPath(conf, getOutputDir());
JobClient.runJob(conf);
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(getOutputDir(),
new Utils.OutputFileUtils.OutputFilesFilter()));
assertEquals(1, outputFiles.length);
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
assertEquals("0\tb a", reader.readLine());
assertNull(reader.readLine());
reader.close();
}
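  // Same flow as above, with a job name containing regex/escape metacharacters.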
public void testComplexNameWithRegex() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));
Writer wr = new OutputStreamWriter(os);
wr.write("b a\n");
wr.close();
JobConf conf = createJobConf();
conf.setJobName("name \\Evalue]");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(IdentityMapper.class);
FileInputFormat.setInputPaths(conf, getInputDir());
FileOutputFormat.setOutputPath(conf, getOutputDir());
JobClient.runJob(conf);
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(getOutputDir(),
new Utils.OutputFileUtils.OutputFilesFilter()));
assertEquals(1, outputFiles.length);
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
assertEquals("0\tb a", reader.readLine());
assertNull(reader.readLine());
reader.close();
}
}
| 3,590 | 34.205882 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
public class TestResourceMgrDelegate {
/**
* Tests that getRootQueues makes a request for the (recursive) child queues
* @throws IOException
*/
@Test
public void testGetRootQueues() throws IOException, InterruptedException {
final ApplicationClientProtocol applicationsManager = Mockito.mock(ApplicationClientProtocol.class);
GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
Mockito.when(response.getQueueInfo()).thenReturn(queueInfo);
try {
Mockito.when(applicationsManager.getQueueInfo(Mockito.any(
GetQueueInfoRequest.class))).thenReturn(response);
} catch (YarnException e) {
throw new IOException(e);
}
ResourceMgrDelegate delegate = new ResourceMgrDelegate(
new YarnConfiguration()) {
@Override
protected void serviceStart() throws Exception {
Assert.assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl) this.client).setRMClient(applicationsManager);
}
};
delegate.getRootQueues();
ArgumentCaptor<GetQueueInfoRequest> argument =
ArgumentCaptor.forClass(GetQueueInfoRequest.class);
try {
Mockito.verify(applicationsManager).getQueueInfo(
argument.capture());
} catch (YarnException e) {
throw new IOException(e);
}
Assert.assertTrue("Children of root queue not requested",
argument.getValue().getIncludeChildQueues());
Assert.assertTrue("Request wasn't to recurse through children",
argument.getValue().getRecursive());
}
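  /**
   * Tests that getAllJobs maps YARN application states and final statuses to
   * the corresponding MapReduce job states.
   */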
@Test
  public void testAllJobs() throws Exception {
final ApplicationClientProtocol applicationsManager = Mockito.mock(ApplicationClientProtocol.class);
GetApplicationsResponse allApplicationsResponse = Records
.newRecord(GetApplicationsResponse.class);
List<ApplicationReport> applications = new ArrayList<ApplicationReport>();
applications.add(getApplicationReport(YarnApplicationState.FINISHED,
FinalApplicationStatus.FAILED));
applications.add(getApplicationReport(YarnApplicationState.FINISHED,
FinalApplicationStatus.SUCCEEDED));
applications.add(getApplicationReport(YarnApplicationState.FINISHED,
FinalApplicationStatus.KILLED));
applications.add(getApplicationReport(YarnApplicationState.FAILED,
FinalApplicationStatus.FAILED));
allApplicationsResponse.setApplicationList(applications);
Mockito.when(
applicationsManager.getApplications(Mockito
.any(GetApplicationsRequest.class))).thenReturn(
allApplicationsResponse);
ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(
new YarnConfiguration()) {
@Override
protected void serviceStart() throws Exception {
Assert.assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl) this.client).setRMClient(applicationsManager);
}
};
JobStatus[] allJobs = resourceMgrDelegate.getAllJobs();
Assert.assertEquals(State.FAILED, allJobs[0].getState());
Assert.assertEquals(State.SUCCEEDED, allJobs[1].getState());
Assert.assertEquals(State.KILLED, allJobs[2].getState());
Assert.assertEquals(State.FAILED, allJobs[3].getState());
}
private ApplicationReport getApplicationReport(
YarnApplicationState yarnApplicationState,
FinalApplicationStatus finalApplicationStatus) {
ApplicationReport appReport = Mockito.mock(ApplicationReport.class);
ApplicationResourceUsageReport appResources = Mockito
.mock(ApplicationResourceUsageReport.class);
Mockito.when(appReport.getApplicationId()).thenReturn(
ApplicationId.newInstance(0, 0));
Mockito.when(appResources.getNeededResources()).thenReturn(
Records.newRecord(Resource.class));
Mockito.when(appResources.getReservedResources()).thenReturn(
Records.newRecord(Resource.class));
Mockito.when(appResources.getUsedResources()).thenReturn(
Records.newRecord(Resource.class));
Mockito.when(appReport.getApplicationResourceUsageReport()).thenReturn(
appResources);
Mockito.when(appReport.getYarnApplicationState()).thenReturn(
yarnApplicationState);
Mockito.when(appReport.getFinalApplicationStatus()).thenReturn(
finalApplicationStatus);
return appReport;
}
}
| 6,512 | 41.848684 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReporter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Tests the old mapred APIs with {@link Reporter#getProgress()}.
*/
public class TestReporter {
private static final Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp"));
private static final Path testRootTempDir =
new Path(rootTempDir, "TestReporter");
private static FileSystem fs = null;
@BeforeClass
public static void setup() throws Exception {
fs = FileSystem.getLocal(new Configuration());
fs.delete(testRootTempDir, true);
fs.mkdirs(testRootTempDir);
}
@AfterClass
public static void cleanup() throws Exception {
fs.delete(testRootTempDir, true);
}
// an input with 4 lines
private static final String INPUT = "Hi\nHi\nHi\nHi\n";
private static final int INPUT_LINES = INPUT.split("\n").length;
@SuppressWarnings("deprecation")
static class ProgressTesterMapper extends MapReduceBase
implements Mapper<LongWritable, Text, Text, Text> {
private float progressRange = 0;
private int numRecords = 0;
private Reporter reporter = null;
@Override
public void configure(JobConf job) {
super.configure(job);
// set the progress range accordingly
if (job.getNumReduceTasks() == 0) {
progressRange = 1f;
} else {
progressRange = 0.667f;
}
}
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
this.reporter = reporter;
// calculate the actual map progress
float mapProgress = ((float)++numRecords)/INPUT_LINES;
// calculate the attempt progress based on the progress range
float attemptProgress = progressRange * mapProgress;
assertEquals("Invalid progress in map",
attemptProgress, reporter.getProgress(), 0f);
output.collect(new Text(value.toString() + numRecords), value);
}
@Override
public void close() throws IOException {
super.close();
assertEquals("Invalid progress in map cleanup",
progressRange, reporter.getProgress(), 0f);
}
}
static class StatusLimitMapper extends
org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, Text> {
@Override
public void map(LongWritable key, Text value, Context context)
throws IOException {
StringBuilder sb = new StringBuilder(512);
for (int i = 0; i < 1000; i++) {
sb.append("a");
}
context.setStatus(sb.toString());
int progressStatusLength = context.getConfiguration().getInt(
MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT);
if (context.getStatus().length() > progressStatusLength) {
throw new IOException("Status is not truncated");
}
}
}
/**
* Test {@link Reporter}'s progress for a map-only job.
* This will make sure that only the map phase decides the attempt's progress.
*/
@SuppressWarnings("deprecation")
@Test
public void testReporterProgressForMapOnlyJob() throws IOException {
Path test = new Path(testRootTempDir, "testReporterProgressForMapOnlyJob");
JobConf conf = new JobConf();
conf.setMapperClass(ProgressTesterMapper.class);
conf.setMapOutputKeyClass(Text.class);
// fail early
conf.setMaxMapAttempts(1);
conf.setMaxReduceAttempts(0);
RunningJob job =
UtilsForTests.runJob(conf, new Path(test, "in"), new Path(test, "out"),
1, 0, INPUT);
job.waitForCompletion();
assertTrue("Job failed", job.isSuccessful());
}
/**
* A {@link Reducer} implementation that checks the progress on every call
* to {@link Reducer#reduce(Object, Iterator, OutputCollector, Reporter)}.
*/
@SuppressWarnings("deprecation")
static class ProgressTestingReducer extends MapReduceBase
implements Reducer<Text, Text, Text, Text> {
private int recordCount = 0;
private Reporter reporter = null;
// reduce task has a fixed split of progress amongst copy, shuffle and
// reduce phases.
private final float REDUCE_PROGRESS_RANGE = 1.0f/3;
private final float SHUFFLE_PROGRESS_RANGE = 1 - REDUCE_PROGRESS_RANGE;
@Override
public void configure(JobConf job) {
super.configure(job);
}
@Override
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
float reducePhaseProgress = ((float)++recordCount)/INPUT_LINES;
float weightedReducePhaseProgress =
reducePhaseProgress * REDUCE_PROGRESS_RANGE;
assertEquals("Invalid progress in reduce",
SHUFFLE_PROGRESS_RANGE + weightedReducePhaseProgress,
reporter.getProgress(), 0.02f);
this.reporter = reporter;
}
@Override
public void close() throws IOException {
super.close();
assertEquals("Invalid progress in reduce cleanup",
1.0f, reporter.getProgress(), 0f);
}
}
/**
* Test {@link Reporter}'s progress for map-reduce job.
*/
@Test
public void testReporterProgressForMRJob() throws IOException {
Path test = new Path(testRootTempDir, "testReporterProgressForMRJob");
JobConf conf = new JobConf();
conf.setMapperClass(ProgressTesterMapper.class);
conf.setReducerClass(ProgressTestingReducer.class);
conf.setMapOutputKeyClass(Text.class);
// fail early
conf.setMaxMapAttempts(1);
conf.setMaxReduceAttempts(1);
RunningJob job =
UtilsForTests.runJob(conf, new Path(test, "in"), new Path(test, "out"),
1, 1, INPUT);
job.waitForCompletion();
assertTrue("Job failed", job.isSuccessful());
}
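  /**
   * Test that a status string longer than the configured limit
   * (MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY) is truncated when set on the task
   * context; the mapper throws if it is not.
   */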
@Test
public void testStatusLimit() throws IOException, InterruptedException,
ClassNotFoundException {
Path test = new Path(testRootTempDir, "testStatusLimit");
Configuration conf = new Configuration();
Path inDir = new Path(test, "in");
Path outDir = new Path(test, "out");
FileSystem fs = FileSystem.get(conf);
if (fs.exists(inDir)) {
fs.delete(inDir, true);
}
fs.mkdirs(inDir);
DataOutputStream file = fs.create(new Path(inDir, "part-" + 0));
file.writeBytes("testStatusLimit");
file.close();
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
Job job = Job.getInstance(conf, "testStatusLimit");
job.setMapperClass(StatusLimitMapper.class);
job.setNumReduceTasks(0);
FileInputFormat.addInputPath(job, inDir);
FileOutputFormat.setOutputPath(job, outDir);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
}
}
| 8,247 | 31.992 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSpecialCharactersInOutputPath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.Progressable;
/**
 * A JUnit test verifying that jobs' output filenames are not HTML-encoded (cf HADOOP-1795).
*/
public class TestSpecialCharactersInOutputPath extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestSpecialCharactersInOutputPath.class.getName());
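  // Brackets in the output name are the kind of characters that the bug under
  // test (HADOOP-1795) would have HTML-escaped.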
private static final String OUTPUT_FILENAME = "result[0]";
public static boolean launchJob(URI fileSys,
JobConf conf,
int numMaps,
int numReduces) throws IOException {
final Path inDir = new Path("/testing/input");
final Path outDir = new Path("/testing/output");
FileSystem fs = FileSystem.get(fileSys, conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
LOG.warn("Can't create " + inDir);
return false;
}
// generate an input file
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("foo foo2 foo3");
file.close();
// use WordCount example
FileSystem.setDefaultUri(conf, fileSys);
conf.setJobName("foo");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(SpecialTextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(IdentityReducer.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReduces);
// run job and wait for completion
RunningJob runningJob = JobClient.runJob(conf);
try {
assertTrue(runningJob.isComplete());
assertTrue(runningJob.isSuccessful());
assertTrue("Output folder not found!", fs.exists(new Path("/testing/output/" + OUTPUT_FILENAME)));
} catch (NullPointerException npe) {
      // This NPE should no longer happen
fail("A NPE should not have happened.");
}
// return job result
LOG.info("job is complete: " + runningJob.isSuccessful());
return (runningJob.isSuccessful());
}
public void testJobWithDFS() throws IOException {
String namenode = null;
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
try {
final int taskTrackers = 4;
final int jobTrackerPort = 60050;
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
namenode = fileSys.getUri().toString();
mr = new MiniMRCluster(taskTrackers, namenode, 2);
JobConf jobConf = new JobConf();
boolean result;
result = launchJob(fileSys.getUri(), jobConf, 3, 1);
assertTrue(result);
} finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown(); }
}
}
/** generates output filenames with special characters */
static class SpecialTextOutputFormat<K,V> extends TextOutputFormat<K,V> {
@Override
public RecordWriter<K,V> getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress) throws IOException {
return super.getRecordWriter(ignored, job, OUTPUT_FILENAME, progress);
}
}
}
| 4,851 | 35.481203 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLineRecordReaderJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.junit.Test;
public class TestLineRecordReaderJobs {
private static Path workDir = new Path(new Path(System.getProperty(
"test.build.data", "."), "data"), "TestTextInputFormat");
private static Path inputDir = new Path(workDir, "input");
private static Path outputDir = new Path(workDir, "output");
/**
* Writes the input test file
*
* @param conf
* @throws IOException
*/
public void createInputFile(Configuration conf) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path file = new Path(inputDir, "test.txt");
Writer writer = new OutputStreamWriter(localFs.create(file));
writer.write("abc\ndef\t\nghi\njkl");
writer.close();
}
/**
* Reads the output file into a string
*
* @param conf
   * @return the contents of the output file as a single string
* @throws IOException
*/
public String readOutputFile(Configuration conf) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path file = new Path(outputDir, "part-00000");
return UtilsForTests.slurpHadoop(file, localFs);
}
/**
* Creates and runs an MR job
*
* @param conf
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
public void createAndRunJob(Configuration conf) throws IOException,
InterruptedException, ClassNotFoundException {
JobConf job = new JobConf(conf);
job.setJarByClass(TestLineRecordReaderJobs.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
FileInputFormat.addInputPath(job, inputDir);
FileOutputFormat.setOutputPath(job, outputDir);
JobClient.runJob(job);
}
/**
* Test the case when a custom record delimiter is specified using the
* textinputformat.record.delimiter configuration property
*
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
@Test
public void testCustomRecordDelimiters() throws IOException,
InterruptedException, ClassNotFoundException {
Configuration conf = new Configuration();
conf.set("textinputformat.record.delimiter", "\t\n");
conf.setInt("mapreduce.job.maps", 1);
FileSystem localFs = FileSystem.getLocal(conf);
// cleanup
localFs.delete(workDir, true);
// creating input test file
createInputFile(conf);
createAndRunJob(conf);
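    // With "\t\n" as the record delimiter the input collapses into two
    // records, "abc\ndef" at offset 0 and "ghi\njkl" at offset 9, which the
    // identity map/reduce pass through unchanged.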
String expected = "0\tabc\ndef\n9\tghi\njkl\n";
assertEquals(expected, readOutputFile(conf));
}
/**
* Test the default behavior when the textinputformat.record.delimiter
* configuration property is not specified
*
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
@Test
public void testDefaultRecordDelimiters() throws IOException,
InterruptedException, ClassNotFoundException {
Configuration conf = new Configuration();
FileSystem localFs = FileSystem.getLocal(conf);
// cleanup
localFs.delete(workDir, true);
// creating input test file
createInputFile(conf);
createAndRunJob(conf);
String expected = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
assertEquals(expected, readOutputFile(conf));
}
}
| 4,382 | 32.204545 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestStatisticsCollector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.mapred.StatisticsCollector.TimeWindow;
import org.apache.hadoop.mapred.StatisticsCollector.Stat;
public class TestStatisticsCollector extends TestCase{
@SuppressWarnings("rawtypes")
public void testMovingWindow() throws Exception {
StatisticsCollector collector = new StatisticsCollector(1);
TimeWindow window = new TimeWindow("test", 6, 2);
TimeWindow sincStart = StatisticsCollector.SINCE_START;
TimeWindow[] windows = {sincStart, window};
Stat stat = collector.createStat("m1", windows);
stat.inc(3);
collector.update();
assertEquals(0, stat.getValues().get(window).getValue());
assertEquals(3, stat.getValues().get(sincStart).getValue());
stat.inc(3);
collector.update();
assertEquals((3+3), stat.getValues().get(window).getValue());
assertEquals(6, stat.getValues().get(sincStart).getValue());
stat.inc(10);
collector.update();
assertEquals((3+3), stat.getValues().get(window).getValue());
assertEquals(16, stat.getValues().get(sincStart).getValue());
stat.inc(10);
collector.update();
assertEquals((3+3+10+10), stat.getValues().get(window).getValue());
assertEquals(26, stat.getValues().get(sincStart).getValue());
stat.inc(10);
collector.update();
stat.inc(10);
collector.update();
assertEquals((3+3+10+10+10+10), stat.getValues().get(window).getValue());
assertEquals(46, stat.getValues().get(sincStart).getValue());
stat.inc(10);
collector.update();
assertEquals((3+3+10+10+10+10), stat.getValues().get(window).getValue());
assertEquals(56, stat.getValues().get(sincStart).getValue());
stat.inc(12);
collector.update();
assertEquals((10+10+10+10+10+12), stat.getValues().get(window).getValue());
assertEquals(68, stat.getValues().get(sincStart).getValue());
stat.inc(13);
collector.update();
assertEquals((10+10+10+10+10+12), stat.getValues().get(window).getValue());
assertEquals(81, stat.getValues().get(sincStart).getValue());
stat.inc(14);
collector.update();
assertEquals((10+10+10+12+13+14), stat.getValues().get(window).getValue());
assertEquals(95, stat.getValues().get(sincStart).getValue());
// test Stat class
    Map updaters = collector.getUpdaters();
    assertEquals(2, updaters.size());
    Map<String, Stat> statistics = collector.getStatistics();
    assertNotNull(statistics.get("m1"));
    Stat newStat = collector.createStat("m2");
    assertEquals("m2", newStat.name);
    Stat st = collector.removeStat("m1");
    assertEquals("m1", st.name);
assertEquals((10+10+10+12+13+14), stat.getValues().get(window).getValue());
assertEquals(95, stat.getValues().get(sincStart).getValue());
st=collector.removeStat("m1");
// try to remove stat again
assertNull(st);
collector.start();
    // waiting 2.5 sec
Thread.sleep(2500);
assertEquals(69, stat.getValues().get(window).getValue());
assertEquals(95, stat.getValues().get(sincStart).getValue());
}
}
| 3,980 | 35.522936 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
public class TestSequenceFileAsTextInputFormat extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static int MAX_LENGTH = 10000;
private static Configuration conf = new Configuration();
public void testFormat() throws Exception {
JobConf job = new JobConf(conf);
FileSystem fs = FileSystem.getLocal(conf);
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path file = new Path(dir, "test.seq");
Reporter reporter = Reporter.NULL;
int seed = new Random().nextInt();
//LOG.info("seed = "+seed);
Random random = new Random(seed);
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
//LOG.info("creating; entries = " + length);
// create a file with length entries
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, file,
IntWritable.class, LongWritable.class);
try {
for (int i = 0; i < length; i++) {
IntWritable key = new IntWritable(i);
LongWritable value = new LongWritable(10 * i);
writer.append(key, value);
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
InputFormat<Text, Text> format =
new SequenceFileAsTextInputFormat();
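      // SequenceFileAsTextInputFormat presents the IntWritable/LongWritable
      // pairs written above as Text, converting each key and value via
      // toString().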
for (int i = 0; i < 3; i++) {
int numSplits =
random.nextInt(MAX_LENGTH/(SequenceFile.SYNC_INTERVAL/20))+1;
//LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(job, numSplits);
//LOG.info("splitting: got = " + splits.length);
// check each split
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.length; j++) {
RecordReader<Text, Text> reader =
format.getRecordReader(splits[j], job, reporter);
Class readerClass = reader.getClass();
assertEquals("reader class is SequenceFileAsTextRecordReader.", SequenceFileAsTextRecordReader.class, readerClass);
Text value = reader.createValue();
Text key = reader.createKey();
try {
int count = 0;
while (reader.next(key, value)) {
// if (bits.get(key.get())) {
// LOG.info("splits["+j+"]="+splits[j]+" : " + key.get());
// LOG.info("@"+reader.getPos());
// }
int keyInt = Integer.parseInt(key.toString());
assertFalse("Key in multiple partitions.", bits.get(keyInt));
bits.set(keyInt);
count++;
}
//LOG.info("splits["+j+"]="+splits[j]+" count=" + count);
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
public static void main(String[] args) throws Exception {
new TestSequenceFileAsTextInputFormat().testFormat();
}
}
| 4,206 | 34.058333 | 133 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRConfig;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Iterator;
public class TestFileOutputFormat extends HadoopTestCase {
public TestFileOutputFormat() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testCustomFile() throws Exception {
Path inDir = new Path("testing/fileoutputformat/input");
Path outDir = new Path("testing/fileoutputformat/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data", "/tmp")
.replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
file = fs.create(new Path(inDir, "part-1"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
conf.setJobName("fof");
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(TestMap.class);
conf.setReducerClass(TestReduce.class);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
JobClient jc = new JobClient(conf);
RunningJob job = jc.submitJob(conf);
while (!job.isComplete()) {
Thread.sleep(100);
}
assertTrue(job.isSuccessful());
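    // Each map and reduce task wrote a side file from configure() via
    // FileOutputFormat.getPathForCustomFile(conf, "test"); the resulting names
    // carry the task type and partition (test-m-00000, test-m-00001,
    // test-r-00000), which is what the flags below look for in the output dir.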
boolean map0 = false;
boolean map1 = false;
boolean reduce = false;
FileStatus[] statuses = fs.listStatus(outDir);
for (FileStatus status : statuses) {
map0 = map0 || status.getPath().getName().equals("test-m-00000");
map1 = map1 || status.getPath().getName().equals("test-m-00001");
reduce = reduce || status.getPath().getName().equals("test-r-00000");
}
assertTrue(map0);
assertTrue(map1);
assertTrue(reduce);
}
public static class TestMap implements Mapper<LongWritable, Text,
LongWritable, Text> {
public void configure(JobConf conf) {
try {
FileSystem fs = FileSystem.get(conf);
OutputStream os =
fs.create(FileOutputFormat.getPathForCustomFile(conf, "test"));
os.write(1);
os.close();
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public void map(LongWritable key, Text value,
OutputCollector<LongWritable, Text> output,
Reporter reporter) throws IOException {
output.collect(key, value);
}
public void close() throws IOException {
}
}
public static class TestReduce implements Reducer<LongWritable, Text,
LongWritable, Text> {
public void configure(JobConf conf) {
try {
FileSystem fs = FileSystem.get(conf);
OutputStream os =
fs.create(FileOutputFormat.getPathForCustomFile(conf, "test"));
os.write(1);
os.close();
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public void reduce(LongWritable key, Iterator<Text> values,
OutputCollector<LongWritable, Text> output,
Reporter reporter) throws IOException {
while (values.hasNext()) {
Text value = values.next();
output.collect(key, value);
}
}
public void close() throws IOException {
}
}
}
| 5,046 | 29.77439 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.lib.CombineFileSplit;
import org.apache.hadoop.mapred.lib.CombineTextInputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
public class TestCombineTextInputFormat {
private static final Log LOG =
LogFactory.getLog(TestCombineTextInputFormat.class);
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
@SuppressWarnings("deprecation")
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "/tmp")),
"TestCombineTextInputFormat").makeQualified(localFs);
// A reporter that does nothing
private static final Reporter voidReporter = Reporter.NULL;
@Test(timeout=10000)
public void testFormat() throws Exception {
JobConf job = new JobConf(defaultConf);
Random random = new Random();
long seed = random.nextLong();
LOG.info("seed = "+seed);
random.setSeed(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int length = 10000;
final int numFiles = 10;
createFiles(length, numFiles, random);
// create a combined split for the files
CombineTextInputFormat format = new CombineTextInputFormat();
LongWritable key = new LongWritable();
Text value = new Text();
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(length/20)+1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(job, numSplits);
LOG.info("splitting: got = " + splits.length);
// we should have a single split as the length is comfortably smaller than
// the block size
assertEquals("We got more than one splits!", 1, splits.length);
InputSplit split = splits[0];
assertEquals("It should be CombineFileSplit",
CombineFileSplit.class, split.getClass());
// check the split
BitSet bits = new BitSet(length);
LOG.debug("split= " + split);
RecordReader<LongWritable, Text> reader =
format.getRecordReader(split, job, voidReporter);
try {
int count = 0;
while (reader.next(key, value)) {
int v = Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v +
" at position "+reader.getPos());
}
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
count++;
}
LOG.info("splits="+split+" count=" + count);
} finally {
reader.close();
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
private static class Range {
private final int start;
private final int end;
Range(int start, int end) {
this.start = start;
this.end = end;
}
@Override
public String toString() {
return "(" + start + ", " + end + ")";
}
}
private static Range[] createRanges(int length, int numFiles, Random random) {
// generate a number of files with various lengths
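    // Ranges are contiguous: each one starts where the previous ended, each end
    // falls at a random offset past the midpoint of the i-th equal-width chunk,
    // and the last range is pinned to 'length' so [0, length) is fully covered.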
Range[] ranges = new Range[numFiles];
for (int i = 0; i < numFiles; i++) {
int start = i == 0 ? 0 : ranges[i-1].end;
int end = i == numFiles - 1 ?
length :
(length/numFiles)*(2*i + 1)/2 + random.nextInt(length/numFiles) + 1;
ranges[i] = new Range(start, end);
}
return ranges;
}
private static void createFiles(int length, int numFiles, Random random)
throws IOException {
Range[] ranges = createRanges(length, numFiles, random);
for (int i = 0; i < numFiles; i++) {
Path file = new Path(workDir, "test_" + i + ".txt");
Writer writer = new OutputStreamWriter(localFs.create(file));
Range range = ranges[i];
try {
for (int j = range.start; j < range.end; j++) {
writer.write(Integer.toString(j));
writer.write("\n");
}
} finally {
writer.close();
}
}
}
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static List<Text> readSplit(InputFormat<LongWritable,Text> format,
InputSplit split,
JobConf job) throws IOException {
List<Text> result = new ArrayList<Text>();
RecordReader<LongWritable, Text> reader =
format.getRecordReader(split, job, voidReporter);
LongWritable key = reader.createKey();
Text value = reader.createValue();
while (reader.next(key, value)) {
result.add(value);
value = reader.createValue();
}
reader.close();
return result;
}
/**
* Test using the gzip codec for reading
*/
@Test(timeout=10000)
public void testGzip() throws IOException {
JobConf job = new JobConf(defaultConf);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, job);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(job, workDir);
CombineTextInputFormat format = new CombineTextInputFormat();
InputSplit[] splits = format.getSplits(job, 100);
assertEquals("compressed splits == 1", 1, splits.length);
List<Text> results = readSplit(format, splits[0], job);
assertEquals("splits[0] length", 8, results.size());
final String[] firstList =
{"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
final String[] secondList = {"this is a test", "of gzip"};
String first = results.get(0).toString();
if (first.equals(firstList[0])) {
testResults(results, firstList, secondList);
} else if (first.equals(secondList[0])) {
testResults(results, secondList, firstList);
} else {
fail("unexpected first token!");
}
}
private static void testResults(List<Text> results, String[] first,
String[] second) {
for (int i = 0; i < first.length; i++) {
assertEquals("splits[0]["+i+"]", first[i], results.get(i).toString());
}
for (int i = 0; i < second.length; i++) {
int j = i + first.length;
assertEquals("splits[0]["+j+"]", second[i], results.get(j).toString());
}
}
}
| 8,502 | 32.876494 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/HadoopTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import java.io.File;
import java.io.IOException;
/**
* Abstract Test case class to run MR in local or cluster mode and in local FS
* or DFS.
*
* The Hadoop instance is started and stopped on each test method.
*
 * If using DFS, the filesystem is reformatted at the start of each test method.
 *
 * Job Configurations should be created using a configuration returned by the
 * 'createJobConf()' method (see the illustrative usage sketch near the end of
 * this class).
*/
public abstract class HadoopTestCase extends TestCase {
public static final int LOCAL_MR = 1;
public static final int CLUSTER_MR = 2;
public static final int LOCAL_FS = 4;
public static final int DFS_FS = 8;
private boolean localMR;
private boolean localFS;
private int taskTrackers;
private int dataNodes;
/**
* Creates a testcase for local or cluster MR using DFS.
*
* The DFS will be formatted regardless if there was one or not before in the
* given location.
*
* @param mrMode indicates if the MR should be local (LOCAL_MR) or cluster
* (CLUSTER_MR)
   * @param fsMode indicates if the FS should be local (LOCAL_FS) or DFS
   * (DFS_FS); test cases should use the filesystem returned by
   * getFileSystem(), which is configured with the working directory used for
   * relative paths
   *
* @param taskTrackers number of task trackers to start when using cluster
*
* @param dataNodes number of data nodes to start when using DFS
*
* @throws IOException thrown if the base directory cannot be set.
*/
public HadoopTestCase(int mrMode, int fsMode, int taskTrackers, int dataNodes)
throws IOException {
if (mrMode != LOCAL_MR && mrMode != CLUSTER_MR) {
throw new IllegalArgumentException(
"Invalid MapRed mode, must be LOCAL_MR or CLUSTER_MR");
}
if (fsMode != LOCAL_FS && fsMode != DFS_FS) {
throw new IllegalArgumentException(
"Invalid FileSystem mode, must be LOCAL_FS or DFS_FS");
}
if (taskTrackers < 1) {
throw new IllegalArgumentException(
"Invalid taskTrackers value, must be greater than 0");
}
if (dataNodes < 1) {
throw new IllegalArgumentException(
"Invalid dataNodes value, must be greater than 0");
}
localMR = (mrMode == LOCAL_MR);
localFS = (fsMode == LOCAL_FS);
/*
JobConf conf = new JobConf();
fsRoot = conf.get("hadoop.tmp.dir");
if (fsRoot == null) {
throw new IllegalArgumentException(
"hadoop.tmp.dir is not defined");
}
fsRoot = fsRoot.replace(' ', '+') + "/fs";
File file = new File(fsRoot);
if (!file.exists()) {
if (!file.mkdirs()) {
throw new RuntimeException("Could not create FS base path: " + file);
}
}
*/
this.taskTrackers = taskTrackers;
this.dataNodes = dataNodes;
}
/**
* Indicates if the MR is running in local or cluster mode.
*
* @return returns TRUE if the MR is running locally, FALSE if running in
* cluster mode.
*/
public boolean isLocalMR() {
return localMR;
}
/**
* Indicates if the filesystem is local or DFS.
*
* @return returns TRUE if the filesystem is local, FALSE if it is DFS.
*/
public boolean isLocalFS() {
return localFS;
}
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
private FileSystem fileSystem = null;
/**
* Creates Hadoop instance based on constructor configuration before
* a test case is run.
*
* @throws Exception
*/
protected void setUp() throws Exception {
super.setUp();
if (localFS) {
fileSystem = FileSystem.getLocal(new JobConf());
}
else {
dfsCluster = new MiniDFSCluster.Builder(new JobConf())
.numDataNodes(dataNodes).build();
fileSystem = dfsCluster.getFileSystem();
}
if (localMR) {
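      // Nothing to start for local MR; createJobConf() below selects the local
      // framework via MRConfig.FRAMEWORK_NAME.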
}
else {
//noinspection deprecation
mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getUri().toString(), 1);
}
}
/**
* Destroys Hadoop instance based on constructor configuration after
* a test case is run.
*
* @throws Exception
*/
protected void tearDown() throws Exception {
try {
if (mrCluster != null) {
mrCluster.shutdown();
}
}
catch (Exception ex) {
System.out.println(ex);
}
try {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
catch (Exception ex) {
System.out.println(ex);
}
super.tearDown();
}
/**
* Returns the Filesystem in use.
*
* TestCases should use this Filesystem as it
* is properly configured with the workingDir for relative PATHs.
*
* @return the filesystem used by Hadoop.
*/
protected FileSystem getFileSystem() {
return fileSystem;
}
/**
* Returns a job configuration preconfigured to run against the Hadoop
* managed by the testcase.
* @return configuration that works on the testcase Hadoop instance
*/
protected JobConf createJobConf() {
if (localMR) {
JobConf conf = new JobConf();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
return conf;
}
else {
return mrCluster.createJobConf();
}
}
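
  // ---------------------------------------------------------------------------
  // Illustrative usage sketch (not part of the original class; the subclass,
  // paths and job settings below are hypothetical). It only shows the intended
  // pattern: pick the MR/FS modes in the constructor, then build job
  // configurations with createJobConf() and resolve paths against
  // getFileSystem().
  //
  //   public class MyFormatTest extends HadoopTestCase {
  //     public MyFormatTest() throws IOException {
  //       super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
  //     }
  //     public void testJob() throws Exception {
  //       JobConf conf = createJobConf();   // preconfigured for this instance
  //       FileSystem fs = getFileSystem();  // matching filesystem
  //       Path in = new Path("testing/in");
  //       Path out = new Path("testing/out");
  //       // ... write input under 'in', set input/output formats and paths,
  //       // then submit with JobClient.runJob(conf) and assert on the output.
  //     }
  //   }
  // ---------------------------------------------------------------------------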
}
| 6,242 | 27.902778 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleTextOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import junit.framework.TestCase;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.lib.*;
public class TestMultipleTextOutputFormat extends TestCase {
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
// A random task attempt id for testing.
private static String attempt = "attempt_200707121733_0001_m_000000_0";
private static Path workDir =
new Path(new Path(
new Path(System.getProperty("test.build.data", "."),
"data"),
FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt);
private static void writeData(RecordWriter<Text, Text> rw) throws IOException {
for (int i = 10; i < 40; i++) {
String k = "" + i;
String v = "" + i;
rw.write(new Text(k), new Text(v));
}
}
static class KeyBasedMultipleTextOutputFormat extends MultipleTextOutputFormat<Text, Text> {
protected String generateFileNameForKeyValue(Text key, Text v, String name) {
return key.toString().substring(0, 1) + "-" + name;
}
}
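  // With the generator above, a record whose key is "17" written through
  // "part-00000" lands in a file named "1-part-00000"; the keys 10-39 used by
  // writeData() therefore fan out into 1-part-00000, 2-part-00000 and
  // 3-part-00000, which is exactly what testFormat() checks for test1().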
private static void test1(JobConf job) throws IOException {
FileSystem fs = FileSystem.getLocal(job);
String name = "part-00000";
KeyBasedMultipleTextOutputFormat theOutputFormat = new KeyBasedMultipleTextOutputFormat();
RecordWriter<Text, Text> rw = theOutputFormat.getRecordWriter(fs, job, name, null);
writeData(rw);
rw.close(null);
}
private static void test2(JobConf job) throws IOException {
FileSystem fs = FileSystem.getLocal(job);
String name = "part-00000";
    // pretend that we have an input file whose path is "1/2/3"
job.set(JobContext.MAP_INPUT_FILE, "1/2/3");
// we use the last two legs of the input file as the output file
job.set("mapred.outputformat.numOfTrailingLegs", "2");
MultipleTextOutputFormat<Text, Text> theOutputFormat = new MultipleTextOutputFormat<Text, Text>();
RecordWriter<Text, Text> rw = theOutputFormat.getRecordWriter(fs, job, name, null);
writeData(rw);
rw.close(null);
}
public void testFormat() throws Exception {
JobConf job = new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job, workDir);
FileSystem fs = workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
//System.out.printf("workdir: %s\n", workDir.toString());
TestMultipleTextOutputFormat.test1(job);
TestMultipleTextOutputFormat.test2(job);
String file_11 = "1-part-00000";
File expectedFile_11 = new File(new Path(workDir, file_11).toString());
//System.out.printf("expectedFile_11: %s\n", new Path(workDir, file_11).toString());
StringBuffer expectedOutput = new StringBuffer();
for (int i = 10; i < 20; i++) {
expectedOutput.append(""+i).append('\t').append(""+i).append("\n");
}
String output = UtilsForTests.slurp(expectedFile_11);
//System.out.printf("File_2 output: %s\n", output);
assertEquals(output, expectedOutput.toString());
String file_12 = "2-part-00000";
File expectedFile_12 = new File(new Path(workDir, file_12).toString());
//System.out.printf("expectedFile_12: %s\n", new Path(workDir, file_12).toString());
expectedOutput = new StringBuffer();
for (int i = 20; i < 30; i++) {
expectedOutput.append(""+i).append('\t').append(""+i).append("\n");
}
output = UtilsForTests.slurp(expectedFile_12);
//System.out.printf("File_2 output: %s\n", output);
assertEquals(output, expectedOutput.toString());
String file_13 = "3-part-00000";
File expectedFile_13 = new File(new Path(workDir, file_13).toString());
//System.out.printf("expectedFile_13: %s\n", new Path(workDir, file_13).toString());
expectedOutput = new StringBuffer();
for (int i = 30; i < 40; i++) {
expectedOutput.append(""+i).append('\t').append(""+i).append("\n");
}
output = UtilsForTests.slurp(expectedFile_13);
//System.out.printf("File_2 output: %s\n", output);
assertEquals(output, expectedOutput.toString());
String file_2 = "2/3";
File expectedFile_2 = new File(new Path(workDir, file_2).toString());
//System.out.printf("expectedFile_2: %s\n", new Path(workDir, file_2).toString());
expectedOutput = new StringBuffer();
for (int i = 10; i < 40; i++) {
expectedOutput.append(""+i).append('\t').append(""+i).append("\n");
}
output = UtilsForTests.slurp(expectedFile_2);
//System.out.printf("File_2 output: %s\n", output);
assertEquals(output, expectedOutput.toString());
}
public static void main(String[] args) throws Exception {
new TestMultipleTextOutputFormat().testFormat();
}
}
| 5,941 | 37.836601 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/EmptyInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
/**
* InputFormat which simulates the absence of input data
 * by returning zero splits.
*/
public class EmptyInputFormat<K, V> implements InputFormat<K, V> {
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
return new InputSplit[0];
}
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job,
Reporter reporter) throws IOException {
return new RecordReader<K,V>() {
public boolean next(K key, V value) throws IOException { return false; }
public K createKey() { return null; }
public V createValue() { return null; }
public long getPos() throws IOException { return 0L; }
public void close() throws IOException { }
public float getProgress() throws IOException { return 0.0f; }
};
}
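  // Usage sketch (hypothetical, not taken from a caller of this class): a job
  // that needs no input data can simply plug this format in, e.g.
  //   conf.setInputFormat(EmptyInputFormat.class);
  // No map tasks are launched because getSplits() returns an empty array.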
}
| 1,735 | 36.73913 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.MRConfig;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Formatter;
import java.util.Iterator;
public class TestReduceFetchFromPartialMem extends TestCase {
protected static MiniMRCluster mrCluster = null;
protected static MiniDFSCluster dfsCluster = null;
protected static TestSuite mySuite;
protected static void setSuite(Class<? extends TestCase> klass) {
mySuite = new TestSuite(klass);
}
static {
setSuite(TestReduceFetchFromPartialMem.class);
}
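  // suite() below wraps the tests in a JUnit 3 TestSetup so the MiniDFS and
  // MiniMR clusters are brought up once for the whole suite and torn down again
  // after all test methods have run.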
public static Test suite() {
TestSetup setup = new TestSetup(mySuite) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
mrCluster = new MiniMRCluster(2,
dfsCluster.getFileSystem().getUri().toString(), 1);
}
protected void tearDown() throws Exception {
if (dfsCluster != null) { dfsCluster.shutdown(); }
if (mrCluster != null) { mrCluster.shutdown(); }
}
};
return setup;
}
private static final String tagfmt = "%04d";
private static final String keyfmt = "KEYKEYKEYKEYKEYKEYKE";
private static final int keylen = keyfmt.length();
private static int getValLen(int id, int nMaps) {
return 4096 / nMaps * (id + 1);
}
/** Verify that at least one segment does not hit disk */
public void testReduceFromPartialMem() throws Exception {
final int MAP_TASKS = 7;
JobConf job = mrCluster.createJobConf();
job.setNumMapTasks(MAP_TASKS);
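    // Shuffle tuning for this test:
    // - REDUCE_MERGE_INMEM_THRESHOLD = 0 disables the segment-count trigger for
    //   in-memory merges.
    // - REDUCE_INPUT_BUFFER_PERCENT = 1.0 lets the reduce retain fetched map
    //   outputs in memory while the reduce phase runs.
    // - SHUFFLE_INPUT_BUFFER_PERCENT / SHUFFLE_MERGE_PERCENT size the shuffle
    //   buffer so some, but not all, segments stay in memory, which is what the
    //   spill-count assertion at the end relies on.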
job.setInt(JobContext.REDUCE_MERGE_INMEM_THRESHOLD, 0);
job.set(JobContext.REDUCE_INPUT_BUFFER_PERCENT, "1.0");
job.setInt(JobContext.SHUFFLE_PARALLEL_COPIES, 1);
job.setInt(JobContext.IO_SORT_MB, 10);
job.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx128m");
job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
job.set(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, "0.14");
job.set(JobContext.SHUFFLE_MERGE_PERCENT, "1.0");
Counters c = runJob(job);
final long out = c.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getCounter();
final long spill = c.findCounter(TaskCounter.SPILLED_RECORDS).getCounter();
assertTrue("Expected some records not spilled during reduce" + spill + ")",
spill < 2 * out); // spilled map records, some records at the reduce
}
/**
* Emit 4096 small keys, 2 "tagged" keys. Emits a fixed amount of
* data so the in-memory fetch semantics can be tested.
*/
public static class MapMB implements
Mapper<NullWritable,NullWritable,Text,Text> {
private int id;
private int nMaps;
private final Text key = new Text();
private final Text val = new Text();
private final byte[] b = new byte[4096];
private final Formatter fmt = new Formatter(new StringBuilder(25));
@Override
public void configure(JobConf conf) {
nMaps = conf.getNumMapTasks();
id = nMaps - conf.getInt(JobContext.TASK_PARTITION, -1) - 1;
Arrays.fill(b, 0, 4096, (byte)'V');
((StringBuilder)fmt.out()).append(keyfmt);
}
@Override
public void map(NullWritable nk, NullWritable nv,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
// Emit 4096 fixed-size records
val.set(b, 0, 1000);
val.getBytes()[0] = (byte) id;
for (int i = 0; i < 4096; ++i) {
key.set(fmt.format(tagfmt, i).toString());
output.collect(key, val);
((StringBuilder)fmt.out()).setLength(keylen);
}
// Emit two "tagged" records from the map. To validate the merge, segments
// should have both a small and large record such that reading a large
// record from an on-disk segment into an in-memory segment will write
// over the beginning of a record in the in-memory segment, causing the
// merge and/or validation to fail.
// Add small, tagged record
val.set(b, 0, getValLen(id, nMaps) - 128);
val.getBytes()[0] = (byte) id;
((StringBuilder)fmt.out()).setLength(keylen);
key.set("A" + fmt.format(tagfmt, id).toString());
output.collect(key, val);
// Add large, tagged record
val.set(b, 0, getValLen(id, nMaps));
val.getBytes()[0] = (byte) id;
((StringBuilder)fmt.out()).setLength(keylen);
key.set("B" + fmt.format(tagfmt, id).toString());
output.collect(key, val);
}
@Override
public void close() throws IOException { }
}
/**
* Confirm that each small key is emitted once by all maps, each tagged key
* is emitted by only one map, all IDs are consistent with record data, and
* all non-ID record data is consistent.
*/
public static class MBValidate
implements Reducer<Text,Text,Text,Text> {
private static int nMaps;
private static final Text vb = new Text();
static {
byte[] v = new byte[4096];
Arrays.fill(v, (byte)'V');
vb.set(v);
}
private int nRec = 0;
private int nKey = -1;
private int aKey = -1;
private int bKey = -1;
private final Text kb = new Text();
private final Formatter fmt = new Formatter(new StringBuilder(25));
@Override
public void configure(JobConf conf) {
nMaps = conf.getNumMapTasks();
((StringBuilder)fmt.out()).append(keyfmt);
}
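    // Key layout written by MapMB: plain keys are the 20-char base plus a
    // 4-digit counter (24 bytes); tagged keys prepend 'A' or 'B' (25 bytes),
    // which is how reduce() below tells the two cases apart.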
@Override
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text,Text> out, Reporter reporter)
throws IOException {
int vc = 0;
final int vlen;
final int preRec = nRec;
final int vcCheck, recCheck;
((StringBuilder)fmt.out()).setLength(keylen);
if (25 == key.getLength()) {
// tagged record
recCheck = 1; // expect only 1 record
switch ((char)key.getBytes()[0]) {
case 'A':
vlen = getValLen(++aKey, nMaps) - 128;
vcCheck = aKey; // expect eq id
break;
case 'B':
vlen = getValLen(++bKey, nMaps);
vcCheck = bKey; // expect eq id
break;
default:
vlen = vcCheck = -1;
fail("Unexpected tag on record: " + ((char)key.getBytes()[24]));
}
kb.set((char)key.getBytes()[0] + fmt.format(tagfmt,vcCheck).toString());
} else {
kb.set(fmt.format(tagfmt, ++nKey).toString());
vlen = 1000;
recCheck = nMaps; // expect 1 rec per map
vcCheck = (nMaps * (nMaps - 1)) >>> 1; // expect eq sum(id)
}
assertEquals(kb, key);
while (values.hasNext()) {
final Text val = values.next();
// increment vc by map ID assoc w/ val
vc += val.getBytes()[0];
// verify that all the fixed characters 'V' match
assertEquals(0, WritableComparator.compareBytes(
vb.getBytes(), 1, vlen - 1,
val.getBytes(), 1, val.getLength() - 1));
out.collect(key, val);
++nRec;
}
assertEquals("Bad rec count for " + key, recCheck, nRec - preRec);
assertEquals("Bad rec group for " + key, vcCheck, vc);
}
@Override
public void close() throws IOException {
assertEquals(4095, nKey);
assertEquals(nMaps - 1, aKey);
assertEquals(nMaps - 1, bKey);
assertEquals("Bad record count", nMaps * (4096 + 2), nRec);
}
}
public static class FakeSplit implements InputSplit {
public void write(DataOutput out) throws IOException { }
public void readFields(DataInput in) throws IOException { }
public long getLength() { return 0L; }
public String[] getLocations() { return new String[0]; }
}
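  // FakeIF fabricates the requested number of empty splits; each split's record
  // reader yields exactly one NullWritable pair, so every map task invokes
  // MapMB.map() once without reading any real input.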
public static class FakeIF
implements InputFormat<NullWritable,NullWritable> {
public FakeIF() { }
public InputSplit[] getSplits(JobConf conf, int numSplits) {
InputSplit[] splits = new InputSplit[numSplits];
for (int i = 0; i < splits.length; ++i) {
splits[i] = new FakeSplit();
}
return splits;
}
public RecordReader<NullWritable,NullWritable> getRecordReader(
InputSplit ignored, JobConf conf, Reporter reporter) {
return new RecordReader<NullWritable,NullWritable>() {
private boolean done = false;
public boolean next(NullWritable key, NullWritable value)
throws IOException {
if (done)
return false;
done = true;
return true;
}
public NullWritable createKey() { return NullWritable.get(); }
public NullWritable createValue() { return NullWritable.get(); }
public long getPos() throws IOException { return 0L; }
public void close() throws IOException { }
public float getProgress() throws IOException { return 0.0f; }
};
}
}
public static Counters runJob(JobConf conf) throws Exception {
conf.setMapperClass(MapMB.class);
conf.setReducerClass(MBValidate.class);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
conf.setNumReduceTasks(1);
conf.setInputFormat(FakeIF.class);
conf.setNumTasksToExecutePerJvm(1);
conf.setInt(JobContext.MAP_MAX_ATTEMPTS, 0);
conf.setInt(JobContext.REDUCE_MAX_ATTEMPTS, 0);
FileInputFormat.setInputPaths(conf, new Path("/in"));
final Path outp = new Path("/out");
FileOutputFormat.setOutputPath(conf, outp);
RunningJob job = null;
try {
job = JobClient.runJob(conf);
assertTrue(job.isSuccessful());
} finally {
FileSystem fs = dfsCluster.getFileSystem();
if (fs.exists(outp)) {
fs.delete(outp, true);
}
}
return job.getCounters();
}
}
| 11,070 | 34.483974 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Appender;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.SimpleLayout;
import org.apache.log4j.WriterAppender;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Test YarnRunner and make sure the client side plugin works
* fine
*/
public class TestYARNRunner extends TestCase {
private static final Log LOG = LogFactory.getLog(TestYARNRunner.class);
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
// prefix before <LOG_DIR>/profile.out
private static final String PROFILE_PARAMS =
MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.substring(0,
MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%"));
private YARNRunner yarnRunner;
private ResourceMgrDelegate resourceMgrDelegate;
private YarnConfiguration conf;
private ClientCache clientCache;
private ApplicationId appId;
private JobID jobId;
private File testWorkDir =
new File("target", TestYARNRunner.class.getName());
private ApplicationSubmissionContext submissionContext;
private ClientServiceDelegate clientDelegate;
private static final String failString = "Rejected job";
@Before
public void setUp() throws Exception {
resourceMgrDelegate = mock(ResourceMgrDelegate.class);
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
clientCache = new ClientCache(conf, resourceMgrDelegate);
clientCache = spy(clientCache);
yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
yarnRunner = spy(yarnRunner);
submissionContext = mock(ApplicationSubmissionContext.class);
doAnswer(
new Answer<ApplicationSubmissionContext>() {
@Override
public ApplicationSubmissionContext answer(InvocationOnMock invocation)
throws Throwable {
return submissionContext;
}
}
).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class),
any(String.class), any(Credentials.class));
appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
jobId = TypeConverter.fromYarn(appId);
if (testWorkDir.exists()) {
FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
}
testWorkDir.mkdirs();
}
@After
public void cleanup() {
FileUtil.fullyDelete(testWorkDir);
}
@Test(timeout=20000)
public void testJobKill() throws Exception {
clientDelegate = mock(ClientServiceDelegate.class);
when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(new
org.apache.hadoop.mapreduce.JobStatus(jobId, 0f, 0f, 0f, 0f,
State.PREP, JobPriority.HIGH, "tmp", "tmp", "tmp", "tmp"));
when(clientDelegate.killJob(any(JobID.class))).thenReturn(true);
doAnswer(
new Answer<ClientServiceDelegate>() {
@Override
public ClientServiceDelegate answer(InvocationOnMock invocation)
throws Throwable {
return clientDelegate;
}
}
).when(clientCache).getClient(any(JobID.class));
yarnRunner.killJob(jobId);
verify(resourceMgrDelegate).killApplication(appId);
when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(new
org.apache.hadoop.mapreduce.JobStatus(jobId, 0f, 0f, 0f, 0f,
State.RUNNING, JobPriority.HIGH, "tmp", "tmp", "tmp", "tmp"));
yarnRunner.killJob(jobId);
verify(clientDelegate).killJob(jobId);
when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(null);
when(resourceMgrDelegate.getApplicationReport(any(ApplicationId.class)))
.thenReturn(
ApplicationReport.newInstance(appId, null, "tmp", "tmp", "tmp",
"tmp", 0, null, YarnApplicationState.FINISHED, "tmp", "tmp",
0l, 0l, FinalApplicationStatus.SUCCEEDED, null, null, 0f,
"tmp", null));
yarnRunner.killJob(jobId);
verify(clientDelegate).killJob(jobId);
}
@Test(timeout=60000)
public void testJobKillTimeout() throws Exception {
long timeToWaitBeforeHardKill =
10000 + MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS;
conf.setLong(MRJobConfig.MR_AM_HARD_KILL_TIMEOUT_MS,
timeToWaitBeforeHardKill);
clientDelegate = mock(ClientServiceDelegate.class);
doAnswer(
new Answer<ClientServiceDelegate>() {
@Override
public ClientServiceDelegate answer(InvocationOnMock invocation)
throws Throwable {
return clientDelegate;
}
}
).when(clientCache).getClient(any(JobID.class));
when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(new
org.apache.hadoop.mapreduce.JobStatus(jobId, 0f, 0f, 0f, 0f,
State.RUNNING, JobPriority.HIGH, "tmp", "tmp", "tmp", "tmp"));
long startTimeMillis = System.currentTimeMillis();
yarnRunner.killJob(jobId);
assertTrue("killJob should have waited at least " + timeToWaitBeforeHardKill
+ " ms.", System.currentTimeMillis() - startTimeMillis
>= timeToWaitBeforeHardKill);
}
@Test(timeout=20000)
public void testJobSubmissionFailure() throws Exception {
when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class))).
thenReturn(appId);
ApplicationReport report = mock(ApplicationReport.class);
when(report.getApplicationId()).thenReturn(appId);
when(report.getDiagnostics()).thenReturn(failString);
when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.FAILED);
when(resourceMgrDelegate.getApplicationReport(appId)).thenReturn(report);
Credentials credentials = new Credentials();
File jobxml = new File(testWorkDir, "job.xml");
OutputStream out = new FileOutputStream(jobxml);
conf.writeXml(out);
out.close();
try {
yarnRunner.submitJob(jobId, testWorkDir.getAbsolutePath().toString(), credentials);
} catch(IOException io) {
LOG.info("Logging exception:", io);
assertTrue(io.getLocalizedMessage().contains(failString));
}
}
@Test(timeout=20000)
public void testResourceMgrDelegate() throws Exception {
    /* we do not want a mock of the resource mgr delegate */
final ApplicationClientProtocol clientRMProtocol = mock(ApplicationClientProtocol.class);
ResourceMgrDelegate delegate = new ResourceMgrDelegate(conf) {
@Override
protected void serviceStart() throws Exception {
assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl) this.client).setRMClient(clientRMProtocol);
}
};
/* make sure kill calls finish application master */
when(clientRMProtocol.forceKillApplication(any(KillApplicationRequest.class)))
.thenReturn(KillApplicationResponse.newInstance(true));
delegate.killApplication(appId);
verify(clientRMProtocol).forceKillApplication(any(KillApplicationRequest.class));
/* make sure getalljobs calls get all applications */
when(clientRMProtocol.getApplications(any(GetApplicationsRequest.class))).
thenReturn(recordFactory.newRecordInstance(GetApplicationsResponse.class));
delegate.getAllJobs();
verify(clientRMProtocol).getApplications(any(GetApplicationsRequest.class));
/* make sure getapplication report is called */
when(clientRMProtocol.getApplicationReport(any(GetApplicationReportRequest.class)))
.thenReturn(recordFactory.newRecordInstance(GetApplicationReportResponse.class));
delegate.getApplicationReport(appId);
verify(clientRMProtocol).getApplicationReport(any(GetApplicationReportRequest.class));
/* make sure metrics is called */
GetClusterMetricsResponse clusterMetricsResponse = recordFactory.newRecordInstance
(GetClusterMetricsResponse.class);
clusterMetricsResponse.setClusterMetrics(recordFactory.newRecordInstance(
YarnClusterMetrics.class));
when(clientRMProtocol.getClusterMetrics(any(GetClusterMetricsRequest.class)))
.thenReturn(clusterMetricsResponse);
delegate.getClusterMetrics();
verify(clientRMProtocol).getClusterMetrics(any(GetClusterMetricsRequest.class));
when(clientRMProtocol.getClusterNodes(any(GetClusterNodesRequest.class))).
thenReturn(recordFactory.newRecordInstance(GetClusterNodesResponse.class));
delegate.getActiveTrackers();
verify(clientRMProtocol).getClusterNodes(any(GetClusterNodesRequest.class));
GetNewApplicationResponse newAppResponse = recordFactory.newRecordInstance(
GetNewApplicationResponse.class);
newAppResponse.setApplicationId(appId);
when(clientRMProtocol.getNewApplication(any(GetNewApplicationRequest.class))).
thenReturn(newAppResponse);
delegate.getNewJobID();
verify(clientRMProtocol).getNewApplication(any(GetNewApplicationRequest.class));
GetQueueInfoResponse queueInfoResponse = recordFactory.newRecordInstance(
GetQueueInfoResponse.class);
queueInfoResponse.setQueueInfo(recordFactory.newRecordInstance(QueueInfo.class));
when(clientRMProtocol.getQueueInfo(any(GetQueueInfoRequest.class))).
thenReturn(queueInfoResponse);
delegate.getQueues();
verify(clientRMProtocol).getQueueInfo(any(GetQueueInfoRequest.class));
GetQueueUserAclsInfoResponse aclResponse = recordFactory.newRecordInstance(
GetQueueUserAclsInfoResponse.class);
when(clientRMProtocol.getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class)))
.thenReturn(aclResponse);
delegate.getQueueAclsForCurrentUser();
verify(clientRMProtocol).getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class));
}
@Test(timeout=20000)
public void testGetHSDelegationToken() throws Exception {
try {
Configuration conf = new Configuration();
// Setup mock service
InetSocketAddress mockRmAddress = new InetSocketAddress("localhost", 4444);
Text rmTokenSevice = SecurityUtil.buildTokenService(mockRmAddress);
InetSocketAddress mockHsAddress = new InetSocketAddress("localhost", 9200);
Text hsTokenSevice = SecurityUtil.buildTokenService(mockHsAddress);
// Setup mock rm token
RMDelegationTokenIdentifier tokenIdentifier = new RMDelegationTokenIdentifier(
new Text("owner"), new Text("renewer"), new Text("real"));
Token<RMDelegationTokenIdentifier> token = new Token<RMDelegationTokenIdentifier>(
new byte[0], new byte[0], tokenIdentifier.getKind(), rmTokenSevice);
token.setKind(RMDelegationTokenIdentifier.KIND_NAME);
// Setup mock history token
org.apache.hadoop.yarn.api.records.Token historyToken =
org.apache.hadoop.yarn.api.records.Token.newInstance(new byte[0],
MRDelegationTokenIdentifier.KIND_NAME.toString(), new byte[0],
hsTokenSevice.toString());
GetDelegationTokenResponse getDtResponse =
Records.newRecord(GetDelegationTokenResponse.class);
getDtResponse.setDelegationToken(historyToken);
// mock services
MRClientProtocol mockHsProxy = mock(MRClientProtocol.class);
doReturn(mockHsAddress).when(mockHsProxy).getConnectAddress();
doReturn(getDtResponse).when(mockHsProxy).getDelegationToken(
any(GetDelegationTokenRequest.class));
ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
doReturn(rmTokenSevice).when(rmDelegate).getRMDelegationTokenService();
ClientCache clientCache = mock(ClientCache.class);
doReturn(mockHsProxy).when(clientCache).getInitializedHSProxy();
Credentials creds = new Credentials();
YARNRunner yarnRunner = new YARNRunner(conf, rmDelegate, clientCache);
// No HS token if no RM token
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(0)).getDelegationToken(
any(GetDelegationTokenRequest.class));
      // No HS token if RM token present, but security disabled.
creds.addToken(new Text("rmdt"), token);
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(0)).getDelegationToken(
any(GetDelegationTokenRequest.class));
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
creds = new Credentials();
// No HS token if no RM token, security enabled
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(0)).getDelegationToken(
any(GetDelegationTokenRequest.class));
// HS token if RM token present, security enabled
creds.addToken(new Text("rmdt"), token);
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(1)).getDelegationToken(
any(GetDelegationTokenRequest.class));
// No additional call to get HS token if RM and HS token present
yarnRunner.addHistoryToken(creds);
verify(mockHsProxy, times(1)).getDelegationToken(
any(GetDelegationTokenRequest.class));
} finally {
// Back to defaults.
UserGroupInformation.setConfiguration(new Configuration());
}
}
@Test(timeout=20000)
public void testHistoryServerToken() throws Exception {
//Set the master principal in the config
conf.set(YarnConfiguration.RM_PRINCIPAL,"foo@LOCAL");
final String masterPrincipal = Master.getMasterPrincipal(conf);
final MRClientProtocol hsProxy = mock(MRClientProtocol.class);
when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class))).thenAnswer(
new Answer<GetDelegationTokenResponse>() {
public GetDelegationTokenResponse answer(InvocationOnMock invocation) {
GetDelegationTokenRequest request =
(GetDelegationTokenRequest)invocation.getArguments()[0];
// check that the renewer matches the cluster's RM principal
assertEquals(masterPrincipal, request.getRenewer() );
org.apache.hadoop.yarn.api.records.Token token =
recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Token.class);
// none of these fields matter for the sake of the test
token.setKind("");
token.setService("");
token.setIdentifier(ByteBuffer.allocate(0));
token.setPassword(ByteBuffer.allocate(0));
GetDelegationTokenResponse tokenResponse =
recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
tokenResponse.setDelegationToken(token);
return tokenResponse;
}
});
UserGroupInformation.createRemoteUser("someone").doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
yarnRunner = new YARNRunner(conf, null, null);
yarnRunner.getDelegationTokenFromHS(hsProxy);
verify(hsProxy).
getDelegationToken(any(GetDelegationTokenRequest.class));
return null;
}
});
}
@Test(timeout=20000)
public void testAMAdminCommandOpts() throws Exception {
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m");
YARNRunner yarnRunner = new YARNRunner(jobConf);
ApplicationSubmissionContext submissionContext =
buildSubmitContext(yarnRunner, jobConf);
ContainerLaunchContext containerSpec = submissionContext.getAMContainerSpec();
List<String> commands = containerSpec.getCommands();
int index = 0;
int adminIndex = 0;
int adminPos = -1;
int userIndex = 0;
int userPos = -1;
for(String command : commands) {
if(command != null) {
assertFalse("Profiler should be disabled by default",
command.contains(PROFILE_PARAMS));
adminPos = command.indexOf("-Djava.net.preferIPv4Stack=true");
if(adminPos >= 0)
adminIndex = index;
userPos = command.indexOf("-Xmx1024m");
if(userPos >= 0)
userIndex = index;
}
index++;
}
// Check both admin java opts and user java opts are in the commands
assertTrue("AM admin command opts not in the commands.", adminPos > 0);
assertTrue("AM user command opts not in the commands.", userPos > 0);
// Check the admin java opts is before user java opts in the commands
if(adminIndex == userIndex) {
assertTrue("AM admin command opts is after user command opts.", adminPos < userPos);
} else {
assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
}
}
@Test(timeout=20000)
public void testWarnCommandOpts() throws Exception {
Logger logger = Logger.getLogger(YARNRunner.class);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
Layout layout = new SimpleLayout();
Appender appender = new WriterAppender(layout, bout);
logger.addAppender(appender);
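    // Capture YARNRunner's log output in memory: a WriterAppender backed by a
    // ByteArrayOutputStream is attached to the class logger so the warnings
    // emitted while building the submission context can be asserted on below.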
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
YARNRunner yarnRunner = new YARNRunner(jobConf);
@SuppressWarnings("unused")
ApplicationSubmissionContext submissionContext =
buildSubmitContext(yarnRunner, jobConf);
String logMsg = bout.toString();
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
"yarn.app.mapreduce.am.admin-command-opts can cause programs to no " +
"longer function if hadoop native libraries are used. These values " +
"should be set as part of the LD_LIBRARY_PATH in the app master JVM " +
"env using yarn.app.mapreduce.am.admin.user.env config settings."));
assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
"yarn.app.mapreduce.am.command-opts can cause programs to no longer " +
"function if hadoop native libraries are used. These values should " +
"be set as part of the LD_LIBRARY_PATH in the app master JVM env " +
"using yarn.app.mapreduce.am.env config settings."));
}
@Test(timeout=20000)
public void testAMProfiler() throws Exception {
JobConf jobConf = new JobConf();
jobConf.setBoolean(MRJobConfig.MR_AM_PROFILE, true);
YARNRunner yarnRunner = new YARNRunner(jobConf);
ApplicationSubmissionContext submissionContext =
buildSubmitContext(yarnRunner, jobConf);
ContainerLaunchContext containerSpec = submissionContext.getAMContainerSpec();
List<String> commands = containerSpec.getCommands();
for(String command : commands) {
if (command != null) {
if (command.contains(PROFILE_PARAMS)) {
return;
}
}
}
throw new IllegalStateException("Profiler opts not found!");
}
@Test
public void testNodeLabelExp() throws Exception {
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.JOB_NODE_LABEL_EXP, "GPU");
jobConf.set(MRJobConfig.AM_NODE_LABEL_EXP, "highMem");
YARNRunner yarnRunner = new YARNRunner(jobConf);
ApplicationSubmissionContext appSubCtx =
buildSubmitContext(yarnRunner, jobConf);
assertEquals(appSubCtx.getNodeLabelExpression(), "GPU");
assertEquals(appSubCtx.getAMContainerResourceRequest()
.getNodeLabelExpression(), "highMem");
}
@Test
public void testAMStandardEnv() throws Exception {
final String ADMIN_LIB_PATH = "foo";
final String USER_LIB_PATH = "bar";
final String USER_SHELL = "shell";
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_USER_ENV, "LD_LIBRARY_PATH=" +
ADMIN_LIB_PATH);
jobConf.set(MRJobConfig.MR_AM_ENV, "LD_LIBRARY_PATH="
+ USER_LIB_PATH);
jobConf.set(MRJobConfig.MAPRED_ADMIN_USER_SHELL, USER_SHELL);
YARNRunner yarnRunner = new YARNRunner(jobConf);
ApplicationSubmissionContext appSubCtx =
buildSubmitContext(yarnRunner, jobConf);
// make sure PWD is first in the lib path
ContainerLaunchContext clc = appSubCtx.getAMContainerSpec();
Map<String, String> env = clc.getEnvironment();
String libPath = env.get(Environment.LD_LIBRARY_PATH.name());
assertNotNull("LD_LIBRARY_PATH not set", libPath);
String cps = jobConf.getBoolean(
MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM)
? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
assertEquals("Bad AM LD_LIBRARY_PATH setting",
MRApps.crossPlatformifyMREnv(conf, Environment.PWD)
+ cps + ADMIN_LIB_PATH + cps + USER_LIB_PATH, libPath);
// make sure SHELL is set
String shell = env.get(Environment.SHELL.name());
assertNotNull("SHELL not set", shell);
assertEquals("Bad SHELL setting", USER_SHELL, shell);
}
private ApplicationSubmissionContext buildSubmitContext(
YARNRunner yarnRunner, JobConf jobConf) throws IOException {
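    // Write a job.xml plus empty split and split-metainfo files into the test
    // working directory so createApplicationSubmissionContext() finds the
    // artifacts it expects without going through a real job submission.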
File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE);
OutputStream out = new FileOutputStream(jobxml);
conf.writeXml(out);
out.close();
File jobsplit = new File(testWorkDir, MRJobConfig.JOB_SPLIT);
out = new FileOutputStream(jobsplit);
out.close();
File jobsplitmetainfo = new File(testWorkDir,
MRJobConfig.JOB_SPLIT_METAINFO);
out = new FileOutputStream(jobsplitmetainfo);
out.close();
return yarnRunner.createApplicationSubmissionContext(jobConf,
testWorkDir.toString(), new Credentials());
}
}
| 26,746 | 42.070853 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
public class TestSequenceFileInputFormat extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static int MAX_LENGTH = 10000;
private static Configuration conf = new Configuration();
public void testFormat() throws Exception {
JobConf job = new JobConf(conf);
FileSystem fs = FileSystem.getLocal(conf);
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path file = new Path(dir, "test.seq");
Reporter reporter = Reporter.NULL;
int seed = new Random().nextInt();
//LOG.info("seed = "+seed);
Random random = new Random(seed);
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
//LOG.info("creating; entries = " + length);
// create a file with length entries
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, file,
IntWritable.class, BytesWritable.class);
try {
for (int i = 0; i < length; i++) {
IntWritable key = new IntWritable(i);
byte[] data = new byte[random.nextInt(10)];
random.nextBytes(data);
BytesWritable value = new BytesWritable(data);
writer.append(key, value);
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
InputFormat<IntWritable, BytesWritable> format =
new SequenceFileInputFormat<IntWritable, BytesWritable>();
IntWritable key = new IntWritable();
BytesWritable value = new BytesWritable();
for (int i = 0; i < 3; i++) {
int numSplits =
random.nextInt(MAX_LENGTH/(SequenceFile.SYNC_INTERVAL/20))+1;
//LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(job, numSplits);
//LOG.info("splitting: got = " + splits.length);
// check each split
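        // (each key must be read by exactly one split: the BitSet records the
        //  keys seen so far, so both duplicates and omissions trip an assert)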
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.length; j++) {
RecordReader<IntWritable, BytesWritable> reader =
format.getRecordReader(splits[j], job, reporter);
try {
int count = 0;
while (reader.next(key, value)) {
// if (bits.get(key.get())) {
// LOG.info("splits["+j+"]="+splits[j]+" : " + key.get());
// LOG.info("@"+reader.getPos());
// }
assertFalse("Key in multiple partitions.", bits.get(key.get()));
bits.set(key.get());
count++;
}
//LOG.info("splits["+j+"]="+splits[j]+" count=" + count);
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
public static void main(String[] args) throws Exception {
new TestSequenceFileInputFormat().testFormat();
}
}
| 4,098 | 33.737288 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSortedRanges.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.Iterator;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.SortedRanges.Range;
public class TestSortedRanges extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestSortedRanges.class);
public void testAdd() {
SortedRanges sr = new SortedRanges();
sr.add(new Range(2,9));
assertEquals(9, sr.getIndicesCount());
sr.add(new SortedRanges.Range(3,5));
assertEquals(9, sr.getIndicesCount());
sr.add(new SortedRanges.Range(7,1));
assertEquals(9, sr.getIndicesCount());
sr.add(new Range(1,12));
assertEquals(12, sr.getIndicesCount());
sr.add(new Range(7,9));
assertEquals(15, sr.getIndicesCount());
sr.add(new Range(31,10));
sr.add(new Range(51,10));
sr.add(new Range(66,10));
assertEquals(45, sr.getIndicesCount());
sr.add(new Range(21,50));
assertEquals(70, sr.getIndicesCount());
LOG.debug(sr);
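    // By the adds above the ranges now cover the half-open intervals [1,16)
    // and [21,76) (15 + 55 = 70 indices), so the skip iterator below should
    // yield 0, then 16..20, then resume at 76.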
Iterator<Long> it = sr.skipRangeIterator();
int i = 0;
assertEquals(i, it.next().longValue());
for(i=16;i<21;i++) {
assertEquals(i, it.next().longValue());
}
assertEquals(76, it.next().longValue());
assertEquals(77, it.next().longValue());
}
public void testRemove() {
SortedRanges sr = new SortedRanges();
sr.add(new Range(2,19));
assertEquals(19, sr.getIndicesCount());
sr.remove(new SortedRanges.Range(15,8));
assertEquals(13, sr.getIndicesCount());
sr.remove(new SortedRanges.Range(6,5));
assertEquals(8, sr.getIndicesCount());
sr.remove(new SortedRanges.Range(8,4));
assertEquals(7, sr.getIndicesCount());
sr.add(new Range(18,5));
assertEquals(12, sr.getIndicesCount());
sr.add(new Range(25,1));
assertEquals(13, sr.getIndicesCount());
sr.remove(new SortedRanges.Range(7,24));
assertEquals(4, sr.getIndicesCount());
sr.remove(new SortedRanges.Range(5,1));
assertEquals(3, sr.getIndicesCount());
LOG.debug(sr);
}
}
| 2,974 | 28.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCFileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.JobContextImpl;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.JobStatus;
public class TestMRCJCFileOutputCommitter extends TestCase {
private static Path outDir = new Path(
System.getProperty("test.build.data", "/tmp"), "output");
// A random task attempt id for testing.
private static String attempt = "attempt_200707121733_0001_m_000000_0";
private static TaskAttemptID taskID = TaskAttemptID.forName(attempt);
private Text key1 = new Text("key1");
private Text key2 = new Text("key2");
private Text val1 = new Text("val1");
private Text val2 = new Text("val2");
@SuppressWarnings("unchecked")
private void writeOutput(RecordWriter theRecordWriter, Reporter reporter)
throws IOException {
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(reporter);
}
}
private void setConfForFileOutputCommitter(JobConf job) {
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
job.setOutputCommitter(FileOutputCommitter.class);
FileOutputFormat.setOutputPath(job, outDir);
}
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
JobConf job = new JobConf();
setConfForFileOutputCommitter(job);
JobContext jContext = new JobContextImpl(job, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
FileOutputFormat.setWorkOutputPath(job,
committer.getTaskAttemptPath(tContext));
committer.setupJob(jContext);
committer.setupTask(tContext);
String file = "test.txt";
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
// write output
FileSystem localFs = FileSystem.getLocal(job);
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter);
writeOutput(theRecordWriter, reporter);
// do commit
committer.commitTask(tContext);
committer.commitJob(jContext);
// validate output
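    // TextOutputFormat writes nothing for a null or NullWritable key/value and
    // drops the separator when one side is missing, which is why the expected
    // output below has fewer fields than the records written above.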
File expectedFile = new File(new Path(outDir, file).toString());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append('\t').append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append('\t').append(val2).append("\n");
String output = UtilsForTests.slurp(expectedFile);
assertEquals(output, expectedOutput.toString());
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testAbort() throws IOException {
JobConf job = new JobConf();
setConfForFileOutputCommitter(job);
JobContext jContext = new JobContextImpl(job, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
FileOutputFormat.setWorkOutputPath(job, committer
.getTaskAttemptPath(tContext));
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
String file = "test.txt";
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
// write output
FileSystem localFs = FileSystem.getLocal(job);
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs,
job, file, reporter);
writeOutput(theRecordWriter, reporter);
// do abort
committer.abortTask(tContext);
File expectedFile = new File(new Path(committer
.getTaskAttemptPath(tContext), file).toString());
assertFalse("task temp dir still exists", expectedFile.exists());
committer.abortJob(jContext, JobStatus.State.FAILED);
expectedFile = new File(new Path(outDir, FileOutputCommitter.TEMP_DIR_NAME)
.toString());
assertFalse("job temp dir "+expectedFile+" still exists", expectedFile.exists());
assertEquals("Output directory not empty", 0, new File(outDir.toString())
.listFiles().length);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public static class FakeFileSystem extends RawLocalFileSystem {
public FakeFileSystem() {
super();
}
public URI getUri() {
return URI.create("faildel:///");
}
@Override
public boolean delete(Path p, boolean recursive) throws IOException {
throw new IOException("fake delete failed");
}
}
public void testFailAbort() throws IOException {
JobConf job = new JobConf();
job.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
job.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
setConfForFileOutputCommitter(job);
JobContext jContext = new JobContextImpl(job, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
FileOutputFormat.setWorkOutputPath(job, committer
.getTaskAttemptPath(tContext));
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
String file = "test.txt";
File jobTmpDir = new File(committer.getJobAttemptPath(jContext).toUri().getPath());
File taskTmpDir = new File(committer.getTaskAttemptPath(tContext).toUri().getPath());
File expectedFile = new File(taskTmpDir, file);
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
// write output
FileSystem localFs = new FakeFileSystem();
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs,
job, expectedFile.getAbsolutePath(), reporter);
writeOutput(theRecordWriter, reporter);
// do abort
Throwable th = null;
try {
committer.abortTask(tContext);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
    assertTrue(expectedFile + " does not exist", expectedFile.exists());
th = null;
try {
committer.abortJob(jContext, JobStatus.State.FAILED);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
assertTrue("job temp dir does not exists", jobTmpDir.exists());
}
}
| 8,005 | 35.894009 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.MRCaching.TestResult;
import org.junit.Ignore;
/**
 * A JUnit test for caching with DFS
*
*/
@Ignore
public class TestMiniMRDFSCaching extends TestCase {
public void testWithDFS() throws IOException {
MiniMRCluster mr = null;
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
try {
JobConf conf = new JobConf();
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
MRCaching.setupCache("/cachedir", fileSys);
// run the wordcount example with caching
TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
"/testing/wc/output",
"/cachedir",
mr.createJobConf(),
"The quick brown fox\nhas many silly\n"
+ "red fox sox\n");
assertTrue("Archives not matching", ret.isOutputOk);
// launch MR cache with symlinks
ret = MRCaching.launchMRCache("/testing/wc/input",
"/testing/wc/output",
"/cachedir",
mr.createJobConf(),
"The quick brown fox\nhas many silly\n"
+ "red fox sox\n");
assertTrue("Archives not matching", ret.isOutputOk);
} finally {
if (fileSys != null) {
fileSys.close();
}
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.shutdown();
}
}
}
public static void main(String[] argv) throws Exception {
TestMiniMRDFSCaching td = new TestMiniMRDFSCaching();
td.testWithDFS();
}
}
| 2,874 | 35.392405 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Runs a job multiple times and takes average of all runs.
*/
public class MRBench extends Configured implements Tool{
private static final Log LOG = LogFactory.getLog(MRBench.class);
private static Path BASE_DIR =
new Path(System.getProperty("test.build.data","/benchmarks/MRBench"));
private static Path INPUT_DIR = new Path(BASE_DIR, "mr_input");
private static Path OUTPUT_DIR = new Path(BASE_DIR, "mr_output");
public static enum Order {RANDOM, ASCENDING, DESCENDING};
/**
   * Takes its input as lines of text, runs some processing on each line, and
   * writes the data out as text again.
*/
public static class Map extends MapReduceBase
implements Mapper<WritableComparable, Text, UTF8, UTF8> {
public void map(WritableComparable key, Text value,
OutputCollector<UTF8, UTF8> output,
Reporter reporter) throws IOException
{
String line = value.toString();
output.collect(new UTF8(process(line)), new UTF8(""));
}
public String process(String line) {
return line;
}
}
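  /**
   * Illustrative sketch only (an assumption, not used by the benchmark): the
   * identity {@code process} above can be overridden to plug in a per-line
   * transformation without changing the rest of the job setup.
   */
  public static class UpperCaseMap extends Map {
    @Override
    public String process(String line) {
      return line.toUpperCase();
    }
  }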
/**
   * Iterates over the values and writes each one to the output along with its key.
*/
public static class Reduce extends MapReduceBase
implements Reducer<UTF8, UTF8, UTF8, UTF8> {
public void reduce(UTF8 key, Iterator<UTF8> values,
OutputCollector<UTF8, UTF8> output, Reporter reporter) throws IOException
{
while(values.hasNext()) {
output.collect(key, new UTF8(values.next().toString()));
}
}
}
/**
* Generate a text file on the given filesystem with the given path name.
* The text file will contain the given number of lines of generated data.
* The generated data are string representations of numbers. Each line
* is the same length, which is achieved by padding each number with
* an appropriate number of leading '0' (zero) characters. The order of
* generated data is one of ascending, descending, or random.
*/
public void generateTextFile(FileSystem fs, Path inputFile,
long numLines, Order sortOrder) throws IOException
{
LOG.info("creating control file: "+numLines+" numLines, "+sortOrder+" sortOrder");
PrintStream output = null;
try {
output = new PrintStream(fs.create(inputFile));
int padding = String.valueOf(numLines).length();
switch(sortOrder) {
case RANDOM:
for (long l = 0; l < numLines; l++) {
output.println(pad((new Random()).nextLong(), padding));
}
break;
case ASCENDING:
for (long l = 0; l < numLines; l++) {
output.println(pad(l, padding));
}
break;
case DESCENDING:
for (long l = numLines; l > 0; l--) {
output.println(pad(l, padding));
}
break;
}
} finally {
if (output != null)
output.close();
}
LOG.info("created control file: " + inputFile);
}
/**
* Convert the given number to a string and pad the number with
* leading '0' (zero) characters so that the string is exactly
* the given length.
*/
private static String pad(long number, int length) {
String str = String.valueOf(number);
StringBuffer value = new StringBuffer();
for (int i = str.length(); i < length; i++) {
value.append("0");
}
value.append(str);
return value.toString();
}
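  // For illustration: pad(42, 5) yields "00042", so every generated line in
  // generateTextFile has the same length regardless of sort order.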
/**
* Create the job configuration.
*/
private JobConf setupJob(int numMaps, int numReduces, String jarFile) {
JobConf jobConf = new JobConf(getConf());
jobConf.setJarByClass(MRBench.class);
FileInputFormat.addInputPath(jobConf, INPUT_DIR);
jobConf.setInputFormat(TextInputFormat.class);
jobConf.setOutputFormat(TextOutputFormat.class);
jobConf.setOutputValueClass(UTF8.class);
jobConf.setMapOutputKeyClass(UTF8.class);
jobConf.setMapOutputValueClass(UTF8.class);
if (null != jarFile) {
jobConf.setJar(jarFile);
}
jobConf.setMapperClass(Map.class);
jobConf.setReducerClass(Reduce.class);
jobConf.setNumMapTasks(numMaps);
jobConf.setNumReduceTasks(numReduces);
jobConf
.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
return jobConf;
}
/**
   * Runs a MapReduce job the given number of times. The input to each run
   * is the same file.
*/
private ArrayList<Long> runJobInSequence(JobConf masterJobConf, int numRuns) throws IOException {
Random rand = new Random();
ArrayList<Long> execTimes = new ArrayList<Long>();
for (int i = 0; i < numRuns; i++) {
      // create a new job conf every time; reusing the same object does not work
JobConf jobConf = new JobConf(masterJobConf);
      // reset the job jar because the copy constructor doesn't copy it
jobConf.setJar(masterJobConf.getJar());
// give a new random name to output of the mapred tasks
FileOutputFormat.setOutputPath(jobConf,
new Path(OUTPUT_DIR, "output_" + rand.nextInt()));
LOG.info("Running job " + i + ":" +
" input=" + FileInputFormat.getInputPaths(jobConf)[0] +
" output=" + FileOutputFormat.getOutputPath(jobConf));
// run the mapred task now
long curTime = System.currentTimeMillis();
JobClient.runJob(jobConf);
execTimes.add(new Long(System.currentTimeMillis() - curTime));
}
return execTimes;
}
/**
* <pre>
* Usage: mrbench
* [-baseDir <base DFS path for output/input, default is /benchmarks/MRBench>]
* [-jar <local path to job jar file containing Mapper and Reducer implementations, default is current jar file>]
* [-numRuns <number of times to run the job, default is 1>]
* [-maps <number of maps for each run, default is 2>]
* [-reduces <number of reduces for each run, default is 1>]
* [-inputLines <number of input lines to generate, default is 1>]
* [-inputType <type of input to generate, one of ascending (default), descending, random>]
* [-verbose]
* </pre>
*/
public static void main (String[] args) throws Exception {
int res = ToolRunner.run(new MRBench(), args);
System.exit(res);
}
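  // Example invocation (illustrative; the jar name below is an assumption
  // about the local build and the "mrbench" program name in the test driver):
  //   hadoop jar hadoop-mapreduce-client-jobclient-tests.jar mrbench \
  //       -numRuns 5 -maps 4 -reduces 2 -inputLines 1000 -inputType random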
@Override
public int run(String[] args) throws Exception {
String version = "MRBenchmark.0.0.2";
System.out.println(version);
String usage =
"Usage: mrbench " +
"[-baseDir <base DFS path for output/input, default is /benchmarks/MRBench>] " +
"[-jar <local path to job jar file containing Mapper and Reducer implementations, default is current jar file>] " +
"[-numRuns <number of times to run the job, default is 1>] " +
"[-maps <number of maps for each run, default is 2>] " +
"[-reduces <number of reduces for each run, default is 1>] " +
"[-inputLines <number of input lines to generate, default is 1>] " +
"[-inputType <type of input to generate, one of ascending (default), descending, random>] " +
"[-verbose]";
String jarFile = null;
int inputLines = 1;
int numRuns = 1;
int numMaps = 2;
int numReduces = 1;
boolean verbose = false;
Order inputSortOrder = Order.ASCENDING;
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-jar")) {
jarFile = args[++i];
} else if (args[i].equals("-numRuns")) {
numRuns = Integer.parseInt(args[++i]);
} else if (args[i].equals("-baseDir")) {
BASE_DIR = new Path(args[++i]);
} else if (args[i].equals("-maps")) {
numMaps = Integer.parseInt(args[++i]);
} else if (args[i].equals("-reduces")) {
numReduces = Integer.parseInt(args[++i]);
} else if (args[i].equals("-inputLines")) {
inputLines = Integer.parseInt(args[++i]);
} else if (args[i].equals("-inputType")) {
String s = args[++i];
if (s.equalsIgnoreCase("ascending")) {
inputSortOrder = Order.ASCENDING;
} else if (s.equalsIgnoreCase("descending")) {
inputSortOrder = Order.DESCENDING;
} else if (s.equalsIgnoreCase("random")) {
inputSortOrder = Order.RANDOM;
} else {
inputSortOrder = null;
}
} else if (args[i].equals("-verbose")) {
verbose = true;
} else {
System.err.println(usage);
System.exit(-1);
}
}
if (numRuns < 1 || // verify args
numMaps < 1 ||
numReduces < 1 ||
inputLines < 0 ||
inputSortOrder == null)
{
System.err.println(usage);
return -1;
}
JobConf jobConf = setupJob(numMaps, numReduces, jarFile);
FileSystem fs = FileSystem.get(jobConf);
Path inputFile = new Path(INPUT_DIR, "input_" + (new Random()).nextInt() + ".txt");
generateTextFile(fs, inputFile, inputLines, inputSortOrder);
// setup test output directory
fs.mkdirs(BASE_DIR);
ArrayList<Long> execTimes = new ArrayList<Long>();
try {
execTimes = runJobInSequence(jobConf, numRuns);
} finally {
// delete output -- should we really do this?
fs.delete(BASE_DIR, true);
}
if (verbose) {
// Print out a report
System.out.println("Total MapReduce jobs executed: " + numRuns);
System.out.println("Total lines of data per job: " + inputLines);
System.out.println("Maps per job: " + numMaps);
System.out.println("Reduces per job: " + numReduces);
}
int i = 0;
long totalTime = 0;
for (Long time : execTimes) {
totalTime += time.longValue();
if (verbose) {
System.out.println("Total milliseconds for task: " + (++i) +
" = " + time);
}
}
long avgTime = totalTime / numRuns;
System.out.println("DataLines\tMaps\tReduces\tAvgTime (milliseconds)");
System.out.println(inputLines + "\t\t" + numMaps + "\t" +
numReduces + "\t" + avgTime);
return 0;
}
}
| 11,435 | 34.7375 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.TestMRJobClient;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.util.Tool;
import org.junit.Ignore;
@Ignore
public class TestMRCJCJobClient extends TestMRJobClient {
private String runJob() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));
Writer wr = new OutputStreamWriter(os);
wr.write("hello1\n");
wr.write("hello2\n");
wr.write("hello3\n");
wr.close();
JobConf conf = createJobConf();
conf.setJobName("mr");
conf.setJobPriority(JobPriority.HIGH);
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);
FileInputFormat.setInputPaths(conf, getInputDir());
FileOutputFormat.setOutputPath(conf, getOutputDir());
return JobClient.runJob(conf).getID().toString();
}
public static int runTool(Configuration conf, Tool tool, String[] args,
OutputStream out) throws Exception {
return TestMRJobClient.runTool(conf, tool, args, out);
}
static void verifyJobPriority(String jobId, String priority,
JobConf conf) throws Exception {
TestMRCJCJobClient test = new TestMRCJCJobClient();
test.verifyJobPriority(jobId, priority, conf, test.createJobClient());
}
public void testJobClient() throws Exception {
Configuration conf = createJobConf();
String jobId = runJob();
testGetCounter(jobId, conf);
testAllJobList(jobId, conf);
testChangingJobPriority(jobId, conf);
}
protected CLI createJobClient()
throws IOException {
return new JobClient();
}
}
| 3,116 | 33.252747 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLazyOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.lib.LazyOutputFormat;
import junit.framework.TestCase;
/**
 * A JUnit test for the Map-Reduce framework's feature of creating part
 * files only if there is an explicit output.collect(). This helps prevent
 * zero-byte part files.
*/
public class TestLazyOutput extends TestCase {
private static final int NUM_HADOOP_SLAVES = 3;
private static final int NUM_MAPS_PER_NODE = 2;
private static final Path INPUT = new Path("/testlazy/input");
private static final List<String> input =
Arrays.asList("All","Roads","Lead","To","Hadoop");
static class TestMapper extends MapReduceBase
implements Mapper<LongWritable, Text, LongWritable, Text> {
private String id;
public void configure(JobConf job) {
id = job.get(JobContext.TASK_ATTEMPT_ID);
}
public void map(LongWritable key, Text val,
OutputCollector<LongWritable, Text> output, Reporter reporter)
throws IOException {
// Everybody other than id 0 outputs
if (!id.endsWith("0_0")) {
output.collect(key, val);
}
}
}
static class TestReducer extends MapReduceBase
implements Reducer<LongWritable, Text, LongWritable, Text> {
private String id;
public void configure(JobConf job) {
id = job.get(JobContext.TASK_ATTEMPT_ID);
}
/** Writes all keys and values directly to output. */
public void reduce(LongWritable key, Iterator<Text> values,
OutputCollector<LongWritable, Text> output, Reporter reporter)
throws IOException {
while (values.hasNext()) {
Text v = values.next();
//Reducer 0 skips collect
if (!id.endsWith("0_0")) {
output.collect(key, v);
}
}
}
}
private static void runTestLazyOutput(JobConf job, Path output,
int numReducers, boolean createLazily)
throws Exception {
job.setJobName("test-lazy-output");
FileInputFormat.setInputPaths(job, INPUT);
FileOutputFormat.setOutputPath(job, output);
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(LongWritable.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(Text.class);
job.setMapperClass(TestMapper.class);
job.setReducerClass(TestReducer.class);
JobClient client = new JobClient(job);
job.setNumReduceTasks(numReducers);
if (createLazily) {
LazyOutputFormat.setOutputFormatClass
(job, TextOutputFormat.class);
} else {
job.setOutputFormat(TextOutputFormat.class);
}
JobClient.runJob(job);
}
public void createInput(FileSystem fs, int numMappers) throws Exception {
for (int i =0; i < numMappers; i++) {
OutputStream os = fs.create(new Path(INPUT,
"text" + i + ".txt"));
Writer wr = new OutputStreamWriter(os);
for(String inp : input) {
wr.write(inp+"\n");
}
wr.close();
}
}
public void testLazyOutput() throws Exception {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
try {
Configuration conf = new Configuration();
// Start the mini-MR and mini-DFS clusters
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_HADOOP_SLAVES)
.build();
fileSys = dfs.getFileSystem();
mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1);
int numReducers = 2;
int numMappers = NUM_HADOOP_SLAVES * NUM_MAPS_PER_NODE;
createInput(fileSys, numMappers);
Path output1 = new Path("/testlazy/output1");
// Test 1.
runTestLazyOutput(mr.createJobConf(), output1,
numReducers, true);
Path[] fileList =
FileUtil.stat2Paths(fileSys.listStatus(output1,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
System.out.println("Test1 File list[" + i + "]" + ": "+ fileList[i]);
}
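      // Reducer 0 never calls output.collect(), so with lazy output its part
      // file is never created and one file fewer than numReducers is expected.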
assertTrue(fileList.length == (numReducers - 1));
// Test 2. 0 Reducers, maps directly write to the output files
Path output2 = new Path("/testlazy/output2");
runTestLazyOutput(mr.createJobConf(), output2, 0, true);
fileList =
FileUtil.stat2Paths(fileSys.listStatus(output2,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
System.out.println("Test2 File list[" + i + "]" + ": "+ fileList[i]);
}
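      // With zero reducers the maps write the output directly; map 0 emits
      // nothing, so numMappers - 1 files are expected.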
assertTrue(fileList.length == numMappers - 1);
// Test 3. 0 Reducers, but flag is turned off
Path output3 = new Path("/testlazy/output3");
runTestLazyOutput(mr.createJobConf(), output3, 0, false);
fileList =
FileUtil.stat2Paths(fileSys.listStatus(output3,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
System.out.println("Test3 File list[" + i + "]" + ": "+ fileList[i]);
}
assertTrue(fileList.length == numMappers);
} finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();
}
}
}
}
| 6,558 | 31.795 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineOutputCollector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.mapred.Task.TaskReporter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
public class TestCombineOutputCollector {
private CombineOutputCollector<String, Integer> coc;
Counters.Counter outCounter = new Counters.Counter() {
private long value;
@Override
public void setValue(long value) {
this.value = value;
}
@Override
public void setDisplayName(String displayName) {
// TODO Auto-generated method stub
}
@Override
public void increment(long incr) {
this.value += incr;
}
@Override
public long getValue() {
return value;
}
@Override
public String getName() {
// TODO Auto-generated method stub
return null;
}
@Override
public String getDisplayName() {
// TODO Auto-generated method stub
return null;
}
@Override
public String makeEscapedCompactString() {
// TODO Auto-generated method stub
return null;
}
@Override
public long getCounter() {
return value;
}
@Override
public boolean contentEquals(Counter counter) {
// TODO Auto-generated method stub
return false;
}
@Override
public void write(DataOutput out) throws IOException {
}
@Override
public void readFields(DataInput in) throws IOException {
}
};
@Test
public void testCustomCollect() throws Throwable {
//mock creation
TaskReporter mockTaskReporter = mock(TaskReporter.class);
@SuppressWarnings("unchecked")
Writer<String, Integer> mockWriter = mock(Writer.class);
Configuration conf = new Configuration();
conf.set(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, "2");
coc = new CombineOutputCollector<String, Integer>(outCounter, mockTaskReporter, conf);
coc.setWriter(mockWriter);
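    // With COMBINE_RECORDS_BEFORE_PROGRESS set to 2 above, progress() should
    // be reported once for every second collect() call.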
verify(mockTaskReporter, never()).progress();
coc.collect("dummy", 1);
verify(mockTaskReporter, never()).progress();
coc.collect("dummy", 2);
verify(mockTaskReporter, times(1)).progress();
}
@Test
public void testDefaultCollect() throws Throwable {
//mock creation
TaskReporter mockTaskReporter = mock(TaskReporter.class);
@SuppressWarnings("unchecked")
Writer<String, Integer> mockWriter = mock(Writer.class);
Configuration conf = new Configuration();
coc = new CombineOutputCollector<String, Integer>(outCounter, mockTaskReporter, conf);
coc.setWriter(mockWriter);
verify(mockTaskReporter, never()).progress();
for(int i = 0; i < Task.DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS; i++) {
coc.collect("dummy", i);
}
verify(mockTaskReporter, times(1)).progress();
for(int i = 0; i < Task.DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS; i++) {
coc.collect("dummy", i);
}
verify(mockTaskReporter, times(2)).progress();
}
}
| 4,222 | 27.727891 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Ignore;
@Ignore
public class TestBadRecords extends ClusterMapReduceTestCase {
private static final Log LOG =
LogFactory.getLog(TestBadRecords.class);
private static final List<String> MAPPER_BAD_RECORDS =
Arrays.asList("hello01","hello04","hello05");
private static final List<String> REDUCER_BAD_RECORDS =
Arrays.asList("hello08","hello10");
private List<String> input;
public TestBadRecords() {
input = new ArrayList<String>();
for(int i=1;i<=10;i++) {
String str = ""+i;
int zerosToPrepend = 2 - str.length();
for(int j=0;j<zerosToPrepend;j++){
str = "0"+str;
}
input.add("hello"+str);
}
}
private void runMapReduce(JobConf conf,
List<String> mapperBadRecords, List<String> redBadRecords)
throws Exception {
createInput();
conf.setJobName("mr");
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
conf.setInt(JobContext.TASK_TIMEOUT, 30*1000);
SkipBadRecords.setMapperMaxSkipRecords(conf, Long.MAX_VALUE);
SkipBadRecords.setReducerMaxSkipGroups(conf, Long.MAX_VALUE);
SkipBadRecords.setAttemptsToStartSkipping(conf,0);
    // the number of attempts needed to complete the task successfully depends
    // on the number of bad records
conf.setMaxMapAttempts(SkipBadRecords.getAttemptsToStartSkipping(conf)+1+
mapperBadRecords.size());
conf.setMaxReduceAttempts(SkipBadRecords.getAttemptsToStartSkipping(conf)+
1+redBadRecords.size());
FileInputFormat.setInputPaths(conf, getInputDir());
FileOutputFormat.setOutputPath(conf, getOutputDir());
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
RunningJob runningJob = JobClient.runJob(conf);
validateOutput(conf, runningJob, mapperBadRecords, redBadRecords);
}
private void createInput() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));
Writer wr = new OutputStreamWriter(os);
for(String inp : input) {
wr.write(inp+"\n");
    }
    wr.close();
}
private void validateOutput(JobConf conf, RunningJob runningJob,
List<String> mapperBadRecords, List<String> redBadRecords)
throws Exception{
LOG.info(runningJob.getCounters().toString());
assertTrue(runningJob.isSuccessful());
//validate counters
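    // With skipping enabled the framework should skip exactly the bad records:
    // for this data set map input/output = 10 - 3 = 7 records and reduce
    // input/output = 7 - 2 = 5 records.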
Counters counters = runningJob.getCounters();
assertEquals(counters.findCounter(TaskCounter.MAP_SKIPPED_RECORDS).
getCounter(),mapperBadRecords.size());
int mapRecs = input.size() - mapperBadRecords.size();
assertEquals(counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).
getCounter(),mapRecs);
assertEquals(counters.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).
getCounter(),mapRecs);
int redRecs = mapRecs - redBadRecords.size();
assertEquals(counters.findCounter(TaskCounter.REDUCE_SKIPPED_RECORDS).
getCounter(),redBadRecords.size());
assertEquals(counters.findCounter(TaskCounter.REDUCE_SKIPPED_GROUPS).
getCounter(),redBadRecords.size());
assertEquals(counters.findCounter(TaskCounter.REDUCE_INPUT_GROUPS).
getCounter(),redRecs);
assertEquals(counters.findCounter(TaskCounter.REDUCE_INPUT_RECORDS).
getCounter(),redRecs);
assertEquals(counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).
getCounter(),redRecs);
//validate skipped records
Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
assertNotNull(skipDir);
Path[] skips = FileUtil.stat2Paths(getFileSystem().listStatus(skipDir));
List<String> mapSkipped = new ArrayList<String>();
List<String> redSkipped = new ArrayList<String>();
for(Path skipPath : skips) {
LOG.info("skipPath: " + skipPath);
SequenceFile.Reader reader = new SequenceFile.Reader(
getFileSystem(), skipPath, conf);
Object key = ReflectionUtils.newInstance(reader.getKeyClass(), conf);
Object value = ReflectionUtils.newInstance(reader.getValueClass(),
conf);
key = reader.next(key);
while(key!=null) {
value = reader.getCurrentValue(value);
LOG.debug("key:"+key+" value:"+value.toString());
if(skipPath.getName().contains("_r_")) {
redSkipped.add(value.toString());
} else {
mapSkipped.add(value.toString());
}
key = reader.next(key);
}
reader.close();
}
assertTrue(mapSkipped.containsAll(mapperBadRecords));
assertTrue(redSkipped.containsAll(redBadRecords));
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(getOutputDir(),
new Utils.OutputFileUtils.OutputFilesFilter()));
List<String> mapperOutput=getProcessed(input, mapperBadRecords);
LOG.debug("mapperOutput " + mapperOutput.size());
List<String> reducerOutput=getProcessed(mapperOutput, redBadRecords);
LOG.debug("reducerOutput " + reducerOutput.size());
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
int counter = 0;
while (line != null) {
counter++;
StringTokenizer tokeniz = new StringTokenizer(line, "\t");
String key = tokeniz.nextToken();
String value = tokeniz.nextToken();
LOG.debug("Output: key:"+key + " value:"+value);
assertTrue(value.contains("hello"));
assertTrue(reducerOutput.contains(value));
line = reader.readLine();
}
reader.close();
assertEquals(reducerOutput.size(), counter);
}
}
private List<String> getProcessed(List<String> inputs, List<String> badRecs) {
List<String> processed = new ArrayList<String>();
for(String input : inputs) {
if(!badRecs.contains(input)) {
processed.add(input);
}
}
return processed;
}
public void testBadMapRed() throws Exception {
JobConf conf = createJobConf();
conf.setMapperClass(BadMapper.class);
conf.setReducerClass(BadReducer.class);
runMapReduce(conf, MAPPER_BAD_RECORDS, REDUCER_BAD_RECORDS);
}
static class BadMapper extends MapReduceBase implements
Mapper<LongWritable, Text, LongWritable, Text> {
public void map(LongWritable key, Text val,
OutputCollector<LongWritable, Text> output, Reporter reporter)
throws IOException {
String str = val.toString();
LOG.debug("MAP key:" +key +" value:" + str);
if(MAPPER_BAD_RECORDS.get(0).equals(str)) {
LOG.warn("MAP Encountered BAD record");
System.exit(-1);
}
else if(MAPPER_BAD_RECORDS.get(1).equals(str)) {
LOG.warn("MAP Encountered BAD record");
throw new RuntimeException("Bad record "+str);
}
else if(MAPPER_BAD_RECORDS.get(2).equals(str)) {
try {
LOG.warn("MAP Encountered BAD record");
Thread.sleep(15*60*1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
output.collect(key, val);
}
}
static class BadReducer extends MapReduceBase implements
Reducer<LongWritable, Text, LongWritable, Text> {
public void reduce(LongWritable key, Iterator<Text> values,
OutputCollector<LongWritable, Text> output, Reporter reporter)
throws IOException {
while(values.hasNext()) {
Text value = values.next();
LOG.debug("REDUCE key:" +key +" value:" + value);
if(REDUCER_BAD_RECORDS.get(0).equals(value.toString())) {
LOG.warn("REDUCE Encountered BAD record");
System.exit(-1);
}
else if(REDUCER_BAD_RECORDS.get(1).equals(value.toString())) {
try {
LOG.warn("REDUCE Encountered BAD record");
Thread.sleep(15*60*1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
output.collect(key, value);
}
}
}
}
| 9,893 | 34.978182 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRBringup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import junit.framework.TestCase;
/**
 * A unit test for bringup and shutdown of the mini map-reduce cluster.
*/
public class TestMiniMRBringup extends TestCase {
public void testBringUp() throws IOException {
MiniMRCluster mr = null;
try {
mr = new MiniMRCluster(1, "local", 1);
} finally {
if (mr != null) { mr.shutdown(); }
}
}
}
| 1,241 | 30.846154 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFieldSelection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.lib.*;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper;
import org.apache.hadoop.mapreduce.lib.fieldsel.TestMRFieldSelection;
import junit.framework.TestCase;
import java.text.NumberFormat;
public class TestFieldSelection extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
public void testFieldSelection() throws Exception {
launch();
}
public static void launch() throws Exception {
JobConf conf = new JobConf(TestFieldSelection.class);
FileSystem fs = FileSystem.get(conf);
int numOfInputLines = 10;
Path OUTPUT_DIR = new Path("build/test/output_for_field_selection_test");
Path INPUT_DIR = new Path("build/test/input_for_field_selection_test");
String inputFile = "input.txt";
fs.delete(INPUT_DIR, true);
fs.mkdirs(INPUT_DIR);
fs.delete(OUTPUT_DIR, true);
StringBuffer inputData = new StringBuffer();
StringBuffer expectedOutput = new StringBuffer();
TestMRFieldSelection.constructInputOutputData(inputData,
expectedOutput, numOfInputLines);
FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
fileOut.write(inputData.toString().getBytes("utf-8"));
fileOut.close();
System.out.println("inputData:");
System.out.println(inputData.toString());
JobConf job = new JobConf(conf, TestFieldSelection.class);
FileInputFormat.setInputPaths(job, INPUT_DIR);
job.setInputFormat(TextInputFormat.class);
job.setMapperClass(FieldSelectionMapReduce.class);
job.setReducerClass(FieldSelectionMapReduce.class);
FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setOutputFormat(TextOutputFormat.class);
job.setNumReduceTasks(1);
job.set(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "-");
job.set(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "6,5,1-3:0-");
job.set(FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, ":4,3,2,1,0,0-");
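    // Key/value spec format (FieldSelectionHelper): fields before the ':' form
    // the output key and fields after it form the value; entries are zero-based
    // field indices, ranges such as "1-3", or open ranges such as "0-" meaning
    // field 0 and everything after it.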
JobClient.runJob(job);
    //
    // Finally, compare the job output with the expected output constructed
    // above.
    //
boolean success = true;
Path outPath = new Path(OUTPUT_DIR, "part-00000");
String outdata = MapReduceTestUtil.readOutput(outPath,job);
assertEquals(expectedOutput.toString(),outdata);
fs.delete(OUTPUT_DIR, true);
fs.delete(INPUT_DIR, true);
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
launch();
}
}
| 3,704 | 34.625 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
public class TestSequenceFileAsBinaryInputFormat extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static final int RECORDS = 10000;
public void testBinary() throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path file = new Path(dir, "testbinary.seq");
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
Text tkey = new Text();
Text tval = new Text();
SequenceFile.Writer writer =
new SequenceFile.Writer(fs, job, file, Text.class, Text.class);
try {
for (int i = 0; i < RECORDS; ++i) {
tkey.set(Integer.toString(r.nextInt(), 36));
tval.set(Long.toString(r.nextLong(), 36));
writer.append(tkey, tval);
}
} finally {
writer.close();
}
InputFormat<BytesWritable,BytesWritable> bformat =
new SequenceFileAsBinaryInputFormat();
int count = 0;
r.setSeed(seed);
BytesWritable bkey = new BytesWritable();
BytesWritable bval = new BytesWritable();
Text cmpkey = new Text();
Text cmpval = new Text();
DataInputBuffer buf = new DataInputBuffer();
final int NUM_SPLITS = 3;
FileInputFormat.setInputPaths(job, file);
for (InputSplit split : bformat.getSplits(job, NUM_SPLITS)) {
RecordReader<BytesWritable,BytesWritable> reader =
bformat.getRecordReader(split, job, Reporter.NULL);
try {
while (reader.next(bkey, bval)) {
tkey.set(Integer.toString(r.nextInt(), 36));
tval.set(Long.toString(r.nextLong(), 36));
buf.reset(bkey.getBytes(), bkey.getLength());
cmpkey.readFields(buf);
buf.reset(bval.getBytes(), bval.getLength());
cmpval.readFields(buf);
assertTrue(
"Keys don't match: " + "*" + cmpkey.toString() + ":" +
tkey.toString() + "*",
cmpkey.toString().equals(tkey.toString()));
assertTrue(
"Vals don't match: " + "*" + cmpval.toString() + ":" +
tval.toString() + "*",
cmpval.toString().equals(tval.toString()));
++count;
}
} finally {
reader.close();
}
}
assertEquals("Some records not found", RECORDS, count);
}
}
| 3,536 | 33.676471 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServlet;
import javax.servlet.ServletException;
import java.io.IOException;
import java.io.DataOutputStream;
/**
* Base class to test Job end notification in local and cluster mode.
*
* Starts up Hadoop in local or cluster mode (by extending the
* HadoopTestCase class) and starts a servlet engine that hosts a servlet
* which receives the notification of job finalization.
*
* The notification servlet returns an HTTP 400 the first time it is called
* and an HTTP 200 the second time, thus testing retry.
*
* In both cases the local file system is used (this is irrelevant to the
* tested functionality).
*/
public abstract class NotificationTestCase extends HadoopTestCase {
protected NotificationTestCase(int mode) throws IOException {
super(mode, HadoopTestCase.LOCAL_FS, 1, 1);
}
private int port;
private String contextPath = "/notification";
private String servletPath = "/mapred";
private Server webServer;
private void startHttpServer() throws Exception {
// Create the webServer
if (webServer != null) {
webServer.stop();
webServer = null;
}
webServer = new Server(0);
Context context = new Context(webServer, contextPath);
// create servlet handler
context.addServlet(new ServletHolder(new NotificationServlet()),
servletPath);
// Start webServer
webServer.start();
port = webServer.getConnectors()[0].getLocalPort();
}
private void stopHttpServer() throws Exception {
if (webServer != null) {
webServer.stop();
webServer.destroy();
webServer = null;
}
}
public static class NotificationServlet extends HttpServlet {
public static volatile int counter = 0;
public static volatile int failureCounter = 0;
private static final long serialVersionUID = 1L;
protected void doGet(HttpServletRequest req, HttpServletResponse res)
throws ServletException, IOException {
String queryString = req.getQueryString();
switch (counter) {
case 0:
verifyQuery(queryString, "SUCCEEDED");
break;
case 2:
verifyQuery(queryString, "KILLED");
break;
case 4:
verifyQuery(queryString, "FAILED");
break;
}
if (counter % 2 == 0) {
res.sendError(HttpServletResponse.SC_BAD_REQUEST, "forcing error");
}
else {
res.setStatus(HttpServletResponse.SC_OK);
}
counter++;
}
protected void verifyQuery(String query, String expected)
throws IOException {
if (query.contains(expected)) {
return;
}
failureCounter++;
fail("The request (" + query + ") does not contain " + expected);
}
}
private String getNotificationUrlTemplate() {
return "http://localhost:" + port + contextPath + servletPath +
"?jobId=$jobId&jobStatus=$jobStatus";
}
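// Illustrative sketch (assumption, not exercised by this test): the job end
// notification is expected to substitute $jobId and $jobStatus in the
// template above, yielding something like
//   http://localhost:<port>/notification/mapred?jobId=job_..._0001&jobStatus=SUCCEEDED
// A minimal stand-in for that substitution:
private static String expandNotificationUrl(String template, String jobId,
String jobStatus) {
return template.replace("$jobId", jobId).replace("$jobStatus", jobStatus);
}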
protected JobConf createJobConf() {
JobConf conf = super.createJobConf();
conf.setJobEndNotificationURI(getNotificationUrlTemplate());
conf.setInt(JobContext.MR_JOB_END_RETRY_ATTEMPTS, 3);
conf.setInt(JobContext.MR_JOB_END_RETRY_INTERVAL, 200);
return conf;
}
protected void setUp() throws Exception {
super.setUp();
startHttpServer();
}
protected void tearDown() throws Exception {
stopHttpServer();
super.tearDown();
}
public void testMR() throws Exception {
System.out.println(launchWordCount(this.createJobConf(),
"a b c d e f g h", 1, 1));
boolean keepTrying = true;
for (int tries = 0; tries < 30 && keepTrying; tries++) {
Thread.sleep(50);
keepTrying = !(NotificationServlet.counter == 2);
}
assertEquals(2, NotificationServlet.counter);
assertEquals(0, NotificationServlet.failureCounter);
Path inDir = new Path("notificationjob/input");
Path outDir = new Path("notificationjob/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data","/tmp")
.toString().replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
// run a job with KILLED status
System.out.println(UtilsForTests.runJobKill(this.createJobConf(), inDir,
outDir).getID());
keepTrying = true;
for (int tries = 0; tries < 30 && keepTrying; tries++) {
Thread.sleep(50);
keepTrying = !(NotificationServlet.counter == 4);
}
assertEquals(4, NotificationServlet.counter);
assertEquals(0, NotificationServlet.failureCounter);
// run a job with FAILED status
System.out.println(UtilsForTests.runJobFail(this.createJobConf(), inDir,
outDir).getID());
keepTrying = true;
for (int tries = 0; tries < 30 && keepTrying; tries++) {
Thread.sleep(50);
keepTrying = !(NotificationServlet.counter == 6);
}
assertEquals(6, NotificationServlet.counter);
assertEquals(0, NotificationServlet.failureCounter);
}
private String launchWordCount(JobConf conf,
String input,
int numMaps,
int numReduces) throws IOException {
Path inDir = new Path("testing/wc/input");
Path outDir = new Path("testing/wc/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data","/tmp")
.toString().replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(WordCount.MapClass.class);
conf.setCombinerClass(WordCount.Reduce.class);
conf.setReducerClass(WordCount.Reduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReduces);
JobClient.runJob(conf);
return MapReduceTestUtil.readOutput(outDir, conf);
}
}
| 8,097 | 31.918699 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/WordCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This is an example Hadoop Map/Reduce application.
* It reads the text input files, breaks each line into words
* and counts them. The output is a locally sorted list of words and the
* count of how often they occurred.
*
* To run: bin/hadoop jar build/hadoop-examples.jar wordcount
* [-m <i>maps</i>] [-r <i>reduces</i>] <i>in-dir</i> <i>out-dir</i>
*/
public class WordCount extends Configured implements Tool {
/**
* Counts the words in each line.
* For each line of input, break the line into words and emit them as
* (<b>word</b>, <b>1</b>).
*/
public static class MapClass extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
String line = value.toString();
StringTokenizer itr = new StringTokenizer(line);
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
output.collect(word, one);
}
}
}
/**
* A reducer class that just emits the sum of the input values.
*/
public static class Reduce extends MapReduceBase
implements Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterator<IntWritable> values,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
int sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
output.collect(key, new IntWritable(sum));
}
}
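// Worked example (illustrative): for the single input line "a b a",
// MapClass emits (a,1), (b,1), (a,1); after combining/reducing, the job
// output is (a,2) and (b,1).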
static int printUsage() {
System.out.println("wordcount [-m <maps>] [-r <reduces>] <input> <output>");
ToolRunner.printGenericCommandUsage(System.out);
return -1;
}
/**
* The main driver for word count map/reduce program.
* Invoke this method to submit the map/reduce job.
* @throws IOException When there is communication problems with the
* job tracker.
*/
public int run(String[] args) throws Exception {
JobConf conf = new JobConf(getConf(), WordCount.class);
conf.setJobName("wordcount");
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(MapClass.class);
conf.setCombinerClass(Reduce.class);
conf.setReducerClass(Reduce.class);
List<String> other_args = new ArrayList<String>();
for(int i=0; i < args.length; ++i) {
try {
if ("-m".equals(args[i])) {
conf.setNumMapTasks(Integer.parseInt(args[++i]));
} else if ("-r".equals(args[i])) {
conf.setNumReduceTasks(Integer.parseInt(args[++i]));
} else {
other_args.add(args[i]);
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + args[i]);
return printUsage();
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage();
}
}
// Make sure there are exactly 2 parameters left.
if (other_args.size() != 2) {
System.out.println("ERROR: Wrong number of parameters: " +
other_args.size() + " instead of 2.");
return printUsage();
}
FileInputFormat.setInputPaths(conf, other_args.get(0));
FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));
JobClient.runJob(conf);
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new WordCount(), args);
System.exit(res);
}
}
| 5,667 | 34.425 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestOldCombinerGrouping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.junit.Assert;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.UUID;
public class TestOldCombinerGrouping {
private static String TEST_ROOT_DIR =
new File("build", UUID.randomUUID().toString()).getAbsolutePath();
public static class Map implements
Mapper<LongWritable, Text, Text, LongWritable> {
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, LongWritable> output, Reporter reporter)
throws IOException {
String v = value.toString();
String k = v.substring(0, v.indexOf(","));
v = v.substring(v.indexOf(",") + 1);
output.collect(new Text(k), new LongWritable(Long.parseLong(v)));
}
@Override
public void close() throws IOException {
}
@Override
public void configure(JobConf job) {
}
}
public static class Reduce implements
Reducer<Text, LongWritable, Text, LongWritable> {
@Override
public void reduce(Text key, Iterator<LongWritable> values,
OutputCollector<Text, LongWritable> output, Reporter reporter)
throws IOException {
LongWritable maxValue = null;
while (values.hasNext()) {
LongWritable value = values.next();
if (maxValue == null) {
maxValue = value;
} else if (value.compareTo(maxValue) > 0) {
maxValue = value;
}
}
output.collect(key, maxValue);
}
@Override
public void close() throws IOException {
}
@Override
public void configure(JobConf job) {
}
}
public static class Combiner extends Reduce {
}
public static class GroupComparator implements RawComparator<Text> {
@Override
public int compare(byte[] bytes, int i, int i2, byte[] bytes2, int i3,
int i4) {
byte[] b1 = new byte[i2];
System.arraycopy(bytes, i, b1, 0, i2);
byte[] b2 = new byte[i4];
System.arraycopy(bytes2, i3, b2, 0, i4);
return compare(new Text(new String(b1)), new Text(new String(b2)));
}
@Override
public int compare(Text o1, Text o2) {
String s1 = o1.toString();
String s2 = o2.toString();
s1 = s1.substring(0, s1.indexOf("|"));
s2 = s2.substring(0, s2.indexOf("|"));
return s1.compareTo(s2);
}
}
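// Illustrative note (not part of the original test): the comparator groups
// keys by the text before '|', so for example
//   new GroupComparator().compare(new Text("A|a"), new Text("A|b")) == 0
// while "A|a" and "B|a" compare as unequal; this is what lets the combiner
// and reducer see all "A|*" values as a single group.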
@Test
public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in = new File(TEST_ROOT_DIR, "input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out = new File(TEST_ROOT_DIR, "output");
PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf job = new JobConf();
job.set("mapreduce.framework.name", "local");
TextInputFormat.setInputPaths(job, new Path(in.getPath()));
TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormat(TextOutputFormat.class);
job.setOutputValueGroupingComparator(GroupComparator.class);
job.setCombinerClass(Combiner.class);
job.setCombinerKeyGroupingComparator(GroupComparator.class);
job.setInt("min.num.spills.for.combine", 0);
JobClient client = new JobClient(job);
RunningJob runningJob = client.submitJob(job);
runningJob.waitForCompletion();
if (runningJob.isSuccessful()) {
Counters counters = runningJob.getCounters();
long combinerInputRecords = counters.getGroup(
"org.apache.hadoop.mapreduce.TaskCounter").
getCounter("COMBINE_INPUT_RECORDS");
long combinerOutputRecords = counters.getGroup(
"org.apache.hadoop.mapreduce.TaskCounter").
getCounter("COMBINE_OUTPUT_RECORDS");
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
BufferedReader br = new BufferedReader(new FileReader(
new File(out, "part-00000")));
Set<String> output = new HashSet<String>();
String line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNull(line);
br.close();
Set<String> expected = new HashSet<String>();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected, output);
} else {
Assert.fail("Job failed");
}
}
}
| 6,158 | 31.078125 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Checks the job submission options
* -jt local and -libjars.
*/
public class TestLocalJobSubmission {
private static Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"));
@Before
public void configure() throws Exception {
}
@After
public void cleanup() {
}
/**
* Tests the local job submission options
* -jt local and -libjars.
* @throws IOException
*/
@Test
public void testLocalJobLibjarsOption() throws IOException {
Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));
Configuration conf = new Configuration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
conf.set(MRConfig.FRAMEWORK_NAME, "local");
final String[] args = {
"-jt" , "local", "-libjars", jarPath.toString(),
"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
};
int res = -1;
try {
res = ToolRunner.run(conf, new SleepJob(), args);
} catch (Exception e) {
System.out.println("Job failed with " + e.getLocalizedMessage());
e.printStackTrace(System.out);
fail("Job failed");
}
assertEquals("dist job res is not 0:", 0, res);
}
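// Roughly equivalent command line (illustrative; the jar and program names
// are assumptions, not taken from this test):
//   hadoop jar hadoop-mapreduce-client-jobclient-tests.jar sleep \
//     -jt local -libjars /tmp/test.jar -m 1 -r 1 -mt 1 -rt 1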
private Path makeJar(Path p) throws IOException {
FileOutputStream fos = new FileOutputStream(new File(p.toString()));
JarOutputStream jos = new JarOutputStream(fos);
ZipEntry ze = new ZipEntry("test.jar.inside");
jos.putNextEntry(ze);
jos.write(("inside the jar!").getBytes());
jos.closeEntry();
jos.close();
return p;
}
}
| 2,949 | 30.052632 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
public class TestSequenceFileAsBinaryOutputFormat extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestSequenceFileAsBinaryOutputFormat.class.getName());
private static final int RECORDS = 10000;
// A random task attempt id for testing.
private static final String attempt = "attempt_200707121733_0001_m_000000_0";
public void testBinary() throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
Path dir =
new Path(new Path(new Path(System.getProperty("test.build.data",".")),
FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt);
Path file = new Path(dir, "testbinary.seq");
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
fs.delete(dir, true);
if (!fs.mkdirs(dir)) {
fail("Failed to create output directory");
}
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
FileOutputFormat.setOutputPath(job, dir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job, dir);
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job,
IntWritable.class );
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job,
DoubleWritable.class );
SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job,
CompressionType.BLOCK);
BytesWritable bkey = new BytesWritable();
BytesWritable bval = new BytesWritable();
RecordWriter <BytesWritable, BytesWritable> writer =
new SequenceFileAsBinaryOutputFormat().getRecordWriter(fs,
job, file.toString(),
Reporter.NULL);
IntWritable iwritable = new IntWritable();
DoubleWritable dwritable = new DoubleWritable();
DataOutputBuffer outbuf = new DataOutputBuffer();
LOG.info("Creating data by SequenceFileAsBinaryOutputFormat");
try {
for (int i = 0; i < RECORDS; ++i) {
iwritable = new IntWritable(r.nextInt());
iwritable.write(outbuf);
bkey.set(outbuf.getData(), 0, outbuf.getLength());
outbuf.reset();
dwritable = new DoubleWritable(r.nextDouble());
dwritable.write(outbuf);
bval.set(outbuf.getData(), 0, outbuf.getLength());
outbuf.reset();
writer.write(bkey, bval);
}
} finally {
writer.close(Reporter.NULL);
}
InputFormat<IntWritable,DoubleWritable> iformat =
new SequenceFileInputFormat<IntWritable,DoubleWritable>();
int count = 0;
r.setSeed(seed);
DataInputBuffer buf = new DataInputBuffer();
final int NUM_SPLITS = 3;
SequenceFileInputFormat.addInputPath(job, file);
LOG.info("Reading data by SequenceFileInputFormat");
for (InputSplit split : iformat.getSplits(job, NUM_SPLITS)) {
RecordReader<IntWritable,DoubleWritable> reader =
iformat.getRecordReader(split, job, Reporter.NULL);
try {
int sourceInt;
double sourceDouble;
while (reader.next(iwritable, dwritable)) {
sourceInt = r.nextInt();
sourceDouble = r.nextDouble();
assertEquals(
"Keys don't match: " + "*" + iwritable.get() + ":" +
sourceInt + "*",
sourceInt, iwritable.get());
assertTrue(
"Vals don't match: " + "*" + dwritable.get() + ":" +
sourceDouble + "*",
Double.compare(dwritable.get(), sourceDouble) == 0 );
++count;
}
} finally {
reader.close();
}
}
assertEquals("Some records not found", RECORDS, count);
}
public void testSequenceOutputClassDefaultsToMapRedOutputClass()
throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
// Set arbitrary output classes to exercise the
// getSequenceFileOutput{Key,Value}Class defaults
job.setOutputKeyClass(FloatWritable.class);
job.setOutputValueClass(BooleanWritable.class);
assertEquals("SequenceFileOutputKeyClass should default to outputKeyClass",
FloatWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(
job));
assertEquals("SequenceFileOutputValueClass should default to "
+ "outputValueClass",
BooleanWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(
job));
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job,
IntWritable.class );
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job,
DoubleWritable.class );
assertEquals("SequenceFileOutputKeyClass not updated",
IntWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(
job));
assertEquals("SequenceFileOutputValueClass not updated",
DoubleWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(
job));
}
public void testcheckOutputSpecsForbidRecordCompression() throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path outputdir = new Path(System.getProperty("test.build.data",".")
+ "/output");
fs.delete(dir, true);
fs.delete(outputdir, true);
if (!fs.mkdirs(dir)) {
fail("Failed to create output directory");
}
FileOutputFormat.setWorkOutputPath(job, dir);
// Without an output path, FileOutputFormat.checkOutputSpecs will throw
// InvalidJobConfException
FileOutputFormat.setOutputPath(job, outputdir);
// SequenceFileAsBinaryOutputFormat doesn't support record compression
// It should throw an exception when checked by checkOutputSpecs
SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job,
CompressionType.BLOCK);
try {
new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(fs, job);
} catch (Exception e) {
fail("Block compression should be allowed for "
+ "SequenceFileAsBinaryOutputFormat:"
+ "Caught " + e.getClass().getName());
}
SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job,
CompressionType.RECORD);
try {
new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(fs, job);
fail("Record compression should not be allowed for "
+"SequenceFileAsBinaryOutputFormat");
} catch (InvalidJobConfException ie) {
// expected
} catch (Exception e) {
fail("Expected " + InvalidJobConfException.class.getName()
+ "but caught " + e.getClass().getName() );
}
}
}
| 8,689 | 39.798122 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Date;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class BigMapOutput extends Configured implements Tool {
public static final Log LOG =
LogFactory.getLog(BigMapOutput.class.getName());
private static Random random = new Random();
public static String MIN_KEY = "mapreduce.bmo.minkey";
public static String MIN_VALUE = "mapreduce.bmo.minvalue";
public static String MAX_KEY = "mapreduce.bmo.maxkey";
public static String MAX_VALUE = "mapreduce.bmo.maxvalue";
private static void randomizeBytes(byte[] data, int offset, int length) {
for(int i=offset + length - 1; i >= offset; --i) {
data[i] = (byte) random.nextInt(256);
}
}
private static void createBigMapInputFile(Configuration conf, FileSystem fs,
Path dir, long fileSizeInMB)
throws IOException {
// Check if the input path exists and is non-empty
if (fs.exists(dir)) {
FileStatus[] list = fs.listStatus(dir);
if (list.length > 0) {
throw new IOException("Input path: " + dir + " already exists... ");
}
}
Path file = new Path(dir, "part-0");
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, file,
BytesWritable.class, BytesWritable.class,
CompressionType.NONE);
long numBytesToWrite = fileSizeInMB * 1024 * 1024;
int minKeySize = conf.getInt(MIN_KEY, 10);
int keySizeRange =
conf.getInt(MAX_KEY, 1000) - minKeySize;
int minValueSize = conf.getInt(MIN_VALUE, 0);
int valueSizeRange =
conf.getInt(MAX_VALUE, 20000) - minValueSize;
BytesWritable randomKey = new BytesWritable();
BytesWritable randomValue = new BytesWritable();
LOG.info("Writing " + numBytesToWrite + " bytes to " + file + " with " +
"minKeySize: " + minKeySize + " keySizeRange: " + keySizeRange +
" minValueSize: " + minValueSize + " valueSizeRange: " + valueSizeRange);
long start = System.currentTimeMillis();
while (numBytesToWrite > 0) {
int keyLength = minKeySize +
(keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
randomKey.setSize(keyLength);
randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
int valueLength = minValueSize +
(valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
randomValue.setSize(valueLength);
randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
writer.append(randomKey, randomValue);
numBytesToWrite -= keyLength + valueLength;
}
writer.close();
long end = System.currentTimeMillis();
LOG.info("Created " + file + " of size: " + fileSizeInMB + "MB in " +
(end-start)/1000 + "secs");
}
private static void usage() {
System.err.println("BigMapOutput -input <input-dir> -output <output-dir> " +
"[-create <filesize in MB>]");
ToolRunner.printGenericCommandUsage(System.err);
System.exit(1);
}
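// Example invocation (illustrative; the jar name and entry point are
// assumptions):
//   hadoop jar hadoop-mapreduce-client-jobclient-tests.jar BigMapOutput \
//     -input /bmo/input -output /bmo/output -create 3072
// which first creates a ~3GB input file and then runs the identity
// map/reduce job over it.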
public int run(String[] args) throws Exception {
if (args.length < 4) { //input-dir should contain a huge file ( > 2GB)
usage();
}
Path bigMapInput = null;
Path outputPath = null;
boolean createInput = false;
long fileSizeInMB = 3 * 1024; // default of 3GB (>2GB)
for(int i=0; i < args.length; ++i) {
if ("-input".equals(args[i])){
bigMapInput = new Path(args[++i]);
} else if ("-output".equals(args[i])){
outputPath = new Path(args[++i]);
} else if ("-create".equals(args[i])) {
createInput = true;
fileSizeInMB = Long.parseLong(args[++i]);
} else {
usage();
}
}
FileSystem fs = FileSystem.get(getConf());
JobConf jobConf = new JobConf(getConf(), BigMapOutput.class);
jobConf.setJobName("BigMapOutput");
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
FileInputFormat.setInputPaths(jobConf, bigMapInput);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
FileOutputFormat.setOutputPath(jobConf, outputPath);
jobConf.setMapperClass(IdentityMapper.class);
jobConf.setReducerClass(IdentityReducer.class);
jobConf.setOutputKeyClass(BytesWritable.class);
jobConf.setOutputValueClass(BytesWritable.class);
if (createInput) {
createBigMapInputFile(jobConf, fs, bigMapInput, fileSizeInMB);
}
Date startTime = new Date();
System.out.println("Job started: " + startTime);
JobClient.runJob(jobConf);
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
return 0;
}
public static void main(String argv[]) throws Exception {
int res = ToolRunner.run(new Configuration(), new BigMapOutput(), argv);
System.exit(res);
}
}
| 6,493 | 37.886228 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.Random;
import java.util.Stack;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.RandomTextWriter;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class GenericMRLoadGenerator extends Configured implements Tool {
protected static int printUsage() {
System.err.println(
"Usage: [-m <maps>] [-r <reduces>]\n" +
"     [-keepmap <percent>] [-keepred <percent>]\n" +
"     [-indir <path>] [-outdir <path>]\n" +
"     [-inFormat[Indirect] <InputFormat>] [-outFormat <OutputFormat>]\n" +
"     [-outKey <WritableComparable>] [-outValue <Writable>]\n");
GenericOptionsParser.printGenericCommandUsage(System.err);
return -1;
}
/**
* Configure a job given argv.
*/
public static boolean parseArgs(String[] argv, JobConf job) throws IOException {
if (argv.length < 1) {
return 0 == printUsage();
}
for(int i=0; i < argv.length; ++i) {
if (argv.length == i + 1) {
System.out.println("ERROR: Required parameter missing from " +
argv[i]);
return 0 == printUsage();
}
try {
if ("-m".equals(argv[i])) {
job.setNumMapTasks(Integer.parseInt(argv[++i]));
} else if ("-r".equals(argv[i])) {
job.setNumReduceTasks(Integer.parseInt(argv[++i]));
} else if ("-inFormat".equals(argv[i])) {
job.setInputFormat(
Class.forName(argv[++i]).asSubclass(InputFormat.class));
} else if ("-outFormat".equals(argv[i])) {
job.setOutputFormat(
Class.forName(argv[++i]).asSubclass(OutputFormat.class));
} else if ("-outKey".equals(argv[i])) {
job.setOutputKeyClass(
Class.forName(argv[++i]).asSubclass(WritableComparable.class));
} else if ("-outValue".equals(argv[i])) {
job.setOutputValueClass(
Class.forName(argv[++i]).asSubclass(Writable.class));
} else if ("-keepmap".equals(argv[i])) {
job.set(org.apache.hadoop.mapreduce.
GenericMRLoadGenerator.MAP_PRESERVE_PERCENT, argv[++i]);
} else if ("-keepred".equals(argv[i])) {
job.set(org.apache.hadoop.mapreduce.
GenericMRLoadGenerator.REDUCE_PRESERVE_PERCENT, argv[++i]);
} else if ("-outdir".equals(argv[i])) {
FileOutputFormat.setOutputPath(job, new Path(argv[++i]));
} else if ("-indir".equals(argv[i])) {
FileInputFormat.addInputPaths(job, argv[++i]);
} else if ("-inFormatIndirect".equals(argv[i])) {
job.setClass(org.apache.hadoop.mapreduce.
GenericMRLoadGenerator.INDIRECT_INPUT_FORMAT,
Class.forName(argv[++i]).asSubclass(InputFormat.class),
InputFormat.class);
job.setInputFormat(IndirectInputFormat.class);
} else {
System.out.println("Unexpected argument: " + argv[i]);
return 0 == printUsage();
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + argv[i]);
return 0 == printUsage();
} catch (Exception e) {
throw (IOException)new IOException().initCause(e);
}
}
return true;
}
public int run(String [] argv) throws Exception {
JobConf job = new JobConf(getConf());
job.setJarByClass(GenericMRLoadGenerator.class);
job.setMapperClass(SampleMapper.class);
job.setReducerClass(SampleReducer.class);
if (!parseArgs(argv, job)) {
return -1;
}
if (null == FileOutputFormat.getOutputPath(job)) {
// No output dir? No writes
job.setOutputFormat(NullOutputFormat.class);
}
if (0 == FileInputFormat.getInputPaths(job).length) {
// No input dir? Generate random data
System.err.println("No input path; ignoring InputFormat");
confRandom(job);
} else if (null != job.getClass(
org.apache.hadoop.mapreduce.GenericMRLoadGenerator.INDIRECT_INPUT_FORMAT,
null)) {
// specified IndirectInputFormat? Build src list
JobClient jClient = new JobClient(job);
Path tmpDir = new Path(jClient.getFs().getHomeDirectory(), ".staging");
Random r = new Random();
Path indirInputFile = new Path(tmpDir,
Integer.toString(r.nextInt(Integer.MAX_VALUE), 36) + "_files");
job.set(
org.apache.hadoop.mapreduce.GenericMRLoadGenerator.INDIRECT_INPUT_FILE,
indirInputFile.toString());
SequenceFile.Writer writer = SequenceFile.createWriter(
tmpDir.getFileSystem(job), job, indirInputFile,
LongWritable.class, Text.class,
SequenceFile.CompressionType.NONE);
try {
for (Path p : FileInputFormat.getInputPaths(job)) {
FileSystem fs = p.getFileSystem(job);
Stack<Path> pathstack = new Stack<Path>();
pathstack.push(p);
while (!pathstack.empty()) {
for (FileStatus stat : fs.listStatus(pathstack.pop())) {
if (stat.isDirectory()) {
if (!stat.getPath().getName().startsWith("_")) {
pathstack.push(stat.getPath());
}
} else {
writer.sync();
writer.append(new LongWritable(stat.getLen()),
new Text(stat.getPath().toUri().toString()));
}
}
}
}
} finally {
writer.close();
}
}
Date startTime = new Date();
System.out.println("Job started: " + startTime);
JobClient.runJob(job);
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return 0;
}
/**
* Main driver/hook into ToolRunner.
*/
public static void main(String[] argv) throws Exception {
int res =
ToolRunner.run(new Configuration(), new GenericMRLoadGenerator(), argv);
System.exit(res);
}
static class RandomInputFormat implements InputFormat {
public InputSplit[] getSplits(JobConf conf, int numSplits) {
InputSplit[] splits = new InputSplit[numSplits];
for (int i = 0; i < numSplits; ++i) {
splits[i] = new IndirectInputFormat.IndirectSplit(
new Path("ignore" + i), 1);
}
return splits;
}
public RecordReader<Text,Text> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
final IndirectInputFormat.IndirectSplit clSplit =
(IndirectInputFormat.IndirectSplit)split;
return new RecordReader<Text,Text>() {
boolean once = true;
public boolean next(Text key, Text value) {
if (once) {
key.set(clSplit.getPath().toString());
once = false;
return true;
}
return false;
}
public Text createKey() { return new Text(); }
public Text createValue() { return new Text(); }
public long getPos() { return 0; }
public void close() { }
public float getProgress() { return 0.0f; }
};
}
}
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
static class RandomMapOutput extends MapReduceBase
implements Mapper<Text,Text,Text,Text> {
StringBuilder sentence = new StringBuilder();
int keymin;
int keymax;
int valmin;
int valmax;
long bytesToWrite;
Random r = new Random();
private int generateSentence(Text t, int noWords) {
sentence.setLength(0);
--noWords;
for (int i = 0; i < noWords; ++i) {
sentence.append(words[r.nextInt(words.length)]);
sentence.append(" ");
}
if (noWords >= 0) sentence.append(words[r.nextInt(words.length)]);
t.set(sentence.toString());
return sentence.length();
}
public void configure(JobConf job) {
bytesToWrite = job.getLong(RandomTextWriter.BYTES_PER_MAP,
1*1024*1024*1024);
keymin = job.getInt(RandomTextWriter.MIN_KEY, 5);
keymax = job.getInt(RandomTextWriter.MAX_KEY, 10);
valmin = job.getInt(RandomTextWriter.MIN_VALUE, 5);
valmax = job.getInt(RandomTextWriter.MAX_VALUE, 10);
}
public void map(Text key, Text val, OutputCollector<Text,Text> output,
Reporter reporter) throws IOException {
long acc = 0L;
long recs = 0;
final int keydiff = keymax - keymin;
final int valdiff = valmax - valmin;
for (long i = 0L; acc < bytesToWrite; ++i) {
int recacc = 0;
recacc += generateSentence(key, keymin +
(0 == keydiff ? 0 : r.nextInt(keydiff)));
recacc += generateSentence(val, valmin +
(0 == valdiff ? 0 : r.nextInt(valdiff)));
output.collect(key, val);
++recs;
acc += recacc;
reporter.incrCounter(Counters.BYTES_WRITTEN, recacc);
reporter.incrCounter(Counters.RECORDS_WRITTEN, 1);
reporter.setStatus(acc + "/" + (bytesToWrite - acc) + " bytes");
}
reporter.setStatus("Wrote " + recs + " records");
}
}
/**
* When no input dir is specified, generate random data.
*/
protected static void confRandom(JobConf job)
throws IOException {
// from RandomWriter
job.setInputFormat(RandomInputFormat.class);
job.setMapperClass(RandomMapOutput.class);
final ClusterStatus cluster = new JobClient(job).getClusterStatus();
int numMapsPerHost = job.getInt(RandomTextWriter.MAPS_PER_HOST, 10);
long numBytesToWritePerMap =
job.getLong(RandomTextWriter.BYTES_PER_MAP, 1*1024*1024*1024);
if (numBytesToWritePerMap == 0) {
throw new IOException(
"Cannot have " + RandomTextWriter.BYTES_PER_MAP + " set to 0");
}
long totalBytesToWrite = job.getLong(RandomTextWriter.TOTAL_BYTES,
numMapsPerHost * numBytesToWritePerMap * cluster.getTaskTrackers());
int numMaps = (int)(totalBytesToWrite / numBytesToWritePerMap);
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
job.setLong(RandomTextWriter.BYTES_PER_MAP, totalBytesToWrite);
}
job.setNumMapTasks(numMaps);
}
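// Worked example (illustrative): with the defaults above on a 4-tracker
// cluster, totalBytesToWrite = 10 maps/host * 1GB/map * 4 trackers = 40GB,
// so numMaps = 40GB / 1GB = 40 map tasks are configured.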
// Sampling //
static abstract class SampleMapReduceBase<K extends WritableComparable,
V extends Writable>
extends MapReduceBase {
private long total;
private long kept = 0;
private float keep;
protected void setKeep(float keep) {
this.keep = keep;
}
protected void emit(K key, V val, OutputCollector<K,V> out)
throws IOException {
++total;
while((float) kept / total < keep) {
++kept;
out.collect(key, val);
}
}
}
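// Worked example (illustrative): with keep = 0.25 the first call to emit()
// always collects (0/1 < 0.25); afterwards records are collected just often
// enough to hold kept/total at or above 0.25, i.e. roughly one record in
// four (after 8 calls, total = 8 and kept = 2).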
public static class SampleMapper<K extends WritableComparable, V extends Writable>
extends SampleMapReduceBase<K,V> implements Mapper<K,V,K,V> {
public void configure(JobConf job) {
setKeep(job.getFloat(
org.apache.hadoop.mapreduce.GenericMRLoadGenerator.MAP_PRESERVE_PERCENT,
(float)100.0) /
(float)100.0);
}
public void map(K key, V val,
OutputCollector<K,V> output, Reporter reporter)
throws IOException {
emit(key, val, output);
}
}
public static class SampleReducer<K extends WritableComparable, V extends Writable>
extends SampleMapReduceBase<K,V> implements Reducer<K,V,K,V> {
public void configure(JobConf job) {
setKeep(job.getFloat(org.apache.hadoop.mapreduce.
GenericMRLoadGenerator.REDUCE_PRESERVE_PERCENT, (float)100.0) /
(float)100.0);
}
public void reduce(K key, Iterator<V> values,
OutputCollector<K,V> output, Reporter reporter)
throws IOException {
while (values.hasNext()) {
emit(key, values.next(), output);
}
}
}
// Indirect reads //
/**
* Obscures the InputFormat and location information to simulate maps
* reading input from arbitrary locations ("indirect" reads).
*/
static class IndirectInputFormat implements InputFormat {
static class IndirectSplit implements InputSplit {
Path file;
long len;
public IndirectSplit() { }
public IndirectSplit(Path file, long len) {
this.file = file;
this.len = len;
}
public Path getPath() { return file; }
public long getLength() { return len; }
public String[] getLocations() throws IOException {
return new String[]{};
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeString(out, file.toString());
WritableUtils.writeVLong(out, len);
}
public void readFields(DataInput in) throws IOException {
file = new Path(WritableUtils.readString(in));
len = WritableUtils.readVLong(in);
}
}
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
Path src = new Path(job.get(
org.apache.hadoop.mapreduce.GenericMRLoadGenerator.INDIRECT_INPUT_FILE,
null));
FileSystem fs = src.getFileSystem(job);
ArrayList<IndirectSplit> splits = new ArrayList<IndirectSplit>(numSplits);
LongWritable key = new LongWritable();
Text value = new Text();
for (SequenceFile.Reader sl = new SequenceFile.Reader(fs, src, job);
sl.next(key, value);) {
splits.add(new IndirectSplit(new Path(value.toString()), key.get()));
}
return splits.toArray(new IndirectSplit[splits.size()]);
}
public RecordReader getRecordReader(InputSplit split, JobConf job,
Reporter reporter) throws IOException {
InputFormat indirIF = (InputFormat)ReflectionUtils.newInstance(
job.getClass(org.apache.hadoop.mapreduce.
GenericMRLoadGenerator.INDIRECT_INPUT_FORMAT,
SequenceFileInputFormat.class), job);
IndirectSplit is = ((IndirectSplit)split);
return indirIF.getRecordReader(new FileSplit(is.getPath(), 0,
is.getLength(), (String[])null),
job, reporter);
}
}
/**
* A random list of 1000 words from /usr/share/dict/words
*/
private static final String[] words = {
"diurnalness", "Homoiousian", "spiranthic", "tetragynian",
"silverhead", "ungreat", "lithograph", "exploiter",
"physiologian", "by", "hellbender", "Filipendula",
"undeterring", "antiscolic", "pentagamist", "hypoid",
"cacuminal", "sertularian", "schoolmasterism", "nonuple",
"gallybeggar", "phytonic", "swearingly", "nebular",
"Confervales", "thermochemically", "characinoid", "cocksuredom",
"fallacious", "feasibleness", "debromination", "playfellowship",
"tramplike", "testa", "participatingly", "unaccessible",
"bromate", "experientialist", "roughcast", "docimastical",
"choralcelo", "blightbird", "peptonate", "sombreroed",
"unschematized", "antiabolitionist", "besagne", "mastication",
"bromic", "sviatonosite", "cattimandoo", "metaphrastical",
"endotheliomyoma", "hysterolysis", "unfulminated", "Hester",
"oblongly", "blurredness", "authorling", "chasmy",
"Scorpaenidae", "toxihaemia", "Dictograph", "Quakerishly",
"deaf", "timbermonger", "strammel", "Thraupidae",
"seditious", "plerome", "Arneb", "eristically",
"serpentinic", "glaumrie", "socioromantic", "apocalypst",
"tartrous", "Bassaris", "angiolymphoma", "horsefly",
"kenno", "astronomize", "euphemious", "arsenide",
"untongued", "parabolicness", "uvanite", "helpless",
"gemmeous", "stormy", "templar", "erythrodextrin",
"comism", "interfraternal", "preparative", "parastas",
"frontoorbital", "Ophiosaurus", "diopside", "serosanguineous",
"ununiformly", "karyological", "collegian", "allotropic",
"depravity", "amylogenesis", "reformatory", "epidymides",
"pleurotropous", "trillium", "dastardliness", "coadvice",
"embryotic", "benthonic", "pomiferous", "figureheadship",
"Megaluridae", "Harpa", "frenal", "commotion",
"abthainry", "cobeliever", "manilla", "spiciferous",
"nativeness", "obispo", "monilioid", "biopsic",
"valvula", "enterostomy", "planosubulate", "pterostigma",
"lifter", "triradiated", "venialness", "tum",
"archistome", "tautness", "unswanlike", "antivenin",
"Lentibulariaceae", "Triphora", "angiopathy", "anta",
"Dawsonia", "becomma", "Yannigan", "winterproof",
"antalgol", "harr", "underogating", "ineunt",
"cornberry", "flippantness", "scyphostoma", "approbation",
"Ghent", "Macraucheniidae", "scabbiness", "unanatomized",
"photoelasticity", "eurythermal", "enation", "prepavement",
"flushgate", "subsequentially", "Edo", "antihero",
"Isokontae", "unforkedness", "porriginous", "daytime",
"nonexecutive", "trisilicic", "morphiomania", "paranephros",
"botchedly", "impugnation", "Dodecatheon", "obolus",
"unburnt", "provedore", "Aktistetae", "superindifference",
"Alethea", "Joachimite", "cyanophilous", "chorograph",
"brooky", "figured", "periclitation", "quintette",
"hondo", "ornithodelphous", "unefficient", "pondside",
"bogydom", "laurinoxylon", "Shiah", "unharmed",
"cartful", "noncrystallized", "abusiveness", "cromlech",
"japanned", "rizzomed", "underskin", "adscendent",
"allectory", "gelatinousness", "volcano", "uncompromisingly",
"cubit", "idiotize", "unfurbelowed", "undinted",
"magnetooptics", "Savitar", "diwata", "ramosopalmate",
"Pishquow", "tomorn", "apopenptic", "Haversian",
"Hysterocarpus", "ten", "outhue", "Bertat",
"mechanist", "asparaginic", "velaric", "tonsure",
"bubble", "Pyrales", "regardful", "glyphography",
"calabazilla", "shellworker", "stradametrical", "havoc",
"theologicopolitical", "sawdust", "diatomaceous", "jajman",
"temporomastoid", "Serrifera", "Ochnaceae", "aspersor",
"trailmaking", "Bishareen", "digitule", "octogynous",
"epididymitis", "smokefarthings", "bacillite", "overcrown",
"mangonism", "sirrah", "undecorated", "psychofugal",
"bismuthiferous", "rechar", "Lemuridae", "frameable",
"thiodiazole", "Scanic", "sportswomanship", "interruptedness",
"admissory", "osteopaedion", "tingly", "tomorrowness",
"ethnocracy", "trabecular", "vitally", "fossilism",
"adz", "metopon", "prefatorial", "expiscate",
"diathermacy", "chronist", "nigh", "generalizable",
"hysterogen", "aurothiosulphuric", "whitlowwort", "downthrust",
"Protestantize", "monander", "Itea", "chronographic",
"silicize", "Dunlop", "eer", "componental",
"spot", "pamphlet", "antineuritic", "paradisean",
"interruptor", "debellator", "overcultured", "Florissant",
"hyocholic", "pneumatotherapy", "tailoress", "rave",
"unpeople", "Sebastian", "thermanesthesia", "Coniferae",
"swacking", "posterishness", "ethmopalatal", "whittle",
"analgize", "scabbardless", "naught", "symbiogenetically",
"trip", "parodist", "columniform", "trunnel",
"yawler", "goodwill", "pseudohalogen", "swangy",
"cervisial", "mediateness", "genii", "imprescribable",
"pony", "consumptional", "carposporangial", "poleax",
"bestill", "subfebrile", "sapphiric", "arrowworm",
"qualminess", "ultraobscure", "thorite", "Fouquieria",
"Bermudian", "prescriber", "elemicin", "warlike",
"semiangle", "rotular", "misthread", "returnability",
"seraphism", "precostal", "quarried", "Babylonism",
"sangaree", "seelful", "placatory", "pachydermous",
"bozal", "galbulus", "spermaphyte", "cumbrousness",
"pope", "signifier", "Endomycetaceae", "shallowish",
"sequacity", "periarthritis", "bathysphere", "pentosuria",
"Dadaism", "spookdom", "Consolamentum", "afterpressure",
"mutter", "louse", "ovoviviparous", "corbel",
"metastoma", "biventer", "Hydrangea", "hogmace",
"seizing", "nonsuppressed", "oratorize", "uncarefully",
"benzothiofuran", "penult", "balanocele", "macropterous",
"dishpan", "marten", "absvolt", "jirble",
"parmelioid", "airfreighter", "acocotl", "archesporial",
"hypoplastral", "preoral", "quailberry", "cinque",
"terrestrially", "stroking", "limpet", "moodishness",
"canicule", "archididascalian", "pompiloid", "overstaid",
"introducer", "Italical", "Christianopaganism", "prescriptible",
"subofficer", "danseuse", "cloy", "saguran",
"frictionlessly", "deindividualization", "Bulanda", "ventricous",
"subfoliar", "basto", "scapuloradial", "suspend",
"stiffish", "Sphenodontidae", "eternal", "verbid",
"mammonish", "upcushion", "barkometer", "concretion",
"preagitate", "incomprehensible", "tristich", "visceral",
"hemimelus", "patroller", "stentorophonic", "pinulus",
"kerykeion", "brutism", "monstership", "merciful",
"overinstruct", "defensibly", "bettermost", "splenauxe",
"Mormyrus", "unreprimanded", "taver", "ell",
"proacquittal", "infestation", "overwoven", "Lincolnlike",
"chacona", "Tamil", "classificational", "lebensraum",
"reeveland", "intuition", "Whilkut", "focaloid",
"Eleusinian", "micromembrane", "byroad", "nonrepetition",
"bacterioblast", "brag", "ribaldrous", "phytoma",
"counteralliance", "pelvimetry", "pelf", "relaster",
"thermoresistant", "aneurism", "molossic", "euphonym",
"upswell", "ladhood", "phallaceous", "inertly",
"gunshop", "stereotypography", "laryngic", "refasten",
"twinling", "oflete", "hepatorrhaphy", "electrotechnics",
"cockal", "guitarist", "topsail", "Cimmerianism",
"larklike", "Llandovery", "pyrocatechol", "immatchable",
"chooser", "metrocratic", "craglike", "quadrennial",
"nonpoisonous", "undercolored", "knob", "ultratense",
"balladmonger", "slait", "sialadenitis", "bucketer",
"magnificently", "unstipulated", "unscourged", "unsupercilious",
"packsack", "pansophism", "soorkee", "percent",
"subirrigate", "champer", "metapolitics", "spherulitic",
"involatile", "metaphonical", "stachyuraceous", "speckedness",
"bespin", "proboscidiform", "gul", "squit",
"yeelaman", "peristeropode", "opacousness", "shibuichi",
"retinize", "yote", "misexposition", "devilwise",
"pumpkinification", "vinny", "bonze", "glossing",
"decardinalize", "transcortical", "serphoid", "deepmost",
"guanajuatite", "wemless", "arval", "lammy",
"Effie", "Saponaria", "tetrahedral", "prolificy",
"excerpt", "dunkadoo", "Spencerism", "insatiately",
"Gilaki", "oratorship", "arduousness", "unbashfulness",
"Pithecolobium", "unisexuality", "veterinarian", "detractive",
"liquidity", "acidophile", "proauction", "sural",
"totaquina", "Vichyite", "uninhabitedness", "allegedly",
"Gothish", "manny", "Inger", "flutist",
"ticktick", "Ludgatian", "homotransplant", "orthopedical",
"diminutively", "monogoneutic", "Kenipsim", "sarcologist",
"drome", "stronghearted", "Fameuse", "Swaziland",
"alen", "chilblain", "beatable", "agglomeratic",
"constitutor", "tendomucoid", "porencephalous", "arteriasis",
"boser", "tantivy", "rede", "lineamental",
"uncontradictableness", "homeotypical", "masa", "folious",
"dosseret", "neurodegenerative", "subtransverse", "Chiasmodontidae",
"palaeotheriodont", "unstressedly", "chalcites", "piquantness",
"lampyrine", "Aplacentalia", "projecting", "elastivity",
"isopelletierin", "bladderwort", "strander", "almud",
"iniquitously", "theologal", "bugre", "chargeably",
"imperceptivity", "meriquinoidal", "mesophyte", "divinator",
"perfunctory", "counterappellant", "synovial", "charioteer",
"crystallographical", "comprovincial", "infrastapedial", "pleasurehood",
"inventurous", "ultrasystematic", "subangulated", "supraoesophageal",
"Vaishnavism", "transude", "chrysochrous", "ungrave",
"reconciliable", "uninterpleaded", "erlking", "wherefrom",
"aprosopia", "antiadiaphorist", "metoxazine", "incalculable",
"umbellic", "predebit", "foursquare", "unimmortal",
"nonmanufacture", "slangy", "predisputant", "familist",
"preaffiliate", "friarhood", "corelysis", "zoonitic",
"halloo", "paunchy", "neuromimesis", "aconitine",
"hackneyed", "unfeeble", "cubby", "autoschediastical",
"naprapath", "lyrebird", "inexistency", "leucophoenicite",
"ferrogoslarite", "reperuse", "uncombable", "tambo",
"propodiale", "diplomatize", "Russifier", "clanned",
"corona", "michigan", "nonutilitarian", "transcorporeal",
"bought", "Cercosporella", "stapedius", "glandularly",
"pictorially", "weism", "disilane", "rainproof",
"Caphtor", "scrubbed", "oinomancy", "pseudoxanthine",
"nonlustrous", "redesertion", "Oryzorictinae", "gala",
"Mycogone", "reappreciate", "cyanoguanidine", "seeingness",
"breadwinner", "noreast", "furacious", "epauliere",
"omniscribent", "Passiflorales", "uninductive", "inductivity",
"Orbitolina", "Semecarpus", "migrainoid", "steprelationship",
"phlogisticate", "mesymnion", "sloped", "edificator",
"beneficent", "culm", "paleornithology", "unurban",
"throbless", "amplexifoliate", "sesquiquintile", "sapience",
"astucious", "dithery", "boor", "ambitus",
"scotching", "uloid", "uncompromisingness", "hoove",
"waird", "marshiness", "Jerusalem", "mericarp",
"unevoked", "benzoperoxide", "outguess", "pyxie",
"hymnic", "euphemize", "mendacity", "erythremia",
"rosaniline", "unchatteled", "lienteria", "Bushongo",
"dialoguer", "unrepealably", "rivethead", "antideflation",
"vinegarish", "manganosiderite", "doubtingness", "ovopyriform",
"Cephalodiscus", "Muscicapa", "Animalivora", "angina",
"planispheric", "ipomoein", "cuproiodargyrite", "sandbox",
"scrat", "Munnopsidae", "shola", "pentafid",
"overstudiousness", "times", "nonprofession", "appetible",
"valvulotomy", "goladar", "uniarticular", "oxyterpene",
"unlapsing", "omega", "trophonema", "seminonflammable",
"circumzenithal", "starer", "depthwise", "liberatress",
"unleavened", "unrevolting", "groundneedle", "topline",
"wandoo", "umangite", "ordinant", "unachievable",
"oversand", "snare", "avengeful", "unexplicit",
"mustafina", "sonable", "rehabilitative", "eulogization",
"papery", "technopsychology", "impressor", "cresylite",
"entame", "transudatory", "scotale", "pachydermatoid",
"imaginary", "yeat", "slipped", "stewardship",
"adatom", "cockstone", "skyshine", "heavenful",
"comparability", "exprobratory", "dermorhynchous", "parquet",
"cretaceous", "vesperal", "raphis", "undangered",
"Glecoma", "engrain", "counteractively", "Zuludom",
"orchiocatabasis", "Auriculariales", "warriorwise", "extraorganismal",
"overbuilt", "alveolite", "tetchy", "terrificness",
"widdle", "unpremonished", "rebilling", "sequestrum",
"equiconvex", "heliocentricism", "catabaptist", "okonite",
"propheticism", "helminthagogic", "calycular", "giantly",
"wingable", "golem", "unprovided", "commandingness",
"greave", "haply", "doina", "depressingly",
"subdentate", "impairment", "decidable", "neurotrophic",
"unpredict", "bicorporeal", "pendulant", "flatman",
"intrabred", "toplike", "Prosobranchiata", "farrantly",
"toxoplasmosis", "gorilloid", "dipsomaniacal", "aquiline",
"atlantite", "ascitic", "perculsive", "prospectiveness",
"saponaceous", "centrifugalization", "dinical", "infravaginal",
"beadroll", "affaite", "Helvidian", "tickleproof",
"abstractionism", "enhedge", "outwealth", "overcontribute",
"coldfinch", "gymnastic", "Pincian", "Munychian",
"codisjunct", "quad", "coracomandibular", "phoenicochroite",
"amender", "selectivity", "putative", "semantician",
"lophotrichic", "Spatangoidea", "saccharogenic", "inferent",
"Triconodonta", "arrendation", "sheepskin", "taurocolla",
"bunghole", "Machiavel", "triakistetrahedral", "dehairer",
"prezygapophysial", "cylindric", "pneumonalgia", "sleigher",
"emir", "Socraticism", "licitness", "massedly",
"instructiveness", "sturdied", "redecrease", "starosta",
"evictor", "orgiastic", "squdge", "meloplasty",
"Tsonecan", "repealableness", "swoony", "myesthesia",
"molecule", "autobiographist", "reciprocation", "refective",
"unobservantness", "tricae", "ungouged", "floatability",
"Mesua", "fetlocked", "chordacentrum", "sedentariness",
"various", "laubanite", "nectopod", "zenick",
"sequentially", "analgic", "biodynamics", "posttraumatic",
"nummi", "pyroacetic", "bot", "redescend",
"dispermy", "undiffusive", "circular", "trillion",
"Uraniidae", "ploration", "discipular", "potentness",
"sud", "Hu", "Eryon", "plugger",
"subdrainage", "jharal", "abscission", "supermarket",
"countergabion", "glacierist", "lithotresis", "minniebush",
"zanyism", "eucalypteol", "sterilely", "unrealize",
"unpatched", "hypochondriacism", "critically", "cheesecutter",
};
}
| 30,533 | 42.62 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
import org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.junit.Assert;
import org.junit.Test;
public class TestMRTimelineEventHandling {
@Test
public void testTimelineServiceStartInMiniCluster() throws Exception {
Configuration conf = new YarnConfiguration();
    /*
     * The timeline service should not start when this config is set to false,
     * regardless of the value of MAPREDUCE_JOB_EMIT_TIMELINE_DATA.
     */
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
MiniMRYarnCluster cluster = null;
try {
cluster = new MiniMRYarnCluster(
TestJobHistoryEventHandler.class.getSimpleName(), 1);
cluster.init(conf);
cluster.start();
//verify that the timeline service is not started.
Assert.assertNull("Timeline Service should not have been started",
cluster.getApplicationHistoryServer());
}
finally {
if(cluster != null) {
cluster.stop();
}
}
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
cluster = null;
try {
cluster = new MiniMRYarnCluster(
TestJobHistoryEventHandler.class.getSimpleName(), 1);
cluster.init(conf);
cluster.start();
//verify that the timeline service is not started.
Assert.assertNull("Timeline Service should not have been started",
cluster.getApplicationHistoryServer());
}
finally {
if(cluster != null) {
cluster.stop();
}
}
}
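  /**
   * Runs one succeeding and one failing MR job and verifies that the timeline
   * store contains a MAPREDUCE_JOB entity for each, whose event list ends with
   * AM_STARTED and starts with JOB_FINISHED / JOB_FAILED respectively.
   */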
@Test
public void testMRTimelineEventHandling() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
MiniMRYarnCluster cluster = null;
try {
cluster = new MiniMRYarnCluster(
TestJobHistoryEventHandler.class.getSimpleName(), 1);
cluster.init(conf);
cluster.start();
TimelineStore ts = cluster.getApplicationHistoryServer()
.getTimelineStore();
Path inDir = new Path("input");
Path outDir = new Path("output");
RunningJob job =
UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.SUCCEEDED,
job.getJobStatus().getState().getValue());
TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
null, null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
TimelineEntity tEntity = entities.getEntities().get(0);
Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(tEntity.getEvents().size() - 1)
.getEventType());
Assert.assertEquals(EventType.JOB_FINISHED.toString(),
tEntity.getEvents().get(0).getEventType());
job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.FAILED,
job.getJobStatus().getState().getValue());
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null,
null, null, null, null);
Assert.assertEquals(2, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
Assert.assertEquals("MAPREDUCE_JOB", tEntity.getEntityType());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(tEntity.getEvents().size() - 1)
.getEventType());
Assert.assertEquals(EventType.JOB_FAILED.toString(),
tEntity.getEvents().get(0).getEventType());
} finally {
if (cluster != null) {
cluster.stop();
}
}
}
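  /**
   * Verifies that MAPREDUCE_JOB_EMIT_TIMELINE_DATA gates per-job timeline
   * emission: with the timeline service running, a job only appears in the
   * timeline store when the flag is set to true in that job's configuration.
   */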
@Test
public void testMapreduceJobTimelineServiceEnabled()
throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
MiniMRYarnCluster cluster = null;
try {
cluster = new MiniMRYarnCluster(
TestJobHistoryEventHandler.class.getSimpleName(), 1);
cluster.init(conf);
cluster.start();
TimelineStore ts = cluster.getApplicationHistoryServer()
.getTimelineStore();
Path inDir = new Path("input");
Path outDir = new Path("output");
RunningJob job =
UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.SUCCEEDED,
job.getJobStatus().getState().getValue());
TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
null, null, null, null, null, null, null);
Assert.assertEquals(0, entities.getEntities().size());
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.SUCCEEDED,
job.getJobStatus().getState().getValue());
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null,
null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
TimelineEntity tEntity = entities.getEntities().get(0);
Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
} finally {
if (cluster != null) {
cluster.stop();
}
}
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
cluster = null;
try {
cluster = new MiniMRYarnCluster(
TestJobHistoryEventHandler.class.getSimpleName(), 1);
cluster.init(conf);
cluster.start();
TimelineStore ts = cluster.getApplicationHistoryServer()
.getTimelineStore();
Path inDir = new Path("input");
Path outDir = new Path("output");
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
RunningJob job =
UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.SUCCEEDED,
job.getJobStatus().getState().getValue());
TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
null, null, null, null, null, null, null);
Assert.assertEquals(0, entities.getEntities().size());
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
job = UtilsForTests.runJobSucceed(new JobConf(conf), inDir, outDir);
Assert.assertEquals(JobStatus.SUCCEEDED,
job.getJobStatus().getState().getValue());
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null,
null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
TimelineEntity tEntity = entities.getEntities().get(0);
Assert.assertEquals(job.getID().toString(), tEntity.getEntityId());
} finally {
if (cluster != null) {
cluster.stop();
}
}
}
}
| 8,777 | 39.638889 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRClasspath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Assert;
import org.junit.Test;
/**
 * A JUnit test that runs jobs on a mini Map-Reduce cluster with multiple
 * directories and checks that the task classpath is set up correctly.
 */
public class TestMiniMRClasspath {
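  /**
   * Configures a word-count job whose mapper, combiner and reducer are
   * referenced by class name (testjar.ClassWordCount), so the job only
   * succeeds if those classes end up on the task classpath.
   */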
static void configureWordCount(FileSystem fs, JobConf conf, String input,
int numMaps, int numReduces, Path inDir, Path outDir) throws IOException {
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
FileSystem.setDefaultUri(conf, fs.getUri());
conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.YARN_FRAMEWORK_NAME);
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.set("mapred.mapper.class", "testjar.ClassWordCount$MapClass");
conf.set("mapred.combine.class", "testjar.ClassWordCount$Reduce");
conf.set("mapred.reducer.class", "testjar.ClassWordCount$Reduce");
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReduces);
//set the tests jar file
conf.setJarByClass(TestMiniMRClasspath.class);
}
static String launchWordCount(URI fileSys, JobConf conf, String input,
int numMaps, int numReduces)
throws IOException {
final Path inDir = new Path("/testing/wc/input");
final Path outDir = new Path("/testing/wc/output");
FileSystem fs = FileSystem.get(fileSys, conf);
configureWordCount(fs, conf, input, numMaps, numReduces, inDir, outDir);
JobClient.runJob(conf);
StringBuffer result = new StringBuffer();
{
Path[] parents = FileUtil.stat2Paths(fs.listStatus(outDir.getParent()));
Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
BufferedReader file =
new BufferedReader(new InputStreamReader(fs.open(fileList[i])));
String line = file.readLine();
while (line != null) {
result.append(line);
result.append("\n");
line = file.readLine();
}
file.close();
}
}
return result.toString();
}
static String launchExternal(URI uri, JobConf conf, String input,
int numMaps, int numReduces)
throws IOException {
final Path inDir = new Path("/testing/ext/input");
final Path outDir = new Path("/testing/ext/output");
FileSystem fs = FileSystem.get(uri, conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
FileSystem.setDefaultUri(conf, uri);
conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.YARN_FRAMEWORK_NAME);
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
    // the values are counts (ints)
    conf.setOutputValueClass(IntWritable.class);
    // the keys are the messages (testjar.ExternalWritable)
conf.set(JobContext.OUTPUT_KEY_CLASS, "testjar.ExternalWritable");
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReduces);
conf.set("mapred.mapper.class", "testjar.ExternalMapperReducer");
conf.set("mapred.reducer.class", "testjar.ExternalMapperReducer");
// set the tests jar file
conf.setJarByClass(TestMiniMRClasspath.class);
JobClient.runJob(conf);
StringBuffer result = new StringBuffer();
Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
new Utils.OutputFileUtils
.OutputFilesFilter()));
for (int i = 0; i < fileList.length; ++i) {
BufferedReader file = new BufferedReader(new InputStreamReader(
fs.open(fileList[i])));
String line = file.readLine();
while (line != null) {
result.append(line);
line = file.readLine();
result.append("\n");
}
file.close();
}
return result.toString();
}
@Test
public void testClassPath() throws IOException {
String namenode = null;
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
try {
final int taskTrackers = 4;
final int jobTrackerPort = 60050;
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
namenode = fileSys.getUri().toString();
mr = new MiniMRCluster(taskTrackers, namenode, 3);
JobConf jobConf = mr.createJobConf();
String result;
result = launchWordCount(fileSys.getUri(), jobConf,
"The quick brown fox\nhas many silly\n" + "red fox sox\n", 3, 1);
Assert.assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n"
+ "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result);
} finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();
}
}
}
@Test
public void testExternalWritable()
throws IOException {
String namenode = null;
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
try {
final int taskTrackers = 4;
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).build();
fileSys = dfs.getFileSystem();
namenode = fileSys.getUri().toString();
mr = new MiniMRCluster(taskTrackers, namenode, 3);
JobConf jobConf = mr.createJobConf();
String result;
result = launchExternal(fileSys.getUri(), jobConf,
"Dennis was here!\nDennis again!", 3, 1);
Assert.assertEquals("Dennis again!\t1\nDennis was here!\t1\n", result);
}
finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();
}
}
}
}
| 7,832 | 33.96875 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestGetSplitHosts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.net.NetworkTopology;
import junit.framework.TestCase;
public class TestGetSplitHosts extends TestCase {
public void testGetSplitHosts() throws Exception {
int numBlocks = 3;
int block1Size = 100, block2Size = 150, block3Size = 75;
int fileSize = block1Size + block2Size + block3Size;
int replicationFactor = 3;
NetworkTopology clusterMap = new NetworkTopology();
BlockLocation[] bs = new BlockLocation[numBlocks];
String [] block1Hosts = {"host1","host2","host3"};
String [] block1Names = {"host1:100","host2:100","host3:100"};
String [] block1Racks = {"/rack1/","/rack1/","/rack2/"};
String [] block1Paths = new String[replicationFactor];
for (int i = 0; i < replicationFactor; i++) {
block1Paths[i] = block1Racks[i]+block1Names[i];
}
bs[0] = new BlockLocation(block1Names,block1Hosts,
block1Paths,0,block1Size);
String [] block2Hosts = {"host4","host5","host6"};
String [] block2Names = {"host4:100","host5:100","host6:100"};
String [] block2Racks = {"/rack2/","/rack3/","/rack3/"};
String [] block2Paths = new String[replicationFactor];
for (int i = 0; i < replicationFactor; i++) {
block2Paths[i] = block2Racks[i]+block2Names[i];
}
bs[1] = new BlockLocation(block2Names,block2Hosts,
block2Paths,block1Size,block2Size);
String [] block3Hosts = {"host1","host7","host8"};
String [] block3Names = {"host1:100","host7:100","host8:100"};
String [] block3Racks = {"/rack1/","/rack4/","/rack4/"};
String [] block3Paths = new String[replicationFactor];
for (int i = 0; i < replicationFactor; i++) {
block3Paths[i] = block3Racks[i]+block3Names[i];
}
bs[2] = new BlockLocation(block3Names,block3Hosts,
block3Paths,block1Size+block2Size,
block3Size);
SequenceFileInputFormat< String, String> sif =
new SequenceFileInputFormat<String,String>();
String [] hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
    // The contributions by rack are:
    //   Rack1 175
    //   Rack2 250
    //   Rack3 150
    // So the Rack2 hosts, host4 and host3, should be returned first,
    // even though their individual contributions are not the highest.
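    // A rough sketch of that arithmetic (assuming each block is counted once
    // per rack, which is what the Rack1 total above implies):
    //   Rack1 = block1 (100, host1/host2) + block3 (75, host1)  = 175
    //   Rack2 = block1 (100, host3)       + block2 (150, host4) = 250
    //   Rack3 = block2 (150, host5/host6)                       = 150
    // Hosts are then picked rack-first: host4 (150) and host3 (100) from
    // Rack2, followed by host1 (100 + 75 = 175) from Rack1.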
assertTrue (hosts.length == replicationFactor);
assertTrue(hosts[0].equalsIgnoreCase("host4"));
assertTrue(hosts[1].equalsIgnoreCase("host3"));
assertTrue(hosts[2].equalsIgnoreCase("host1"));
// Now Create the blocks without topology information
bs[0] = new BlockLocation(block1Names,block1Hosts,0,block1Size);
bs[1] = new BlockLocation(block2Names,block2Hosts,block1Size,block2Size);
bs[2] = new BlockLocation(block3Names,block3Hosts,block1Size+block2Size,
block3Size);
hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
// host1 makes the highest contribution among all hosts
// So, that should be returned before others
assertTrue (hosts.length == replicationFactor);
assertTrue(hosts[0].equalsIgnoreCase("host1"));
}
}
| 4,096 | 36.935185 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRCaching.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.*;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.MRJobConfig;
import java.net.URI;
import org.junit.Assert;
public class MRCaching {
static String testStr = "This is a test file " + "used for testing caching "
+ "jars, zip and normal files.";
  /**
   * Uses the wordcount example and adds caching to it. The cache
   * archives/files are set on the job and the map tasks then check whether
   * they have been localized correctly.
   */
public static class MapClass extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
JobConf conf;
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void configure(JobConf jconf) {
conf = jconf;
try {
Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
        // read the cached files (unzipped, unjarred and text)
        // and write their contents into a single file TEST_ROOT_DIR/test.txt
String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp");
Path file = new Path("file:///", TEST_ROOT_DIR);
FileSystem fs = FileSystem.getLocal(conf);
if (!fs.mkdirs(file)) {
throw new IOException("Mkdirs failed to create " + file.toString());
}
Path fileOut = new Path(file, "test.txt");
fs.delete(fileOut, true);
DataOutputStream out = fs.create(fileOut);
for (int i = 0; i < localArchives.length; i++) {
// read out the files from these archives
File f = new File(localArchives[i].toString());
File txt = new File(f, "test.txt");
FileInputStream fin = new FileInputStream(txt);
DataInputStream din = new DataInputStream(fin);
String str = din.readLine();
din.close();
out.writeBytes(str);
out.writeBytes("\n");
}
for (int i = 0; i < localFiles.length; i++) {
          // read out the plain cached files
File txt = new File(localFiles[i].toString());
FileInputStream fin = new FileInputStream(txt);
DataInputStream din = new DataInputStream(fin);
String str = din.readLine();
out.writeBytes(str);
out.writeBytes("\n");
}
out.close();
} catch (IOException ie) {
System.out.println(StringUtils.stringifyException(ie));
}
}
public void map(LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
String line = value.toString();
StringTokenizer itr = new StringTokenizer(line);
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
output.collect(word, one);
}
}
}
  /**
   * Uses the wordcount example and adds caching to it. The cache
   * archives/files are set on the job and the map tasks then check whether
   * their symlinks have been created correctly.
   */
public static class MapClass2 extends MapClass {
JobConf conf;
public void configure(JobConf jconf) {
conf = jconf;
try {
        // read the cached files (unzipped, unjarred and text)
        // and write their contents into a single file TEST_ROOT_DIR/test.txt
String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp");
Path file = new Path("file:///", TEST_ROOT_DIR);
FileSystem fs = FileSystem.getLocal(conf);
if (!fs.mkdirs(file)) {
throw new IOException("Mkdirs failed to create " + file.toString());
}
Path fileOut = new Path(file, "test.txt");
fs.delete(fileOut, true);
DataOutputStream out = fs.create(fileOut);
String[] symlinks = new String[6];
symlinks[0] = ".";
symlinks[1] = "testjar";
symlinks[2] = "testzip";
symlinks[3] = "testtgz";
symlinks[4] = "testtargz";
symlinks[5] = "testtar";
for (int i = 0; i < symlinks.length; i++) {
          // read test.txt via each of the symlinked cache entries
File f = new File(symlinks[i]);
File txt = new File(f, "test.txt");
FileInputStream fin = new FileInputStream(txt);
BufferedReader reader = new BufferedReader(new InputStreamReader(fin));
String str = reader.readLine();
reader.close();
out.writeBytes(str);
out.writeBytes("\n");
}
out.close();
} catch (IOException ie) {
System.out.println(StringUtils.stringifyException(ie));
}
}
}
/**
* A reducer class that just emits the sum of the input values.
*/
public static class ReduceClass extends MapReduceBase
implements Reducer<Text, IntWritable, Text, IntWritable> {
public void reduce(Text key, Iterator<IntWritable> values,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
int sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
output.collect(key, new IntWritable(sum));
}
}
public static class TestResult {
public RunningJob job;
public boolean isOutputOk;
TestResult(RunningJob job, boolean isOutputOk) {
this.job = job;
this.isOutputOk = isOutputOk;
}
}
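  /**
   * Copies the local test cache artifacts (test.txt, test.jar, test.zip and
   * the test tarballs) from the test.cache.data directory into the given
   * cache directory on the target FileSystem.
   */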
static void setupCache(String cacheDir, FileSystem fs)
throws IOException {
Path localPath = new Path(System.getProperty("test.cache.data", "build/test/cache"));
Path txtPath = new Path(localPath, new Path("test.txt"));
Path jarPath = new Path(localPath, new Path("test.jar"));
Path zipPath = new Path(localPath, new Path("test.zip"));
Path tarPath = new Path(localPath, new Path("test.tgz"));
Path tarPath1 = new Path(localPath, new Path("test.tar.gz"));
Path tarPath2 = new Path(localPath, new Path("test.tar"));
Path cachePath = new Path(cacheDir);
fs.delete(cachePath, true);
if (!fs.mkdirs(cachePath)) {
throw new IOException("Mkdirs failed to create " + cachePath.toString());
}
fs.copyFromLocalFile(txtPath, cachePath);
fs.copyFromLocalFile(jarPath, cachePath);
fs.copyFromLocalFile(zipPath, cachePath);
fs.copyFromLocalFile(tarPath, cachePath);
fs.copyFromLocalFile(tarPath1, cachePath);
fs.copyFromLocalFile(tarPath2, cachePath);
}
public static TestResult launchMRCache(String indir,
String outdir, String cacheDir,
JobConf conf, String input)
throws IOException {
String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/tmp"))
.toString().replace(' ', '+');
//if (TEST_ROOT_DIR.startsWith("C:")) TEST_ROOT_DIR = "/tmp";
conf.set("test.build.data", TEST_ROOT_DIR);
final Path inDir = new Path(indir);
final Path outDir = new Path(outdir);
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
System.out.println("HERE:"+inDir);
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
conf.setJobName("cachetest");
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.setCombinerClass(MRCaching.ReduceClass.class);
conf.setReducerClass(MRCaching.ReduceClass.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
conf.setSpeculativeExecution(false);
URI[] uris = new URI[6];
conf.setMapperClass(MRCaching.MapClass2.class);
uris[0] = fs.getUri().resolve(cacheDir + "/test.txt");
uris[1] = fs.getUri().resolve(cacheDir + "/test.jar");
uris[2] = fs.getUri().resolve(cacheDir + "/test.zip");
uris[3] = fs.getUri().resolve(cacheDir + "/test.tgz");
uris[4] = fs.getUri().resolve(cacheDir + "/test.tar.gz");
uris[5] = fs.getUri().resolve(cacheDir + "/test.tar");
DistributedCache.addCacheFile(uris[0], conf);
// Save expected file sizes
long[] fileSizes = new long[1];
fileSizes[0] = fs.getFileStatus(new Path(uris[0].getPath())).getLen();
long[] archiveSizes = new long[5]; // track last 5
for (int i = 1; i < 6; i++) {
DistributedCache.addCacheArchive(uris[i], conf);
archiveSizes[i-1] = // starting with second archive
fs.getFileStatus(new Path(uris[i].getPath())).getLen();
}
RunningJob job = JobClient.runJob(conf);
int count = 0;
    // After the job has run, check that every line read from the localized
    // cache matches the expected string and that there are 6 instances,
    // one per symlinked cache entry.
Path result = new Path(TEST_ROOT_DIR + "/test.txt");
{
BufferedReader file = new BufferedReader
(new InputStreamReader(FileSystem.getLocal(conf).open(result)));
String line = file.readLine();
while (line != null) {
if (!testStr.equals(line))
return new TestResult(job, false);
count++;
line = file.readLine();
}
file.close();
}
if (count != 6)
return new TestResult(job, false);
    // Check that the sizes of the files in the DistributedCache were saved
    // correctly. Note that the underlying job clones the original conf before
    // determining various stats (timestamps etc.), so we have to use
    // job.getConfiguration() here.
validateCacheFileSizes(job.getConfiguration(), fileSizes,
MRJobConfig.CACHE_FILES_SIZES);
validateCacheFileSizes(job.getConfiguration(), archiveSizes,
MRJobConfig.CACHE_ARCHIVES_SIZES);
return new TestResult(job, true);
}
private static void validateCacheFileSizes(Configuration job,
long[] expectedSizes,
String configKey)
throws IOException {
String configValues = job.get(configKey, "");
System.out.println(configKey + " -> " + configValues);
String[] realSizes = StringUtils.getStrings(configValues);
Assert.assertEquals("Number of files for "+ configKey,
expectedSizes.length, realSizes.length);
for (int i=0; i < expectedSizes.length; ++i) {
long actual = Long.valueOf(realSizes[i]);
long expected = expectedSizes[i];
Assert.assertEquals("File "+ i +" for "+ configKey, expected, actual);
}
}
}
| 12,073 | 36.84953 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Ignore;
/**
* This test checks whether the task caches are created and used properly.
*/
@Ignore
public class TestMultipleLevelCaching extends TestCase {
private static final int MAX_LEVEL = 5;
final Path inDir = new Path("/cachetesting");
final Path outputPath = new Path("/output");
/**
* Returns a string representing a rack with level + 1 nodes in the topology
* for the rack.
* For id = 2, level = 2 we get /a/b2/c2
* id = 1, level = 3 we get /a/b1/c1/d1
   * NOTE: there should always be one shared node, i.e. /a
* @param id Unique Id for the rack
* @param level The level in the topology where the separation starts
*/
private static String getRack(int id, int level) {
StringBuilder rack = new StringBuilder();
char alpha = 'a';
int length = level + 1;
while (length > level) {
rack.append("/");
rack.append(alpha);
++alpha;
--length;
}
while (length > 0) {
rack.append("/");
rack.append(alpha);
rack.append(id);
++alpha;
--length;
}
return rack.toString();
}
public void testMultiLevelCaching() throws Exception {
for (int i = 1 ; i <= MAX_LEVEL; ++i) {
testCachingAtLevel(i);
}
}
private void testCachingAtLevel(int level) throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
String testName = "TestMultiLevelCaching";
try {
final int taskTrackers = 1;
// generate the racks
// use rack1 for data node
String rack1 = getRack(0, level);
// use rack2 for task tracker
String rack2 = getRack(1, level);
Configuration conf = new Configuration();
// Run a datanode on host1 under /a/b/c/..../d1/e1/f1
dfs = new MiniDFSCluster.Builder(conf).racks(new String[] {rack1})
.hosts(new String[] {"host1.com"}).build();
dfs.waitActive();
fileSys = dfs.getFileSystem();
if (!fileSys.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
UtilsForTests.writeFile(dfs.getNameNode(), conf,
new Path(inDir + "/file"), (short)1);
namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +
(dfs.getFileSystem()).getUri().getPort();
// Run a job with the (only)tasktracker on host2 under diff topology
// e.g /a/b/c/..../d2/e2/f2.
JobConf jc = new JobConf();
      // cache-level = level (unshared levels) + 1 (topmost shared node, i.e. /a)
      //               + 1 (for the host)
jc.setInt(JTConfig.JT_TASKCACHE_LEVELS, level + 2);
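      // For example (an illustrative sketch): with level = 2 the generated
      // racks are /a/b0/c0 and /a/b1/c1, a task tracker sits at
      // /a/bX/cX/host, and JT_TASKCACHE_LEVELS = 2 + 1 + 1 = 4.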
mr = new MiniMRCluster(taskTrackers, namenode, 1, new String[] {rack2},
new String[] {"host2.com"}, jc);
/* The job is configured with 1 map for one (non-splittable) file.
       * Since the datanode is running under a different subtree, there is no
* node-level data locality but there should be topological locality.
*/
launchJobAndTestCounters(
testName, mr, fileSys, inDir, outputPath, 1, 1, 0, 0);
mr.shutdown();
} finally {
if (null != fileSys) {
// inDir, outputPath only exist if fileSys is valid.
fileSys.delete(inDir, true);
fileSys.delete(outputPath, true);
}
if (dfs != null) {
dfs.shutdown();
}
}
}
/**
* Launches a MR job and tests the job counters against the expected values.
   * @param jobName The name for the job
* @param mr The MR cluster
* @param fileSys The FileSystem
* @param in Input path
* @param out Output path
* @param numMaps Number of maps
* @param otherLocalMaps Expected value of other local maps
   * @param dataLocalMaps Expected value of data(node) local maps
   * @param rackLocalMaps Expected value of rack local maps
*/
static void launchJobAndTestCounters(String jobName, MiniMRCluster mr,
FileSystem fileSys, Path in, Path out,
int numMaps, int otherLocalMaps,
int dataLocalMaps, int rackLocalMaps)
throws IOException {
JobConf jobConf = mr.createJobConf();
if (fileSys.exists(out)) {
fileSys.delete(out, true);
}
RunningJob job = launchJob(jobConf, in, out, numMaps, jobName);
Counters counters = job.getCounters();
assertEquals("Number of local maps",
counters.getCounter(JobCounter.OTHER_LOCAL_MAPS), otherLocalMaps);
assertEquals("Number of Data-local maps",
counters.getCounter(JobCounter.DATA_LOCAL_MAPS),
dataLocalMaps);
assertEquals("Number of Rack-local maps",
counters.getCounter(JobCounter.RACK_LOCAL_MAPS),
rackLocalMaps);
mr.waitUntilIdle();
mr.shutdown();
}
static RunningJob launchJob(JobConf jobConf, Path inDir, Path outputPath,
int numMaps, String jobName) throws IOException {
jobConf.setJobName(jobName);
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
FileInputFormat.setInputPaths(jobConf, inDir);
FileOutputFormat.setOutputPath(jobConf, outputPath);
jobConf.setMapperClass(IdentityMapper.class);
jobConf.setReducerClass(IdentityReducer.class);
jobConf.setOutputKeyClass(BytesWritable.class);
jobConf.setOutputValueClass(BytesWritable.class);
jobConf.setNumMapTasks(numMaps);
jobConf.setNumReduceTasks(0);
jobConf.setJar("build/test/mapred/testjar/testjob.jar");
return JobClient.runJob(jobConf);
}
}
| 7,169 | 37.138298 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTaskPerformanceSplits.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestTaskPerformanceSplits {
@Test
public void testPeriodStatsets() {
PeriodicStatsAccumulator cumulative = new CumulativePeriodicStats(8);
PeriodicStatsAccumulator status = new StatePeriodicStats(8);
cumulative.extend(0.0D, 0);
cumulative.extend(0.4375D, 700); // 200 per octant
cumulative.extend(0.5625D, 1100); // 0.5 = 900
cumulative.extend(0.625D, 1300);
cumulative.extend(1.0D, 7901);
int total = 0;
int[] results = cumulative.getValues();
for (int i = 0; i < 8; ++i) {
System.err.println("segment i = " + results[i]);
}
assertEquals("Bad interpolation in cumulative segment 0", 200, results[0]);
assertEquals("Bad interpolation in cumulative segment 1", 200, results[1]);
assertEquals("Bad interpolation in cumulative segment 2", 200, results[2]);
assertEquals("Bad interpolation in cumulative segment 3", 300, results[3]);
assertEquals("Bad interpolation in cumulative segment 4", 400, results[4]);
assertEquals("Bad interpolation in cumulative segment 5", 2200, results[5]);
// these are rounded down
assertEquals("Bad interpolation in cumulative segment 6", 2200, results[6]);
assertEquals("Bad interpolation in cumulative segment 7", 2201, results[7]);
status.extend(0.0D, 0);
status.extend(1.0D/16.0D, 300); // + 75 for bucket 0
status.extend(3.0D/16.0D, 700); // + 200 for 0, +300 for 1
status.extend(7.0D/16.0D, 2300); // + 450 for 1, + 1500 for 2, + 1050 for 3
    status.extend(1.0D, 1400);     // +1125 for 3, +2100 for 4, +1900 for 5,
                                   // +1700 for 6, +1500 for 7
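    // A rough sketch of where the expected 275 for status segment 0 comes
    // from, assuming StatePeriodicStats accumulates the time-weighted average
    // of the value over each 1/8-wide progress bucket:
    //   extend(1/16, 300): avg(0, 300)   = 150 over half a bucket -> +75
    //   extend(3/16, 700): avg(300, 500) = 400 over the other half -> +200
    //   => bucket 0 = 75 + 200 = 275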
results = status.getValues();
assertEquals("Bad interpolation in status segment 0", 275, results[0]);
assertEquals("Bad interpolation in status segment 1", 750, results[1]);
assertEquals("Bad interpolation in status segment 2", 1500, results[2]);
assertEquals("Bad interpolation in status segment 3", 2175, results[3]);
assertEquals("Bad interpolation in status segment 4", 2100, results[4]);
assertEquals("Bad interpolation in status segment 5", 1900, results[5]);
assertEquals("Bad interpolation in status segment 6", 1700, results[6]);
assertEquals("Bad interpolation in status segment 7", 1500, results[7]);
}
}
| 3,190 | 43.319444 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.jobcontrol;
import java.io.IOException;
import java.text.NumberFormat;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
/**
* Utility methods used in various Job Control unit tests.
*/
public class JobControlTestUtils {
static private Random rand = new Random();
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
/**
* Cleans the data from the passed Path in the passed FileSystem.
*
* @param fs FileSystem to delete data from.
* @param dirPath Path to be deleted.
* @throws IOException If an error occurs cleaning the data.
*/
static void cleanData(FileSystem fs, Path dirPath) throws IOException {
fs.delete(dirPath, true);
}
/**
* Generates a string of random digits.
*
* @return A random string.
*/
private static String generateRandomWord() {
return idFormat.format(rand.nextLong());
}
/**
* Generates a line of random text.
*
* @return A line of random text.
*/
private static String generateRandomLine() {
long r = rand.nextLong() % 7;
long n = r + 20;
StringBuffer sb = new StringBuffer();
for (int i = 0; i < n; i++) {
sb.append(generateRandomWord()).append(" ");
}
sb.append("\n");
return sb.toString();
}
/**
* Generates data that can be used for Job Control tests.
*
* @param fs FileSystem to create data in.
* @param dirPath Path to create the data in.
* @throws IOException If an error occurs creating the data.
*/
static void generateData(FileSystem fs, Path dirPath) throws IOException {
FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt"));
for (int i = 0; i < 10000; i++) {
String line = generateRandomLine();
out.write(line.getBytes("UTF-8"));
}
out.close();
}
/**
* Creates a simple copy job.
*
* @param indirs List of input directories.
* @param outdir Output directory.
* @return JobConf initialised for a simple copy job.
* @throws Exception If an error occurs creating job configuration.
*/
static JobConf createCopyJob(List<Path> indirs, Path outdir) throws Exception {
Configuration defaults = new Configuration();
JobConf theJob = new JobConf(defaults, TestJobControl.class);
theJob.setJobName("DataMoveJob");
FileInputFormat.setInputPaths(theJob, indirs.toArray(new Path[0]));
theJob.setMapperClass(DataCopy.class);
FileOutputFormat.setOutputPath(theJob, outdir);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
theJob.setReducerClass(DataCopy.class);
theJob.setNumMapTasks(12);
theJob.setNumReduceTasks(4);
return theJob;
}
/**
* Simple Mapper and Reducer implementation which copies data it reads in.
*/
public static class DataCopy extends MapReduceBase implements
Mapper<LongWritable, Text, Text, Text>, Reducer<Text, Text, Text, Text> {
public void map(LongWritable key, Text value, OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
output.collect(new Text(key.toString()), value);
}
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
Text dumbKey = new Text("");
while (values.hasNext()) {
Text data = values.next();
output.collect(dumbKey, data);
}
}
}
}
| 4,969 | 31.064516 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.jobcontrol;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.junit.Test;
/**
 * This class performs unit tests for the Job/JobControl classes.
*
*/
public class TestJobControl extends junit.framework.TestCase {
/**
* This is a main function for testing JobControl class.
* It first cleans all the dirs it will use. Then it generates some random text
* data in TestJobControlData/indir. Then it creates 4 jobs:
* Job 1: copy data from indir to outdir_1
* Job 2: copy data from indir to outdir_2
* Job 3: copy data from outdir_1 and outdir_2 to outdir_3
   * Job 4: copy data from outdir_3 to outdir_4
* The jobs 1 and 2 have no dependency. The job 3 depends on jobs 1 and 2.
* The job 4 depends on job 3.
*
   * Then it creates a JobControl object and adds the 4 jobs to it.
   * Finally, it creates a thread to run the JobControl object and monitors/reports
* the job states.
*/
public static void doJobControlTest() throws Exception {
Configuration defaults = new Configuration();
FileSystem fs = FileSystem.get(defaults);
Path rootDataDir = new Path(System.getProperty("test.build.data", "."), "TestJobControlData");
Path indir = new Path(rootDataDir, "indir");
Path outdir_1 = new Path(rootDataDir, "outdir_1");
Path outdir_2 = new Path(rootDataDir, "outdir_2");
Path outdir_3 = new Path(rootDataDir, "outdir_3");
Path outdir_4 = new Path(rootDataDir, "outdir_4");
JobControlTestUtils.cleanData(fs, indir);
JobControlTestUtils.generateData(fs, indir);
JobControlTestUtils.cleanData(fs, outdir_1);
JobControlTestUtils.cleanData(fs, outdir_2);
JobControlTestUtils.cleanData(fs, outdir_3);
JobControlTestUtils.cleanData(fs, outdir_4);
ArrayList<Job> dependingJobs = null;
ArrayList<Path> inPaths_1 = new ArrayList<Path>();
inPaths_1.add(indir);
JobConf jobConf_1 = JobControlTestUtils.createCopyJob(inPaths_1, outdir_1);
Job job_1 = new Job(jobConf_1, dependingJobs);
ArrayList<Path> inPaths_2 = new ArrayList<Path>();
inPaths_2.add(indir);
JobConf jobConf_2 = JobControlTestUtils.createCopyJob(inPaths_2, outdir_2);
Job job_2 = new Job(jobConf_2, dependingJobs);
ArrayList<Path> inPaths_3 = new ArrayList<Path>();
inPaths_3.add(outdir_1);
inPaths_3.add(outdir_2);
JobConf jobConf_3 = JobControlTestUtils.createCopyJob(inPaths_3, outdir_3);
dependingJobs = new ArrayList<Job>();
dependingJobs.add(job_1);
dependingJobs.add(job_2);
Job job_3 = new Job(jobConf_3, dependingJobs);
ArrayList<Path> inPaths_4 = new ArrayList<Path>();
inPaths_4.add(outdir_3);
JobConf jobConf_4 = JobControlTestUtils.createCopyJob(inPaths_4, outdir_4);
dependingJobs = new ArrayList<Job>();
dependingJobs.add(job_3);
Job job_4 = new Job(jobConf_4, dependingJobs);
JobControl theControl = new JobControl("Test");
theControl.addJob((ControlledJob) job_1);
theControl.addJob((ControlledJob) job_2);
theControl.addJob(job_3);
theControl.addJob(job_4);
Thread theController = new Thread(theControl);
theController.start();
while (!theControl.allFinished()) {
System.out.println("Jobs in waiting state: "
+ theControl.getWaitingJobs().size());
System.out.println("Jobs in ready state: "
+ theControl.getReadyJobs().size());
System.out.println("Jobs in running state: "
+ theControl.getRunningJobs().size());
System.out.println("Jobs in success state: "
+ theControl.getSuccessfulJobs().size());
System.out.println("Jobs in failed state: "
+ theControl.getFailedJobs().size());
System.out.println("\n");
try {
Thread.sleep(5000);
} catch (Exception e) {
}
}
System.out.println("Jobs are all done???");
System.out.println("Jobs in waiting state: "
+ theControl.getWaitingJobs().size());
System.out.println("Jobs in ready state: "
+ theControl.getReadyJobs().size());
System.out.println("Jobs in running state: "
+ theControl.getRunningJobs().size());
System.out.println("Jobs in success state: "
+ theControl.getSuccessfulJobs().size());
System.out.println("Jobs in failed state: "
+ theControl.getFailedJobs().size());
System.out.println("\n");
if (job_1.getState() != Job.FAILED &&
job_1.getState() != Job.DEPENDENT_FAILED &&
job_1.getState() != Job.SUCCESS) {
String states = "job_1: " + job_1.getState() + "\n";
throw new Exception("The state of job_1 is not in a complete state\n" + states);
}
if (job_2.getState() != Job.FAILED &&
job_2.getState() != Job.DEPENDENT_FAILED &&
job_2.getState() != Job.SUCCESS) {
String states = "job_2: " + job_2.getState() + "\n";
throw new Exception("The state of job_2 is not in a complete state\n" + states);
}
if (job_3.getState() != Job.FAILED &&
job_3.getState() != Job.DEPENDENT_FAILED &&
job_3.getState() != Job.SUCCESS) {
String states = "job_3: " + job_3.getState() + "\n";
throw new Exception("The state of job_3 is not in a complete state\n" + states);
}
if (job_4.getState() != Job.FAILED &&
job_4.getState() != Job.DEPENDENT_FAILED &&
job_4.getState() != Job.SUCCESS) {
String states = "job_4: " + job_4.getState() + "\n";
throw new Exception("The state of job_4 is not in a complete state\n" + states);
}
if (job_1.getState() == Job.FAILED ||
job_2.getState() == Job.FAILED ||
job_1.getState() == Job.DEPENDENT_FAILED ||
job_2.getState() == Job.DEPENDENT_FAILED) {
if (job_3.getState() != Job.DEPENDENT_FAILED) {
String states = "job_1: " + job_1.getState() + "\n";
states = "job_2: " + job_2.getState() + "\n";
states = "job_3: " + job_3.getState() + "\n";
states = "job_4: " + job_4.getState() + "\n";
throw new Exception("The states of jobs 1, 2, 3, 4 are not consistent\n" + states);
}
}
if (job_3.getState() == Job.FAILED ||
job_3.getState() == Job.DEPENDENT_FAILED) {
if (job_4.getState() != Job.DEPENDENT_FAILED) {
String states = "job_3: " + job_3.getState() + "\n";
states = "job_4: " + job_4.getState() + "\n";
throw new Exception("The states of jobs 3, 4 are not consistent\n" + states);
}
}
theControl.stop();
}
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
Job job_1 = getCopyJob();
JobControl jc = new JobControl("Test");
jc.addJob(job_1);
Assert.assertEquals(Job.WAITING, job_1.getState());
job_1.setState(Job.SUCCESS);
Assert.assertEquals(Job.WAITING, job_1.getState());
org.apache.hadoop.mapreduce.Job mockjob =
mock(org.apache.hadoop.mapreduce.Job.class);
org.apache.hadoop.mapreduce.JobID jid =
new org.apache.hadoop.mapreduce.JobID("test", 0);
when(mockjob.getJobID()).thenReturn(jid);
job_1.setJob(mockjob);
Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
job_1.setMapredJobID("job_test_0001");
Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
jc.stop();
}
@Test(timeout = 30000)
public void testAddingDependingJob() throws Exception {
Job job_1 = getCopyJob();
ArrayList<Job> dependingJobs = new ArrayList<Job>();
JobControl jc = new JobControl("Test");
jc.addJob(job_1);
Assert.assertEquals(Job.WAITING, job_1.getState());
Assert.assertTrue(job_1.addDependingJob(new Job(job_1.getJobConf(),
dependingJobs)));
}
public Job getCopyJob() throws Exception {
Configuration defaults = new Configuration();
FileSystem fs = FileSystem.get(defaults);
Path rootDataDir =
new Path(System.getProperty("test.build.data", "."),
"TestJobControlData");
Path indir = new Path(rootDataDir, "indir");
Path outdir_1 = new Path(rootDataDir, "outdir_1");
JobControlTestUtils.cleanData(fs, indir);
JobControlTestUtils.generateData(fs, indir);
JobControlTestUtils.cleanData(fs, outdir_1);
ArrayList<Job> dependingJobs = null;
ArrayList<Path> inPaths_1 = new ArrayList<Path>();
inPaths_1.add(indir);
JobConf jobConf_1 = JobControlTestUtils.createCopyJob(inPaths_1, outdir_1);
Job job_1 = new Job(jobConf_1, dependingJobs);
return job_1;
}
@Test (timeout = 30000)
public void testJobControl() throws Exception {
doJobControlTest();
}
@Test (timeout = 30000)
public void testGetAssignedJobId() throws Exception {
JobConf jc = new JobConf();
Job j = new Job(jc);
//Just make sure no exception is thrown
assertNull(j.getAssignedJobID());
org.apache.hadoop.mapreduce.Job mockjob = mock(org.apache.hadoop.mapreduce.Job.class);
org.apache.hadoop.mapreduce.JobID jid = new org.apache.hadoop.mapreduce.JobID("test",0);
when(mockjob.getJobID()).thenReturn(jid);
j.setJob(mockjob);
JobID expected = new JobID("test",0);
assertEquals(expected, j.getAssignedJobID());
verify(mockjob).getJobID();
}
public static void main(String[] args) {
TestJobControl test = new TestJobControl();
try {
test.testJobControl();
}
catch (Exception e) {
e.printStackTrace();
}
}
}
| 10,907 | 37.13986 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.jobcontrol;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.JobConf;
/**
* HadoopTestCase that tests the local job runner.
*/
public class TestLocalJobControl extends HadoopTestCase {
public static final Log LOG = LogFactory.getLog(TestLocalJobControl.class
.getName());
/**
* Initialises a new instance of this test case to use a Local MR cluster and
* a local filesystem.
*
* @throws IOException If an error occurs initialising this object.
*/
public TestLocalJobControl() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 2, 2);
}
  /**
   * This is the main test method for the JobControl class. It first cleans all
   * the dirs it will use. Then it generates some random text data in
   * TestLocalJobControlData/indir. Then it creates 4 jobs:
   * Job 1: copy data from indir to outdir_1.
   * Job 2: copy data from indir to outdir_2.
   * Job 3: copy data from outdir_1 and outdir_2 to outdir_3.
   * Job 4: copy data from outdir_3 to outdir_4.
   * Jobs 1 and 2 have no dependencies. Job 3 depends on jobs 1 and 2. Job 4
   * depends on job 3.
   *
   * Then it creates a JobControl object, adds the 4 jobs to it, and finally
   * creates a thread to run the JobControl object and monitors/reports the
   * job states.
   */
public void testLocalJobControlDataCopy() throws Exception {
FileSystem fs = FileSystem.get(createJobConf());
Path rootDataDir = new Path(System.getProperty("test.build.data", "."),
"TestLocalJobControlData");
Path indir = new Path(rootDataDir, "indir");
Path outdir_1 = new Path(rootDataDir, "outdir_1");
Path outdir_2 = new Path(rootDataDir, "outdir_2");
Path outdir_3 = new Path(rootDataDir, "outdir_3");
Path outdir_4 = new Path(rootDataDir, "outdir_4");
JobControlTestUtils.cleanData(fs, indir);
JobControlTestUtils.generateData(fs, indir);
JobControlTestUtils.cleanData(fs, outdir_1);
JobControlTestUtils.cleanData(fs, outdir_2);
JobControlTestUtils.cleanData(fs, outdir_3);
JobControlTestUtils.cleanData(fs, outdir_4);
ArrayList<Job> dependingJobs = null;
ArrayList<Path> inPaths_1 = new ArrayList<Path>();
inPaths_1.add(indir);
JobConf jobConf_1 = JobControlTestUtils.createCopyJob(inPaths_1, outdir_1);
Job job_1 = new Job(jobConf_1, dependingJobs);
ArrayList<Path> inPaths_2 = new ArrayList<Path>();
inPaths_2.add(indir);
JobConf jobConf_2 = JobControlTestUtils.createCopyJob(inPaths_2, outdir_2);
Job job_2 = new Job(jobConf_2, dependingJobs);
ArrayList<Path> inPaths_3 = new ArrayList<Path>();
inPaths_3.add(outdir_1);
inPaths_3.add(outdir_2);
JobConf jobConf_3 = JobControlTestUtils.createCopyJob(inPaths_3, outdir_3);
dependingJobs = new ArrayList<Job>();
dependingJobs.add(job_1);
dependingJobs.add(job_2);
Job job_3 = new Job(jobConf_3, dependingJobs);
ArrayList<Path> inPaths_4 = new ArrayList<Path>();
inPaths_4.add(outdir_3);
JobConf jobConf_4 = JobControlTestUtils.createCopyJob(inPaths_4, outdir_4);
dependingJobs = new ArrayList<Job>();
dependingJobs.add(job_3);
Job job_4 = new Job(jobConf_4, dependingJobs);
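    // dependency graph: job_1 and job_2 are independent, job_3 depends on
    // both of them, and job_4 depends on job_3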
JobControl theControl = new JobControl("Test");
theControl.addJob(job_1);
theControl.addJob(job_2);
theControl.addJob(job_3);
theControl.addJob(job_4);
Thread theController = new Thread(theControl);
theController.start();
while (!theControl.allFinished()) {
LOG.debug("Jobs in waiting state: " + theControl.getWaitingJobs().size());
LOG.debug("Jobs in ready state: " + theControl.getReadyJobs().size());
LOG.debug("Jobs in running state: " + theControl.getRunningJobs().size());
LOG.debug("Jobs in success state: "
+ theControl.getSuccessfulJobs().size());
LOG.debug("Jobs in failed state: " + theControl.getFailedJobs().size());
LOG.debug("\n");
try {
Thread.sleep(5000);
} catch (Exception e) {
}
}
assertEquals("Some jobs failed", 0, theControl.getFailedJobs().size());
theControl.stop();
}
}
| 5,213 | 37.338235 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestDatamerge.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.ReflectionUtils;
public class TestDatamerge extends TestCase {
private static MiniDFSCluster cluster = null;
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestDatamerge.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
private static SequenceFile.Writer[] createWriters(Path testdir,
Configuration conf, int srcs, Path[] src) throws IOException {
for (int i = 0; i < srcs; ++i) {
src[i] = new Path(testdir, Integer.toString(i + 10, 36));
}
SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
for (int i = 0; i < srcs; ++i) {
out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[i], IntWritable.class, IntWritable.class);
}
return out;
}
private static Path[] writeSimpleSrc(Path testdir, Configuration conf,
int srcs) throws IOException {
SequenceFile.Writer out[] = null;
Path[] src = new Path[srcs];
try {
out = createWriters(testdir, conf, srcs, src);
final int capacity = srcs * 2 + 1;
IntWritable key = new IntWritable();
IntWritable val = new IntWritable();
for (int k = 0; k < capacity; ++k) {
for (int i = 0; i < srcs; ++i) {
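          // when k is a multiple of srcs every source gets the same key
          // (k * srcs); otherwise each source gets a distinct key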
key.set(k % srcs == 0 ? k * srcs : k * srcs + i);
val.set(10 * k + i);
out[i].append(key, val);
if (i == k) {
// add duplicate key
out[i].append(key, val);
}
}
}
} finally {
if (out != null) {
for (int i = 0; i < srcs; ++i) {
if (out[i] != null)
out[i].close();
}
}
}
return src;
}
private static String stringify(IntWritable key, Writable val) {
StringBuilder sb = new StringBuilder();
sb.append("(" + key);
sb.append("," + val + ")");
return sb.toString();
}
private static abstract class SimpleCheckerBase<V extends Writable>
implements Mapper<IntWritable, V, IntWritable, IntWritable>,
Reducer<IntWritable, IntWritable, Text, Text> {
protected final static IntWritable one = new IntWritable(1);
int srcs;
public void close() { }
public void configure(JobConf job) {
srcs = job.getInt("testdatamerge.sources", 0);
assertTrue("Invalid src count: " + srcs, srcs > 0);
}
public abstract void map(IntWritable key, V val,
OutputCollector<IntWritable, IntWritable> out, Reporter reporter)
throws IOException;
public void reduce(IntWritable key, Iterator<IntWritable> values,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
int seen = 0;
while (values.hasNext()) {
seen += values.next().get();
}
assertTrue("Bad count for " + key.get(), verify(key.get(), seen));
}
public abstract boolean verify(int key, int occ);
}
private static class InnerJoinChecker
extends SimpleCheckerBase<TupleWritable> {
public void map(IntWritable key, TupleWritable val,
OutputCollector<IntWritable, IntWritable> out, Reporter reporter)
throws IOException {
int k = key.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
assertTrue(kvstr, 0 == k % (srcs * srcs));
for (int i = 0; i < val.size(); ++i) {
final int vali = ((IntWritable)val.get(i)).get();
assertTrue(kvstr, (vali - i) * srcs == 10 * k);
}
out.collect(key, one);
}
public boolean verify(int key, int occ) {
return (key == 0 && occ == 2) ||
(key != 0 && (key % (srcs * srcs) == 0) && occ == 1);
}
}
private static class OuterJoinChecker
extends SimpleCheckerBase<TupleWritable> {
public void map(IntWritable key, TupleWritable val,
OutputCollector<IntWritable, IntWritable> out, Reporter reporter)
throws IOException {
int k = key.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
if (0 == k % (srcs * srcs)) {
for (int i = 0; i < val.size(); ++i) {
assertTrue(kvstr, val.get(i) instanceof IntWritable);
final int vali = ((IntWritable)val.get(i)).get();
assertTrue(kvstr, (vali - i) * srcs == 10 * k);
}
} else {
for (int i = 0; i < val.size(); ++i) {
if (i == k % srcs) {
assertTrue(kvstr, val.get(i) instanceof IntWritable);
final int vali = ((IntWritable)val.get(i)).get();
assertTrue(kvstr, srcs * (vali - i) == 10 * (k - i));
} else {
assertTrue(kvstr, !val.has(i));
}
}
}
out.collect(key, one);
}
public boolean verify(int key, int occ) {
if (key < srcs * srcs && (key % (srcs + 1)) == 0)
return 2 == occ;
return 1 == occ;
}
}
private static class OverrideChecker
extends SimpleCheckerBase<IntWritable> {
public void map(IntWritable key, IntWritable val,
OutputCollector<IntWritable, IntWritable> out, Reporter reporter)
throws IOException {
int k = key.get();
final int vali = val.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
if (0 == k % (srcs * srcs)) {
assertTrue(kvstr, vali == k * 10 / srcs + srcs - 1);
} else {
final int i = k % srcs;
assertTrue(kvstr, srcs * (vali - i) == 10 * (k - i));
}
out.collect(key, one);
}
public boolean verify(int key, int occ) {
if (key < srcs * srcs && (key % (srcs + 1)) == 0 && key != 0)
return 2 == occ;
return 1 == occ;
}
}
private static void joinAs(String jointype,
Class<? extends SimpleCheckerBase> c) throws Exception {
final int srcs = 4;
Configuration conf = new Configuration();
JobConf job = new JobConf(conf, c);
Path base = cluster.getFileSystem().makeQualified(new Path("/"+jointype));
Path[] src = writeSimpleSrc(base, conf, srcs);
job.set("mapreduce.join.expr", CompositeInputFormat.compose(jointype,
SequenceFileInputFormat.class, src));
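    // the composed expression is roughly of the form
    //   <jointype>(tbl(<SequenceFileInputFormat>,"<src0>"),tbl(...),...)
    // which CompositeInputFormat parses into the join plan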
job.setInt("testdatamerge.sources", srcs);
job.setInputFormat(CompositeInputFormat.class);
FileOutputFormat.setOutputPath(job, new Path(base, "out"));
job.setMapperClass(c);
job.setReducerClass(c);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(IntWritable.class);
JobClient.runJob(job);
base.getFileSystem(job).delete(base, true);
}
public void testSimpleInnerJoin() throws Exception {
joinAs("inner", InnerJoinChecker.class);
}
public void testSimpleOuterJoin() throws Exception {
joinAs("outer", OuterJoinChecker.class);
}
public void testSimpleOverride() throws Exception {
joinAs("override", OverrideChecker.class);
}
public void testNestedJoin() throws Exception {
// outer(inner(S1,...,Sn),outer(S1,...Sn))
final int SOURCES = 3;
final int ITEMS = (SOURCES + 1) * (SOURCES + 1);
JobConf job = new JobConf();
Path base = cluster.getFileSystem().makeQualified(new Path("/nested"));
int[][] source = new int[SOURCES][];
for (int i = 0; i < SOURCES; ++i) {
source[i] = new int[ITEMS];
for (int j = 0; j < ITEMS; ++j) {
source[i][j] = (i + 2) * (j + 1);
}
}
Path[] src = new Path[SOURCES];
SequenceFile.Writer out[] = createWriters(base, job, SOURCES, src);
IntWritable k = new IntWritable();
for (int i = 0; i < SOURCES; ++i) {
IntWritable v = new IntWritable();
v.set(i);
for (int j = 0; j < ITEMS; ++j) {
k.set(source[i][j]);
out[i].append(k, v);
}
out[i].close();
}
out = null;
StringBuilder sb = new StringBuilder();
sb.append("outer(inner(");
for (int i = 0; i < SOURCES; ++i) {
sb.append(
CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[i].toString()));
if (i + 1 != SOURCES) sb.append(",");
}
sb.append("),outer(");
sb.append(CompositeInputFormat.compose(Fake_IF.class,"foobar"));
sb.append(",");
for (int i = 0; i < SOURCES; ++i) {
sb.append(
CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[i].toString()));
sb.append(",");
}
sb.append(CompositeInputFormat.compose(Fake_IF.class,"raboof") + "))");
job.set("mapreduce.join.expr", sb.toString());
job.setInputFormat(CompositeInputFormat.class);
Path outf = new Path(base, "out");
FileOutputFormat.setOutputPath(job, outf);
Fake_IF.setKeyClass(job, IntWritable.class);
Fake_IF.setValClass(job, IntWritable.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(TupleWritable.class);
job.setOutputFormat(SequenceFileOutputFormat.class);
JobClient.runJob(job);
FileStatus[] outlist = cluster.getFileSystem().listStatus(outf,
new Utils.OutputFileUtils.OutputFilesFilter());
assertEquals(1, outlist.length);
assertTrue(0 < outlist[0].getLen());
SequenceFile.Reader r =
new SequenceFile.Reader(cluster.getFileSystem(),
outlist[0].getPath(), job);
TupleWritable v = new TupleWritable();
while (r.next(k, v)) {
assertFalse(((TupleWritable)v.get(1)).has(0));
assertFalse(((TupleWritable)v.get(1)).has(SOURCES + 1));
boolean chk = true;
int ki = k.get();
for (int i = 2; i < SOURCES + 2; ++i) {
if ((ki % i) == 0 && ki <= i * ITEMS) {
assertEquals(i - 2, ((IntWritable)
((TupleWritable)v.get(1)).get((i - 1))).get());
} else chk = false;
}
if (chk) { // present in all sources; chk inner
assertTrue(v.has(0));
for (int i = 0; i < SOURCES; ++i)
assertTrue(((TupleWritable)v.get(0)).has(i));
} else { // should not be present in inner join
assertFalse(v.has(0));
}
}
r.close();
base.getFileSystem(job).delete(base, true);
}
public void testEmptyJoin() throws Exception {
JobConf job = new JobConf();
Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
Fake_IF.class, src));
job.setInputFormat(CompositeInputFormat.class);
FileOutputFormat.setOutputPath(job, new Path(base, "out"));
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setOutputKeyClass(IncomparableKey.class);
job.setOutputValueClass(NullWritable.class);
JobClient.runJob(job);
base.getFileSystem(job).delete(base, true);
}
public static class Fake_IF<K,V>
implements InputFormat<K,V>, JobConfigurable {
public static class FakeSplit implements InputSplit {
public void write(DataOutput out) throws IOException { }
public void readFields(DataInput in) throws IOException { }
public long getLength() { return 0L; }
public String[] getLocations() { return new String[0]; }
}
public static void setKeyClass(JobConf job, Class<?> k) {
job.setClass("test.fakeif.keyclass", k, WritableComparable.class);
}
public static void setValClass(JobConf job, Class<?> v) {
job.setClass("test.fakeif.valclass", v, Writable.class);
}
private Class<? extends K> keyclass;
private Class<? extends V> valclass;
@SuppressWarnings("unchecked")
public void configure(JobConf job) {
keyclass = (Class<? extends K>) job.getClass("test.fakeif.keyclass",
IncomparableKey.class, WritableComparable.class);
valclass = (Class<? extends V>) job.getClass("test.fakeif.valclass",
NullWritable.class, WritableComparable.class);
}
public Fake_IF() { }
public InputSplit[] getSplits(JobConf conf, int splits) {
return new InputSplit[] { new FakeSplit() };
}
public RecordReader<K,V> getRecordReader(
InputSplit ignored, JobConf conf, Reporter reporter) {
return new RecordReader<K,V>() {
public boolean next(K key, V value) throws IOException { return false; }
public K createKey() {
return ReflectionUtils.newInstance(keyclass, null);
}
public V createValue() {
return ReflectionUtils.newInstance(valclass, null);
}
public long getPos() throws IOException { return 0L; }
public void close() throws IOException { }
public float getProgress() throws IOException { return 0.0f; }
};
}
}
}
| 15,338 | 35.262411 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
public class TestWrappedRecordReaderClassloader extends TestCase {
/**
   * Tests that the class loader set by {@link JobConf#setClassLoader(ClassLoader)}
   * is inherited by any {@link WrappedRecordReader}s created by
   * {@link CompositeRecordReader}.
*/
public void testClassLoader() throws Exception {
JobConf job = new JobConf();
Fake_ClassLoader classLoader = new Fake_ClassLoader();
job.setClassLoader(classLoader);
assertTrue(job.getClassLoader() instanceof Fake_ClassLoader);
FileSystem fs = FileSystem.get(job);
Path testdir = new Path(System.getProperty("test.build.data", "/tmp"))
.makeQualified(fs);
Path base = new Path(testdir, "/empty");
Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
IF_ClassLoaderChecker.class, src));
CompositeInputFormat<NullWritable> inputFormat = new CompositeInputFormat<NullWritable>();
inputFormat.getRecordReader(inputFormat.getSplits(job, 1)[0], job,
Reporter.NULL);
}
public static class Fake_ClassLoader extends ClassLoader {
}
public static class IF_ClassLoaderChecker<K, V> implements InputFormat<K, V>,
JobConfigurable {
public static class FakeSplit implements InputSplit {
public void write(DataOutput out) throws IOException {
}
public void readFields(DataInput in) throws IOException {
}
public long getLength() {
return 0L;
}
public String[] getLocations() {
return new String[0];
}
}
public static void setKeyClass(JobConf job, Class<?> k) {
job.setClass("test.fakeif.keyclass", k, WritableComparable.class);
}
public static void setValClass(JobConf job, Class<?> v) {
job.setClass("test.fakeif.valclass", v, Writable.class);
}
protected Class<? extends K> keyclass;
protected Class<? extends V> valclass;
@SuppressWarnings("unchecked")
public void configure(JobConf job) {
keyclass = (Class<? extends K>) job.getClass("test.fakeif.keyclass",
NullWritable.class, WritableComparable.class);
valclass = (Class<? extends V>) job.getClass("test.fakeif.valclass",
NullWritable.class, WritableComparable.class);
}
public IF_ClassLoaderChecker() {
}
public InputSplit[] getSplits(JobConf conf, int splits) {
return new InputSplit[] { new FakeSplit() };
}
public RecordReader<K, V> getRecordReader(InputSplit ignored, JobConf job,
Reporter reporter) {
return new RR_ClassLoaderChecker<K, V>(job);
}
}
public static class RR_ClassLoaderChecker<K, V> implements RecordReader<K, V> {
private Class<? extends K> keyclass;
private Class<? extends V> valclass;
@SuppressWarnings("unchecked")
public RR_ClassLoaderChecker(JobConf job) {
assertTrue("The class loader has not been inherited from "
+ CompositeRecordReader.class.getSimpleName(),
job.getClassLoader() instanceof Fake_ClassLoader);
keyclass = (Class<? extends K>) job.getClass("test.fakeif.keyclass",
NullWritable.class, WritableComparable.class);
valclass = (Class<? extends V>) job.getClass("test.fakeif.valclass",
NullWritable.class, WritableComparable.class);
}
public boolean next(K key, V value) throws IOException {
return false;
}
public K createKey() {
return ReflectionUtils.newInstance(keyclass, null);
}
public V createValue() {
return ReflectionUtils.newInstance(valclass, null);
}
public long getPos() throws IOException {
return 0L;
}
public void close() throws IOException {
}
public float getProgress() throws IOException {
return 0.0f;
}
}
}
| 5,303 | 32.358491 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestTupleWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
public class TestTupleWritable extends TestCase {
private TupleWritable makeTuple(Writable[] writs) {
Writable[] sub1 = { writs[1], writs[2] };
Writable[] sub3 = { writs[4], writs[5] };
Writable[] sub2 = { writs[3], new TupleWritable(sub3), writs[6] };
Writable[] vals = { writs[0], new TupleWritable(sub1),
new TupleWritable(sub2), writs[7], writs[8],
writs[9] };
// [v0, [v1, v2], [v3, [v4, v5], v6], v7, v8, v9]
TupleWritable ret = new TupleWritable(vals);
for (int i = 0; i < 6; ++i) {
ret.setWritten(i);
}
((TupleWritable)sub2[1]).setWritten(0);
((TupleWritable)sub2[1]).setWritten(1);
((TupleWritable)vals[1]).setWritten(0);
((TupleWritable)vals[1]).setWritten(1);
for (int i = 0; i < 3; ++i) {
((TupleWritable)vals[2]).setWritten(i);
}
return ret;
}
private Writable[] makeRandomWritables() {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
return writs;
}
private Writable[] makeRandomWritables(int numWrits)
{
Writable[] writs = makeRandomWritables();
Writable[] manyWrits = new Writable[numWrits];
for (int i =0; i<manyWrits.length; i++)
{
manyWrits[i] = writs[i%writs.length];
}
return manyWrits;
}
private int verifIter(Writable[] writs, TupleWritable t, int i) {
for (Writable w : t) {
if (w instanceof TupleWritable) {
i = verifIter(writs, ((TupleWritable)w), i);
continue;
}
assertTrue("Bad value", w.equals(writs[i++]));
}
return i;
}
public void testIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable t = new TupleWritable(writs);
for (int i = 0; i < 6; ++i) {
t.setWritten(i);
}
verifIter(writs, t, 0);
}
public void testNestedIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable sTuple = makeTuple(writs);
assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
public void testWritable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable sTuple = makeTuple(writs);
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
}
public void testWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
TupleWritable sTuple = new TupleWritable(manyWrits);
for (int i =0; i<manyWrits.length; i++)
{
if (i % 3 == 0) {
sTuple.setWritten(i);
}
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
public void testWideWritable2() throws Exception {
Writable[] manyWrits = makeRandomWritables(71);
TupleWritable sTuple = new TupleWritable(manyWrits);
for (int i =0; i<manyWrits.length; i++)
{
sTuple.setWritten(i);
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
/**
* Tests a tuple writable with more than 64 values and the values set written
* spread far apart.
*/
public void testSparseWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
TupleWritable sTuple = new TupleWritable(manyWrits);
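    // marking only positions 0, 65 and 130 as written spreads the set bits
    // across multiple 64-bit words of the internal bookkeeping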
for (int i =0; i<manyWrits.length; i++)
{
if (i % 65 == 0) {
sTuple.setWritten(i);
}
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
public void testWideTuple() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
Arrays.fill(values,emptyText);
values[42] = new Text("Number 42");
TupleWritable tuple = new TupleWritable(values);
tuple.setWritten(42);
for (int pos=0; pos<tuple.size();pos++) {
boolean has = tuple.has(pos);
if (pos == 42) {
assertTrue(has);
}
else {
assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
}
}
}
public void testWideTuple2() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
Arrays.fill(values,emptyText);
values[9] = new Text("Number 9");
TupleWritable tuple = new TupleWritable(values);
tuple.setWritten(9);
for (int pos=0; pos<tuple.size();pos++) {
boolean has = tuple.has(pos);
if (pos == 9) {
assertTrue(has);
}
else {
assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
}
}
}
/**
* Tests that we can write more than 64 values.
*/
public void testWideTupleBoundary() throws Exception {
Text emptyText = new Text("Should not be set written");
Writable[] values = new Writable[65];
Arrays.fill(values,emptyText);
values[64] = new Text("Should be the only value set written");
TupleWritable tuple = new TupleWritable(values);
tuple.setWritten(64);
for (int pos=0; pos<tuple.size();pos++) {
boolean has = tuple.has(pos);
if (pos == 64) {
assertTrue(has);
}
else {
assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
}
}
}
/**
* Tests compatibility with pre-0.21 versions of TupleWritable
*/
public void testPreVersion21Compatibility() throws Exception {
Writable[] manyWrits = makeRandomWritables(64);
PreVersion21TupleWritable oldTuple = new PreVersion21TupleWritable(manyWrits);
for (int i =0; i<manyWrits.length; i++) {
if (i % 3 == 0) {
oldTuple.setWritten(i);
}
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
oldTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Tuple writable is unable to read pre-0.21 versions of TupleWritable", oldTuple.isCompatible(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
public void testPreVersion21CompatibilityEmptyTuple() throws Exception {
Writable[] manyWrits = new Writable[0];
PreVersion21TupleWritable oldTuple = new PreVersion21TupleWritable(manyWrits);
// don't set any values written
ByteArrayOutputStream out = new ByteArrayOutputStream();
oldTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Tuple writable is unable to read pre-0.21 versions of TupleWritable", oldTuple.isCompatible(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
/**
* Writes to the DataOutput stream in the same way as pre-0.21 versions of
* {@link TupleWritable#write(DataOutput)}
*/
private static class PreVersion21TupleWritable {
private Writable[] values;
private long written = 0L;
private PreVersion21TupleWritable(Writable[] vals) {
written = 0L;
values = vals;
}
private void setWritten(int i) {
written |= 1L << i;
}
private boolean has(int i) {
return 0 != ((1L << i) & written);
}
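    // pre-0.21 wire format: vint value count, vlong "written" bitmask (hence
    // the 64-value limit), one class name per slot, then only the values
    // whose bit is set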
private void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, values.length);
WritableUtils.writeVLong(out, written);
for (int i = 0; i < values.length; ++i) {
Text.writeString(out, values[i].getClass().getName());
}
for (int i = 0; i < values.length; ++i) {
if (has(i)) {
values[i].write(out);
}
}
}
public int size() {
return values.length;
}
public boolean isCompatible(TupleWritable that) {
if (this.size() != that.size()) {
return false;
}
for (int i = 0; i < values.length; ++i) {
if (has(i)!=that.has(i)) {
return false;
}
if (has(i) && !values[i].equals(that.get(i))) {
return false;
}
}
return true;
}
}
}
| 12,738 | 32.880319 | 117 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/IncomparableKey.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.DataInput;
import java.io.DataOutput;
import org.apache.hadoop.io.WritableComparable;
public class IncomparableKey implements WritableComparable {
public void write(DataOutput out) { }
public void readFields(DataInput in) { }
public int compareTo(Object o) {
throw new RuntimeException("Should never see this.");
}
}
| 1,193 | 36.3125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipesNonJavaInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.pipes.TestPipeApplication.FakeSplit;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TestPipesNonJavaInputFormat {
private static File workSpace = new File("target",
TestPipesNonJavaInputFormat.class.getName() + "-workSpace");
/**
   * Tests PipesNonJavaInputFormat split generation and the dummy record reader.
*/
@Test
public void testFormat() throws IOException {
PipesNonJavaInputFormat inputFormat = new PipesNonJavaInputFormat();
JobConf conf = new JobConf();
Reporter reporter= mock(Reporter.class);
RecordReader<FloatWritable, NullWritable> reader = inputFormat
.getRecordReader(new FakeSplit(), conf, reporter);
assertEquals(0.0f, reader.getProgress(), 0.001);
// input and output files
File input1 = new File(workSpace + File.separator + "input1");
if (!input1.getParentFile().exists()) {
Assert.assertTrue(input1.getParentFile().mkdirs());
}
if (!input1.exists()) {
Assert.assertTrue(input1.createNewFile());
}
File input2 = new File(workSpace + File.separator + "input2");
if (!input2.exists()) {
Assert.assertTrue(input2.createNewFile());
}
// set data for splits
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
StringUtils.escapeString(input1.getAbsolutePath()) + ","
+ StringUtils.escapeString(input2.getAbsolutePath()));
InputSplit[] splits = inputFormat.getSplits(conf, 2);
assertEquals(2, splits.length);
PipesNonJavaInputFormat.PipesDummyRecordReader dummyRecordReader = new PipesNonJavaInputFormat.PipesDummyRecordReader(
conf, splits[0]);
// empty dummyRecordReader
assertNull(dummyRecordReader.createKey());
assertNull(dummyRecordReader.createValue());
assertEquals(0, dummyRecordReader.getPos());
assertEquals(0.0, dummyRecordReader.getProgress(), 0.001);
// test method next
assertTrue(dummyRecordReader.next(new FloatWritable(2.0f), NullWritable.get()));
assertEquals(2.0, dummyRecordReader.getProgress(), 0.001);
dummyRecordReader.close();
}
}
| 3,364 | 35.576087 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
/**
* This is a support class to test Hadoop Pipes when using C++ RecordReaders.
* It defines an InputFormat with InputSplits that are just strings. The
* RecordReaders are not implemented in Java, naturally...
*/
public class WordCountInputFormat
extends FileInputFormat<IntWritable, Text> {
static class WordCountInputSplit implements InputSplit {
private String filename;
WordCountInputSplit() { }
WordCountInputSplit(Path filename) {
this.filename = filename.toUri().getPath();
}
public void write(DataOutput out) throws IOException {
Text.writeString(out, filename);
}
public void readFields(DataInput in) throws IOException {
filename = Text.readString(in);
}
public long getLength() { return 0L; }
public String[] getLocations() { return new String[0]; }
}
public InputSplit[] getSplits(JobConf conf,
int numSplits) throws IOException {
ArrayList<InputSplit> result = new ArrayList<InputSplit>();
FileSystem local = FileSystem.getLocal(conf);
for(Path dir: getInputPaths(conf)) {
for(FileStatus file: local.listStatus(dir)) {
result.add(new WordCountInputSplit(file.getPath()));
}
}
return result.toArray(new InputSplit[result.size()]);
}
public RecordReader<IntWritable, Text> getRecordReader(InputSplit split,
JobConf conf,
Reporter reporter) {
return new RecordReader<IntWritable, Text>(){
public boolean next(IntWritable key, Text value) throws IOException {
return false;
}
public IntWritable createKey() {
return new IntWritable();
}
public Text createValue() {
return new Text();
}
public long getPos() {
return 0;
}
public void close() { }
public float getProgress() {
return 0.0f;
}
};
}
}
| 2,980 | 33.662791 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/PipeReducerStub.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
/*
 Stub for the TestPipeApplication test. This stub produces test data for the main test; the main test then checks that data.
*/
public class PipeReducerStub extends CommonStub {
public static void main(String[] args) {
PipeReducerStub client = new PipeReducerStub();
client.binaryProtocolStub();
}
public void binaryProtocolStub() {
try {
initSoket();
//should be 5
//RUN_REDUCE boolean
WritableUtils.readVInt(dataInput);
WritableUtils.readVInt(dataInput);
int intValue = WritableUtils.readVInt(dataInput);
System.out.println("getIsJavaRecordWriter:" + intValue);
// reduce key
WritableUtils.readVInt(dataInput);
// value of reduce key
BooleanWritable value = new BooleanWritable();
readObject(value, dataInput);
System.out.println("reducer key :" + value);
// reduce value code:
// reduce values
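      // 7 is assumed to be the REDUCE_VALUE message code of the pipes binary
      // protocol; any other code ends the value stream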
while ((intValue = WritableUtils.readVInt(dataInput)) == 7) {
Text txt = new Text();
// value
readObject(txt, dataInput);
System.out.println("reduce value :" + txt);
}
// done
WritableUtils.writeVInt(dataOut, 54);
dataOut.flush();
dataOut.close();
} catch (Exception x) {
x.printStackTrace();
} finally {
closeSoket();
}
}
}
| 2,296 | 27.358025 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/PipeApplicationRunnableStub.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
/*
 Stub for the TestPipeApplication test. This stub produces test data for the main test; the main test then checks that data.
*/
public class PipeApplicationRunnableStub extends CommonStub {
public static void main(String[] args) {
PipeApplicationRunnableStub client = new PipeApplicationRunnableStub();
client.binaryProtocolStub();
}
public void binaryProtocolStub() {
try {
initSoket();
System.out.println("start OK");
// RUN_MAP.code
// should be 3
int answer = WritableUtils.readVInt(dataInput);
System.out.println("RunMap:" + answer);
TestPipeApplication.FakeSplit split = new TestPipeApplication.FakeSplit();
readObject(split, dataInput);
WritableUtils.readVInt(dataInput);
WritableUtils.readVInt(dataInput);
// end runMap
// get InputTypes
WritableUtils.readVInt(dataInput);
String inText = Text.readString(dataInput);
System.out.println("Key class:" + inText);
inText = Text.readString(dataInput);
System.out.println("Value class:" + inText);
@SuppressWarnings("unused")
int inCode = 0;
// read all data from sender and write to output
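      // 4 is assumed to be the MAP_ITEM message code of the pipes binary
      // protocol; the loop stops on any other code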
while ((inCode = WritableUtils.readVInt(dataInput)) == 4) {
FloatWritable key = new FloatWritable();
NullWritable value = NullWritable.get();
readObject(key, dataInput);
System.out.println("value:" + key.get());
readObject(value, dataInput);
}
WritableUtils.writeVInt(dataOut, 54);
dataOut.flush();
dataOut.close();
} catch (Exception x) {
x.printStackTrace();
} finally {
closeSoket();
}
}
}
| 2,682 | 29.488636 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestPipeApplication {
private static File workSpace = new File("target",
TestPipeApplication.class.getName() + "-workSpace");
private static String taskName = "attempt_001_02_r03_04_05";
/**
   * Tests PipesMapRunner: verifies the transfer of data from the reader.
*
* @throws Exception
*/
@Test
public void testRunner() throws Exception {
// clean old password files
File[] psw = cleanTokenPasswordFile();
try {
RecordReader<FloatWritable, NullWritable> rReader = new ReaderPipesMapRunner();
JobConf conf = new JobConf();
conf.set(Submitter.IS_JAVA_RR, "true");
      // for stdout and stderr
conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
CombineOutputCollector<IntWritable, Text> output = new CombineOutputCollector<IntWritable, Text>(
new Counters.Counter(), new Progress());
FileSystem fs = new RawLocalFileSystem();
fs.setConf(conf);
Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs.create(
new Path(workSpace + File.separator + "outfile")), IntWritable.class,
Text.class, null, null, true);
output.setWriter(wr);
// stub for client
File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");
conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
// token for authorization
Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>(
"user".getBytes(), "password".getBytes(), new Text("kind"), new Text(
"service"));
TokenCache.setJobToken(token, conf.getCredentials());
conf.setBoolean(MRJobConfig.SKIP_RECORDS, true);
TestTaskReporter reporter = new TestTaskReporter();
PipesMapRunner<FloatWritable, NullWritable, IntWritable, Text> runner = new PipesMapRunner<FloatWritable, NullWritable, IntWritable, Text>();
initStdOut(conf);
runner.configure(conf);
runner.run(rReader, output, reporter);
String stdOut = readStdOut(conf);
      // verify part of the transferred data. The client's stdout is the
      // common file shared by the client and the test.
// check version
assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
// check key and value classes
assertTrue(stdOut
.contains("Key class:org.apache.hadoop.io.FloatWritable"));
assertTrue(stdOut
.contains("Value class:org.apache.hadoop.io.NullWritable"));
      // verify that all data from the reader has been sent
assertTrue(stdOut.contains("value:0.0"));
assertTrue(stdOut.contains("value:9.0"));
} finally {
if (psw != null) {
// remove password files
for (File file : psw) {
file.deleteOnExit();
}
}
}
}
/**
   * Tests org.apache.hadoop.mapred.pipes.Application and its internal
   * functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS,
   * PROGRESS...
*
* @throws Throwable
*/
@Test
public void testApplication() throws Throwable {
JobConf conf = new JobConf();
RecordReader<FloatWritable, NullWritable> rReader = new Reader();
// client for test
File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub");
TestTaskReporter reporter = new TestTaskReporter();
File[] psw = cleanTokenPasswordFile();
try {
conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
// token for authorization
Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>(
"user".getBytes(), "password".getBytes(), new Text("kind"), new Text(
"service"));
TokenCache.setJobToken(token, conf.getCredentials());
FakeCollector output = new FakeCollector(new Counters.Counter(),
new Progress());
FileSystem fs = new RawLocalFileSystem();
fs.setConf(conf);
Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs.create(
new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),
IntWritable.class, Text.class, null, null, true);
output.setWriter(wr);
conf.set(Submitter.PRESERVE_COMMANDFILE, "true");
initStdOut(conf);
Application<WritableComparable<IntWritable>, Writable, IntWritable, Text> application = new Application<WritableComparable<IntWritable>, Writable, IntWritable, Text>(
conf, rReader, output, reporter, IntWritable.class, Text.class);
application.getDownlink().flush();
application.getDownlink().mapItem(new IntWritable(3), new Text("txt"));
application.getDownlink().flush();
application.waitForFinish();
wr.close();
// test getDownlink().mapItem();
String stdOut = readStdOut(conf);
assertTrue(stdOut.contains("key:3"));
assertTrue(stdOut.contains("value:txt"));
      // the reporter's counter and status should have been sent
// test MessageType.REGISTER_COUNTER and INCREMENT_COUNTER
assertEquals(1.0, reporter.getProgress(), 0.01);
assertNotNull(reporter.getCounter("group", "name"));
// test status MessageType.STATUS
assertEquals(reporter.getStatus(), "PROGRESS");
stdOut = readFile(new File(workSpace.getAbsolutePath() + File.separator
+ "outfile"));
// check MessageType.PROGRESS
assertEquals(0.55f, rReader.getProgress(), 0.001);
application.getDownlink().close();
// test MessageType.OUTPUT
Entry<IntWritable, Text> entry = output.getCollect().entrySet()
.iterator().next();
assertEquals(123, entry.getKey().get());
assertEquals("value", entry.getValue().toString());
try {
// try to abort
application.abort(new Throwable());
fail();
} catch (IOException e) {
        // abort is expected to fail with this message
assertEquals("pipe child exception", e.getMessage());
}
} finally {
if (psw != null) {
// remove password files
for (File file : psw) {
file.deleteOnExit();
}
}
}
}
/**
* test org.apache.hadoop.mapred.pipes.Submitter
*
* @throws Exception
*/
@Test
public void testSubmitter() throws Exception {
JobConf conf = new JobConf();
File[] psw = cleanTokenPasswordFile();
System.setProperty("test.build.data",
"target/tmp/build/TEST_SUBMITTER_MAPPER/data");
conf.set("hadoop.log.dir", "target/tmp");
// prepare configuration
Submitter.setIsJavaMapper(conf, false);
Submitter.setIsJavaReducer(conf, false);
Submitter.setKeepCommandFile(conf, false);
Submitter.setIsJavaRecordReader(conf, false);
Submitter.setIsJavaRecordWriter(conf, false);
PipesPartitioner<IntWritable, Text> partitioner = new PipesPartitioner<IntWritable, Text>();
partitioner.configure(conf);
Submitter.setJavaPartitioner(conf, partitioner.getClass());
assertEquals(PipesPartitioner.class, (Submitter.getJavaPartitioner(conf)));
    // the test calls Submitter.main(), which attempts to call System.exit();
    // save the SecurityManager so it can be restored afterwards
SecurityManager securityManager = System.getSecurityManager();
// store System.out
PrintStream oldps = System.out;
ByteArrayOutputStream out = new ByteArrayOutputStream();
ExitUtil.disableSystemExit();
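    // with system exit disabled, exit calls made through ExitUtil inside
    // Submitter.main() surface as ExitUtil.ExitException, which the catch
    // blocks below assert on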
// test without parameters
try {
System.setOut(new PrintStream(out));
Submitter.main(new String[0]);
fail();
} catch (ExitUtil.ExitException e) {
      // System.exit is disabled; verify the usage message printed to stdout
assertTrue(out.toString(), out.toString().contains("pipes"));
assertTrue(out.toString().contains("[-input <path>] // Input directory"));
assertTrue(out.toString()
.contains("[-output <path>] // Output directory"));
assertTrue(out.toString().contains("[-jar <jar file> // jar filename"));
assertTrue(out.toString().contains(
"[-inputformat <class>] // InputFormat class"));
assertTrue(out.toString().contains("[-map <class>] // Java Map class"));
assertTrue(out.toString().contains(
"[-partitioner <class>] // Java Partitioner"));
assertTrue(out.toString().contains(
"[-reduce <class>] // Java Reduce class"));
assertTrue(out.toString().contains(
"[-writer <class>] // Java RecordWriter"));
assertTrue(out.toString().contains(
"[-program <executable>] // executable URI"));
assertTrue(out.toString().contains(
"[-reduces <num>] // number of reduces"));
assertTrue(out.toString().contains(
"[-lazyOutput <true/false>] // createOutputLazily"));
assertTrue(out
.toString()
.contains(
"-conf <configuration file> specify an application configuration file"));
assertTrue(out.toString().contains(
"-D <property=value> use value for given property"));
assertTrue(out.toString().contains(
"-fs <local|namenode:port> specify a namenode"));
assertTrue(out.toString().contains(
"-jt <local|resourcemanager:port> specify a ResourceManager"));
assertTrue(out
.toString()
.contains(
"-files <comma separated list of files> specify comma separated files to be copied to the map reduce cluster"));
assertTrue(out
.toString()
.contains(
"-libjars <comma separated list of jars> specify comma separated jar files to include in the classpath."));
assertTrue(out
.toString()
.contains(
"-archives <comma separated list of archives> specify comma separated archives to be unarchived on the compute machines."));
} finally {
System.setOut(oldps);
// restore
System.setSecurityManager(securityManager);
if (psw != null) {
// remove password files
for (File file : psw) {
file.deleteOnExit();
}
}
}
    // test calling Submitter from the command line
try {
File fCommand = getFileCommand(null);
String[] args = new String[22];
File input = new File(workSpace + File.separator + "input");
if (!input.exists()) {
Assert.assertTrue(input.createNewFile());
}
File outPut = new File(workSpace + File.separator + "output");
FileUtil.fullyDelete(outPut);
args[0] = "-input";
args[1] = input.getAbsolutePath();// "input";
args[2] = "-output";
args[3] = outPut.getAbsolutePath();// "output";
args[4] = "-inputformat";
args[5] = "org.apache.hadoop.mapred.TextInputFormat";
args[6] = "-map";
args[7] = "org.apache.hadoop.mapred.lib.IdentityMapper";
args[8] = "-partitioner";
args[9] = "org.apache.hadoop.mapred.pipes.PipesPartitioner";
args[10] = "-reduce";
args[11] = "org.apache.hadoop.mapred.lib.IdentityReducer";
args[12] = "-writer";
args[13] = "org.apache.hadoop.mapred.TextOutputFormat";
args[14] = "-program";
args[15] = fCommand.getAbsolutePath();// "program";
args[16] = "-reduces";
args[17] = "2";
args[18] = "-lazyOutput";
args[19] = "lazyOutput";
args[20] = "-jobconf";
args[21] = "mapreduce.pipes.isjavarecordwriter=false,mapreduce.pipes.isjavarecordreader=false";
Submitter.main(args);
fail();
} catch (ExitUtil.ExitException e) {
// status should be 0
      assertEquals(0, e.status);
} finally {
System.setOut(oldps);
System.setSecurityManager(securityManager);
}
}
/**
* test org.apache.hadoop.mapred.pipes.PipesReducer
* test the transfer of data: key and value
*
* @throws Exception
*/
@Test
  public void testPipesReducer() throws Exception {
File[] psw = cleanTokenPasswordFile();
JobConf conf = new JobConf();
try {
Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>(
"user".getBytes(), "password".getBytes(), new Text("kind"), new Text(
"service"));
TokenCache.setJobToken(token, conf.getCredentials());
File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeReducerStub");
conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
PipesReducer<BooleanWritable, Text, IntWritable, Text> reducer = new PipesReducer<BooleanWritable, Text, IntWritable, Text>();
reducer.configure(conf);
BooleanWritable bw = new BooleanWritable(true);
conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
initStdOut(conf);
conf.setBoolean(MRJobConfig.SKIP_RECORDS, true);
CombineOutputCollector<IntWritable, Text> output = new CombineOutputCollector<IntWritable, Text>(
new Counters.Counter(), new Progress());
Reporter reporter = new TestTaskReporter();
List<Text> texts = new ArrayList<Text>();
texts.add(new Text("first"));
texts.add(new Text("second"));
texts.add(new Text("third"));
reducer.reduce(bw, texts.iterator(), output, reporter);
reducer.close();
String stdOut = readStdOut(conf);
// test data: key
assertTrue(stdOut.contains("reducer key :true"));
// and values
assertTrue(stdOut.contains("reduce value :first"));
assertTrue(stdOut.contains("reduce value :second"));
assertTrue(stdOut.contains("reduce value :third"));
} finally {
if (psw != null) {
// remove password files
for (File file : psw) {
file.deleteOnExit();
}
}
}
}
/**
* test PipesPartitioner
* test set and get data from PipesPartitioner
*/
@Test
public void testPipesPartitioner() {
PipesPartitioner<IntWritable, Text> partitioner = new PipesPartitioner<IntWritable, Text>();
JobConf configuration = new JobConf();
Submitter.getJavaPartitioner(configuration);
partitioner.configure(new JobConf());
IntWritable iw = new IntWritable(4);
    // the cache is empty
assertEquals(0, partitioner.getPartition(iw, new Text("test"), 2));
// set data into cache
PipesPartitioner.setNextPartition(3);
// get data from cache
assertEquals(3, partitioner.getPartition(iw, new Text("test"), 2));
}
/**
   * clean up previous stderr and stdout
*/
private void initStdOut(JobConf configuration) {
TaskAttemptID taskId = TaskAttemptID.forName(configuration
.get(MRJobConfig.TASK_ATTEMPT_ID));
File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
// prepare folder
if (!stdOut.getParentFile().exists()) {
stdOut.getParentFile().mkdirs();
    } else { // schedule the old logs for deletion
stdOut.deleteOnExit();
stdErr.deleteOnExit();
}
}
private String readStdOut(JobConf conf) throws Exception {
TaskAttemptID taskId = TaskAttemptID.forName(conf
.get(MRJobConfig.TASK_ATTEMPT_ID));
File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
return readFile(stdOut);
}
private String readFile(File file) throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
InputStream is = new FileInputStream(file);
byte[] buffer = new byte[1024];
int counter = 0;
while ((counter = is.read(buffer)) >= 0) {
out.write(buffer, 0, counter);
}
is.close();
return out.toString();
}
private class Progress implements Progressable {
@Override
public void progress() {
}
}
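  // removes job token password files left over from previous runs, relaxing
  // their permissions first so the delete cannot fail on a read-only file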
private File[] cleanTokenPasswordFile() throws Exception {
File[] result = new File[2];
result[0] = new File("./jobTokenPassword");
if (result[0].exists()) {
FileUtil.chmod(result[0].getAbsolutePath(), "700");
assertTrue(result[0].delete());
}
result[1] = new File("./.jobTokenPassword.crc");
if (result[1].exists()) {
FileUtil.chmod(result[1].getAbsolutePath(), "700");
result[1].delete();
}
return result;
}
private File getFileCommand(String clazz) throws Exception {
String classpath = System.getProperty("java.class.path");
File fCommand = new File(workSpace + File.separator + "cache.sh");
fCommand.deleteOnExit();
if (!fCommand.getParentFile().exists()) {
fCommand.getParentFile().mkdirs();
}
fCommand.createNewFile();
OutputStream os = new FileOutputStream(fCommand);
os.write("#!/bin/sh \n".getBytes());
if (clazz == null) {
os.write(("ls ").getBytes());
} else {
os.write(("java -cp " + classpath + " " + clazz).getBytes());
}
os.flush();
os.close();
FileUtil.chmod(fCommand.getAbsolutePath(), "700");
return fCommand;
}
private class CombineOutputCollector<K, V extends Object> implements
OutputCollector<K, V> {
private Writer<K, V> writer;
private Counters.Counter outCounter;
private Progressable progressable;
public CombineOutputCollector(Counters.Counter outCounter,
Progressable progressable) {
this.outCounter = outCounter;
this.progressable = progressable;
}
public synchronized void setWriter(Writer<K, V> writer) {
this.writer = writer;
}
public synchronized void collect(K key, V value) throws IOException {
outCounter.increment(1);
writer.append(key, value);
progressable.progress();
}
}
public static class FakeSplit implements InputSplit {
public void write(DataOutput out) throws IOException {
}
public void readFields(DataInput in) throws IOException {
}
public long getLength() {
return 0L;
}
public String[] getLocations() {
return new String[0];
}
}
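  // Reporter stub that counts progress() calls and records the last status
  // string and the counters, so tests can assert on what the pipes client
  // reported back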
private class TestTaskReporter implements Reporter {
private int recordNum = 0; // number of records processed
private String status = null;
private Counters counters = new Counters();
private InputSplit split = new FakeSplit();
@Override
public void progress() {
recordNum++;
}
@Override
public void setStatus(String status) {
this.status = status;
}
public String getStatus() {
return this.status;
}
public Counters.Counter getCounter(String group, String name) {
Counters.Counter counter = null;
if (counters != null) {
counter = counters.findCounter(group, name);
if (counter == null) {
Group grp = counters.addGroup(group, group);
counter = grp.addCounter(name, name, 10);
}
}
return counter;
}
public Counters.Counter getCounter(Enum<?> name) {
return counters == null ? null : counters.findCounter(name);
}
public void incrCounter(Enum<?> key, long amount) {
if (counters != null) {
counters.incrCounter(key, amount);
}
}
public void incrCounter(String group, String counter, long amount) {
if (counters != null) {
counters.incrCounter(group, counter, amount);
}
}
@Override
public InputSplit getInputSplit() throws UnsupportedOperationException {
return split;
}
@Override
public float getProgress() {
return recordNum;
}
}
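  // RecordReader stub: createKey() seeds each key from the running index and
  // getProgress() returns the value of the key most recently passed to next()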
private class Reader implements RecordReader<FloatWritable, NullWritable> {
private int index = 0;
private FloatWritable progress;
@Override
public boolean next(FloatWritable key, NullWritable value)
throws IOException {
progress = key;
index++;
return index <= 10;
}
@Override
public float getProgress() throws IOException {
return progress.get();
}
@Override
public long getPos() throws IOException {
return index;
}
@Override
public NullWritable createValue() {
return NullWritable.get();
}
@Override
public FloatWritable createKey() {
FloatWritable result = new FloatWritable(index);
return result;
}
@Override
public void close() throws IOException {
}
}
private class ReaderPipesMapRunner implements RecordReader<FloatWritable, NullWritable> {
private int index = 0;
@Override
public boolean next(FloatWritable key, NullWritable value)
throws IOException {
key.set(index++);
return index <= 10;
}
@Override
public float getProgress() throws IOException {
return index;
}
@Override
public long getPos() throws IOException {
return index;
}
@Override
public NullWritable createValue() {
return NullWritable.get();
}
@Override
public FloatWritable createKey() {
FloatWritable result = new FloatWritable(index);
return result;
}
@Override
public void close() throws IOException {
}
}
private class FakeCollector extends
CombineOutputCollector<IntWritable, Text> {
final private Map<IntWritable, Text> collect = new HashMap<IntWritable, Text>();
public FakeCollector(Counter outCounter, Progressable progressable) {
super(outCounter, progressable);
}
@Override
public synchronized void collect(IntWritable key, Text value)
throws IOException {
collect.put(key, value);
super.collect(key, value);
}
public Map<IntWritable, Text> getCollect() {
return collect;
}
}
}
| 24,233 | 31.268975 | 172 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Ignore;
import junit.framework.TestCase;
@Ignore
public class TestPipes extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestPipes.class.getName());
private static Path cppExamples =
new Path(System.getProperty("install.c++.examples"));
static Path wordCountSimple =
new Path(cppExamples, "bin/wordcount-simple");
static Path wordCountPart =
new Path(cppExamples, "bin/wordcount-part");
static Path wordCountNoPipes =
new Path(cppExamples,"bin/wordcount-nopipe");
static Path nonPipedOutDir;
static void cleanup(FileSystem fs, Path p) throws IOException {
fs.delete(p, true);
assertFalse("output not cleaned up", fs.exists(p));
}
public void testPipes() throws IOException {
if (System.getProperty("compile.c++") == null) {
LOG.info("compile.c++ is not defined, so skipping TestPipes");
return;
}
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
Path inputPath = new Path("testing/in");
Path outputPath = new Path("testing/out");
try {
final int numSlaves = 2;
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves).build();
mr = new MiniMRCluster(numSlaves, dfs.getFileSystem().getUri().toString(), 1);
writeInputFile(dfs.getFileSystem(), inputPath);
runProgram(mr, dfs, wordCountSimple,
inputPath, outputPath, 3, 2, twoSplitOutput, null);
cleanup(dfs.getFileSystem(), outputPath);
runProgram(mr, dfs, wordCountSimple,
inputPath, outputPath, 3, 0, noSortOutput, null);
cleanup(dfs.getFileSystem(), outputPath);
runProgram(mr, dfs, wordCountPart,
inputPath, outputPath, 3, 2, fixedPartitionOutput, null);
runNonPipedProgram(mr, dfs, wordCountNoPipes, null);
mr.waitUntilIdle();
    } finally {
      if (mr != null) {
        mr.shutdown();
      }
      if (dfs != null) {
        dfs.shutdown();
      }
    }
}
final static String[] twoSplitOutput = new String[] {
"`and\t1\na\t1\nand\t1\nbeginning\t1\nbook\t1\nbut\t1\nby\t1\n" +
"conversation?'\t1\ndo:\t1\nhad\t2\nhaving\t1\nher\t2\nin\t1\nit\t1\n"+
"it,\t1\nno\t1\nnothing\t1\nof\t3\non\t1\nonce\t1\nor\t3\npeeped\t1\n"+
"pictures\t2\nthe\t3\nthought\t1\nto\t2\nuse\t1\nwas\t2\n",
"Alice\t2\n`without\t1\nbank,\t1\nbook,'\t1\nconversations\t1\nget\t1\n" +
"into\t1\nis\t1\nreading,\t1\nshe\t1\nsister\t2\nsitting\t1\ntired\t1\n" +
"twice\t1\nvery\t1\nwhat\t1\n"
};
final static String[] noSortOutput = new String[] {
"it,\t1\n`and\t1\nwhat\t1\nis\t1\nthe\t1\nuse\t1\nof\t1\na\t1\n" +
"book,'\t1\nthought\t1\nAlice\t1\n`without\t1\npictures\t1\nor\t1\n"+
"conversation?'\t1\n",
"Alice\t1\nwas\t1\nbeginning\t1\nto\t1\nget\t1\nvery\t1\ntired\t1\n"+
"of\t1\nsitting\t1\nby\t1\nher\t1\nsister\t1\non\t1\nthe\t1\nbank,\t1\n"+
"and\t1\nof\t1\nhaving\t1\nnothing\t1\nto\t1\ndo:\t1\nonce\t1\n",
"or\t1\ntwice\t1\nshe\t1\nhad\t1\npeeped\t1\ninto\t1\nthe\t1\nbook\t1\n"+
"her\t1\nsister\t1\nwas\t1\nreading,\t1\nbut\t1\nit\t1\nhad\t1\nno\t1\n"+
"pictures\t1\nor\t1\nconversations\t1\nin\t1\n"
};
final static String[] fixedPartitionOutput = new String[] {
"Alice\t2\n`and\t1\n`without\t1\na\t1\nand\t1\nbank,\t1\nbeginning\t1\n" +
"book\t1\nbook,'\t1\nbut\t1\nby\t1\nconversation?'\t1\nconversations\t1\n"+
"do:\t1\nget\t1\nhad\t2\nhaving\t1\nher\t2\nin\t1\ninto\t1\nis\t1\n" +
"it\t1\nit,\t1\nno\t1\nnothing\t1\nof\t3\non\t1\nonce\t1\nor\t3\n" +
"peeped\t1\npictures\t2\nreading,\t1\nshe\t1\nsister\t2\nsitting\t1\n" +
"the\t3\nthought\t1\ntired\t1\nto\t2\ntwice\t1\nuse\t1\n" +
"very\t1\nwas\t2\nwhat\t1\n",
""
};
static void writeInputFile(FileSystem fs, Path dir) throws IOException {
DataOutputStream out = fs.create(new Path(dir, "part0"));
out.writeBytes("Alice was beginning to get very tired of sitting by her\n");
out.writeBytes("sister on the bank, and of having nothing to do: once\n");
out.writeBytes("or twice she had peeped into the book her sister was\n");
out.writeBytes("reading, but it had no pictures or conversations in\n");
out.writeBytes("it, `and what is the use of a book,' thought Alice\n");
out.writeBytes("`without pictures or conversation?'\n");
out.close();
}
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs,
Path program, Path inputPath, Path outputPath,
int numMaps, int numReduces, String[] expectedResults,
JobConf conf
) throws IOException {
Path wordExec = new Path("testing/bin/application");
JobConf job = null;
    if (conf == null) {
      job = mr.createJobConf();
    } else {
      job = new JobConf(conf);
    }
job.setNumMapTasks(numMaps);
job.setNumReduceTasks(numReduces);
{
FileSystem fs = dfs.getFileSystem();
fs.delete(wordExec.getParent(), true);
fs.copyFromLocalFile(program, wordExec);
Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
Submitter.setIsJavaRecordReader(job, true);
Submitter.setIsJavaRecordWriter(job, true);
FileInputFormat.setInputPaths(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
RunningJob rJob = null;
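      // map-only jobs are submitted asynchronously and polled to completion;
      // jobs with reducers go through the blocking Submitter.runJob()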
if (numReduces == 0) {
rJob = Submitter.jobSubmit(job);
while (!rJob.isComplete()) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
throw new RuntimeException(ie);
}
}
} else {
rJob = Submitter.runJob(job);
}
assertTrue("pipes job failed", rJob.isSuccessful());
Counters counters = rJob.getCounters();
Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
int numCounters = 0;
for (Counter c : wordCountCounters) {
System.out.println(c);
++numCounters;
}
assertTrue("No counters found!", (numCounters > 0));
}
List<String> results = new ArrayList<String>();
for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
new Utils.OutputFileUtils
.OutputFilesFilter()))) {
results.add(MapReduceTestUtil.readOutput(p, job));
}
assertEquals("number of reduces is wrong",
expectedResults.length, results.size());
for(int i=0; i < results.size(); i++) {
assertEquals("pipes program " + program + " output " + i + " wrong",
expectedResults[i], results.get(i));
}
}
/**
* Run a map/reduce word count that does all of the map input and reduce
* output directly rather than sending it back up to Java.
* @param mr The mini mr cluster
* @param dfs the dfs cluster
* @param program the program to run
* @throws IOException
*/
static void runNonPipedProgram(MiniMRCluster mr, MiniDFSCluster dfs,
Path program, JobConf conf) throws IOException {
JobConf job;
    if (conf == null) {
      job = mr.createJobConf();
    } else {
      job = new JobConf(conf);
    }
job.setInputFormat(WordCountInputFormat.class);
FileSystem local = FileSystem.getLocal(job);
Path testDir = new Path("file:" + System.getProperty("test.build.data"),
"pipes");
Path inDir = new Path(testDir, "input");
nonPipedOutDir = new Path(testDir, "output");
Path wordExec = new Path("testing/bin/application");
Path jobXml = new Path(testDir, "job.xml");
{
FileSystem fs = dfs.getFileSystem();
fs.delete(wordExec.getParent(), true);
fs.copyFromLocalFile(program, wordExec);
}
DataOutputStream out = local.create(new Path(inDir, "part0"));
out.writeBytes("i am a silly test\n");
out.writeBytes("you are silly\n");
out.writeBytes("i am a cat test\n");
out.writeBytes("you is silly\n");
out.writeBytes("i am a billy test\n");
out.writeBytes("hello are silly\n");
out.close();
out = local.create(new Path(inDir, "part1"));
out.writeBytes("mall world things drink java\n");
out.writeBytes("hall silly cats drink java\n");
out.writeBytes("all dogs bow wow\n");
out.writeBytes("hello drink java\n");
out.close();
local.delete(nonPipedOutDir, true);
local.mkdirs(nonPipedOutDir, new FsPermission(FsAction.ALL, FsAction.ALL,
FsAction.ALL));
out = local.create(jobXml);
job.writeXml(out);
out.close();
System.err.println("About to run: Submitter -conf " + jobXml +
" -input " + inDir + " -output " + nonPipedOutDir +
" -program " +
dfs.getFileSystem().makeQualified(wordExec));
try {
int ret = ToolRunner.run(new Submitter(),
new String[]{"-conf", jobXml.toString(),
"-input", inDir.toString(),
"-output", nonPipedOutDir.toString(),
"-program",
dfs.getFileSystem().makeQualified(wordExec).toString(),
"-reduces", "2"});
assertEquals(0, ret);
} catch (Exception e) {
assertTrue("got exception: " + StringUtils.stringifyException(e), false);
}
}
}
| 11,407 | 39.597865 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/PipeApplicationStub.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
/*
Stub for the TestPipeApplication test. This stub produces test data for the main test; the main test checks that data.
*/
public class PipeApplicationStub extends CommonStub {
public static void main(String[] args) {
PipeApplicationStub client = new PipeApplicationStub();
client.binaryProtocolStub();
}
public void binaryProtocolStub() {
try {
initSoket();
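      // write one upward message of each type the Application under test must
      // decode: output (50), partitioned output (51), status (52), progress
      // (53), register counter (55) and increment counter (56); then read back
      // the map item sent by the test and finish with the end-of-session
      // code (54)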
// output code
WritableUtils.writeVInt(dataOut, 50);
IntWritable wt = new IntWritable();
wt.set(123);
writeObject(wt, dataOut);
writeObject(new Text("value"), dataOut);
// PARTITIONED_OUTPUT
WritableUtils.writeVInt(dataOut, 51);
WritableUtils.writeVInt(dataOut, 0);
writeObject(wt, dataOut);
writeObject(new Text("value"), dataOut);
// STATUS
WritableUtils.writeVInt(dataOut, 52);
Text.writeString(dataOut, "PROGRESS");
dataOut.flush();
// progress
WritableUtils.writeVInt(dataOut, 53);
dataOut.writeFloat(0.55f);
// register counter
WritableUtils.writeVInt(dataOut, 55);
// id
WritableUtils.writeVInt(dataOut, 0);
Text.writeString(dataOut, "group");
Text.writeString(dataOut, "name");
// increment counter
WritableUtils.writeVInt(dataOut, 56);
WritableUtils.writeVInt(dataOut, 0);
WritableUtils.writeVLong(dataOut, 2);
// map item
int intValue = WritableUtils.readVInt(dataInput);
System.out.println("intValue:" + intValue);
IntWritable iw = new IntWritable();
readObject(iw, dataInput);
System.out.println("key:" + iw.get());
Text txt = new Text();
readObject(txt, dataInput);
System.out.println("value:" + txt.toString());
// done
// end of session
WritableUtils.writeVInt(dataOut, 54);
System.out.println("finish");
dataOut.flush();
dataOut.close();
} catch (Exception x) {
x.printStackTrace();
} finally {
closeSoket();
}
}
}
| 2,972 | 28.147059 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/CommonStub.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import javax.crypto.SecretKey;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
public class CommonStub {
protected Socket socket = null;
protected DataInputStream dataInput;
protected DataOutputStream dataOut;
protected String createDigest(byte[] password, String data) throws IOException {
SecretKey key = JobTokenSecretManager.createSecretKey(password);
return SecureShuffleUtils.hashFromString(data, key);
}
protected void readObject(Writable obj, DataInputStream inStream) throws IOException {
int numBytes = WritableUtils.readVInt(inStream);
byte[] buffer;
// For BytesWritable and Text, use the specified length to set the length
// this causes the "obvious" translations to work. So that if you emit
// a string "abc" from C++, it shows up as "abc".
if (obj instanceof BytesWritable) {
buffer = new byte[numBytes];
inStream.readFully(buffer);
((BytesWritable) obj).set(buffer, 0, numBytes);
} else if (obj instanceof Text) {
buffer = new byte[numBytes];
inStream.readFully(buffer);
((Text) obj).set(buffer);
} else {
obj.readFields(inStream);
}
}
protected void writeObject(Writable obj, DataOutputStream stream)
throws IOException {
// For Text and BytesWritable, encode them directly, so that they end up
// in C++ as the natural translations.
DataOutputBuffer buffer = new DataOutputBuffer();
if (obj instanceof Text) {
Text t = (Text) obj;
int len = t.getLength();
WritableUtils.writeVLong(stream, len);
stream.flush();
stream.write(t.getBytes(), 0, len);
stream.flush();
} else if (obj instanceof BytesWritable) {
BytesWritable b = (BytesWritable) obj;
int len = b.getLength();
WritableUtils.writeVLong(stream, len);
stream.write(b.getBytes(), 0, len);
} else {
buffer.reset();
obj.write(buffer);
int length = buffer.getLength();
WritableUtils.writeVInt(stream, length);
stream.write(buffer.getData(), 0, length);
}
stream.flush();
}
protected void initSoket() throws Exception {
int port = Integer.parseInt(System.getenv("mapreduce.pipes.command.port"));
java.net.InetAddress address = java.net.InetAddress.getLocalHost();
socket = new Socket(address.getHostName(), port);
InputStream input = socket.getInputStream();
OutputStream output = socket.getOutputStream();
// try to read
dataInput = new DataInputStream(input);
WritableUtils.readVInt(dataInput);
String str = Text.readString(dataInput);
Text.readString(dataInput);
dataOut = new DataOutputStream(output);
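    // 57 is the authentication response code of the pipes binary protocol;
    // the reply carries a digest of the string just received, keyed by the
    // shared "password" bytes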
WritableUtils.writeVInt(dataOut, 57);
String s = createDigest("password".getBytes(), str);
Text.writeString(dataOut, s);
// start
WritableUtils.readVInt(dataInput);
    int currentAnswer = WritableUtils.readVInt(dataInput);
    System.out.println("CURRENT_PROTOCOL_VERSION:" + currentAnswer);
// get configuration
// should be MessageType.SET_JOB_CONF.code
WritableUtils.readVInt(dataInput);
// array length
int j = WritableUtils.readVInt(dataInput);
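    // the configuration arrives as a flattened array of alternating key and
    // value strings; each iteration consumes one pair, so the index is
    // advanced twice (once in the body, once in the for header)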
for (int i = 0; i < j; i++) {
Text.readString(dataInput);
i++;
Text.readString(dataInput);
}
}
protected void closeSoket() {
if (socket != null) {
try {
socket.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
| 4,792 | 30.123377 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.Utils;
import org.junit.After;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
public class TestKeyFieldBasedComparator extends HadoopTestCase {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
"TestKeyFieldBasedComparator-lib");
JobConf conf;
JobConf localConf;
String line1 = "123 -123 005120 123.9 0.01 0.18 010 10.0 4444.1 011 011 234";
String line2 = "134 -12 005100 123.10 -1.01 0.19 02 10.1 4444";
public TestKeyFieldBasedComparator() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
conf = createJobConf();
localConf = createJobConf();
localConf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
}
public void configure(String keySpec, int expect) throws Exception {
Path testdir = new Path(TEST_DIR.getAbsolutePath());
Path inDir = new Path(testdir, "in");
Path outDir = new Path(testdir, "out");
FileSystem fs = getFileSystem();
fs.delete(testdir, true);
conf.setInputFormat(TextInputFormat.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(LongWritable.class);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
conf.setKeyFieldComparatorOptions(keySpec);
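    // keySpec uses Unix sort(1)-style -k options; "expect" names which of
    // line1/line2 should sort first under that ordering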
conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
conf.setMapperClass(InverseMapper.class);
conf.setReducerClass(IdentityReducer.class);
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
// set up input data in 2 files
Path inFile = new Path(inDir, "part0");
FileOutputStream fos = new FileOutputStream(inFile.toString());
fos.write((line1 + "\n").getBytes());
fos.write((line2 + "\n").getBytes());
fos.close();
JobClient jc = new JobClient(conf);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
//make sure we get what we expect as the first line, and also
//that we have two lines
if (expect == 1) {
assertTrue(line.startsWith(line1));
} else if (expect == 2) {
assertTrue(line.startsWith(line2));
}
line = reader.readLine();
if (expect == 1) {
assertTrue(line.startsWith(line2));
} else if (expect == 2) {
assertTrue(line.startsWith(line1));
}
reader.close();
}
}
@After
public void cleanup() {
FileUtil.fullyDelete(TEST_DIR);
}
@Test
public void testBasicUnixComparator() throws Exception {
configure("-k1,1n", 1);
configure("-k2,2n", 1);
configure("-k2.2,2n", 2);
configure("-k3.4,3n", 2);
configure("-k3.2,3.3n -k4,4n", 2);
configure("-k3.2,3.3n -k4,4nr", 1);
configure("-k2.4,2.4n", 2);
configure("-k7,7", 1);
configure("-k7,7n", 2);
configure("-k8,8n", 1);
configure("-k9,9", 2);
configure("-k11,11",2);
configure("-k10,10",2);
localTestWithoutMRJob("-k9,9", 1);
}
byte[] line1_bytes = line1.getBytes();
byte[] line2_bytes = line2.getBytes();
public void localTestWithoutMRJob(String keySpec, int expect) throws Exception {
KeyFieldBasedComparator<Void, Void> keyFieldCmp = new KeyFieldBasedComparator<Void, Void>();
localConf.setKeyFieldComparatorOptions(keySpec);
keyFieldCmp.configure(localConf);
int result = keyFieldCmp.compare(line1_bytes, 0, line1_bytes.length,
line2_bytes, 0, line2_bytes.length);
if ((expect >= 0 && result < 0) || (expect < 0 && result >= 0))
fail();
}
}
| 6,080 | 34.561404 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestMultipleOutputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
import org.apache.hadoop.mapred.*;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Iterator;
public class TestMultipleOutputs extends HadoopTestCase {
public TestMultipleOutputs() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testWithoutCounters() throws Exception {
_testMultipleOutputs(false);
_testMOWithJavaSerialization(false);
}
public void testWithCounters() throws Exception {
_testMultipleOutputs(true);
_testMOWithJavaSerialization(true);
}
private static final Path ROOT_DIR = new Path("testing/mo");
private static final Path IN_DIR = new Path(ROOT_DIR, "input");
private static final Path OUT_DIR = new Path(ROOT_DIR, "output");
private Path getDir(Path dir) {
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data", "/tmp")
.replace(' ', '+');
dir = new Path(localPathRoot, dir);
}
return dir;
}
public void setUp() throws Exception {
super.setUp();
Path rootDir = getDir(ROOT_DIR);
Path inDir = getDir(IN_DIR);
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(rootDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
}
public void tearDown() throws Exception {
Path rootDir = getDir(ROOT_DIR);
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(rootDir, true);
super.tearDown();
}
protected void _testMOWithJavaSerialization(boolean withCounters) throws Exception {
Path inDir = getDir(IN_DIR);
Path outDir = getDir(OUT_DIR);
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
fs.delete(inDir, true);
fs.delete(outDir, true);
file = fs.create(new Path(inDir, "part-1"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
conf.setJobName("mo");
conf.set("io.serializations",
"org.apache.hadoop.io.serializer.JavaSerialization," +
"org.apache.hadoop.io.serializer.WritableSerialization");
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(Long.class);
conf.setMapOutputValueClass(String.class);
conf.setOutputKeyComparatorClass(JavaSerializationComparator.class);
conf.setOutputKeyClass(Long.class);
conf.setOutputValueClass(String.class);
conf.setOutputFormat(TextOutputFormat.class);
MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
Long.class, String.class);
MultipleOutputs.setCountersEnabled(conf, withCounters);
conf.setMapperClass(MOJavaSerDeMap.class);
conf.setReducerClass(MOJavaSerDeReduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
JobClient jc = new JobClient(conf);
RunningJob job = jc.submitJob(conf);
while (!job.isComplete()) {
Thread.sleep(100);
}
// assert number of named output part files
int namedOutputCount = 0;
FileStatus[] statuses = fs.listStatus(outDir);
for (FileStatus status : statuses) {
if (status.getPath().getName().equals("text-m-00000") ||
status.getPath().getName().equals("text-r-00000")) {
namedOutputCount++;
}
}
assertEquals(2, namedOutputCount);
// assert TextOutputFormat files correctness
BufferedReader reader = new BufferedReader(
new InputStreamReader(fs.open(
new Path(FileOutputFormat.getOutputPath(conf), "text-r-00000"))));
int count = 0;
String line = reader.readLine();
while (line != null) {
assertTrue(line.endsWith("text"));
line = reader.readLine();
count++;
}
reader.close();
assertFalse(count == 0);
Counters.Group counters =
job.getCounters().getGroup(MultipleOutputs.class.getName());
if (!withCounters) {
assertEquals(0, counters.size());
}
else {
assertEquals(1, counters.size());
assertEquals(2, counters.getCounter("text"));
}
}
protected void _testMultipleOutputs(boolean withCounters) throws Exception {
Path inDir = getDir(IN_DIR);
Path outDir = getDir(OUT_DIR);
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
file = fs.create(new Path(inDir, "part-1"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
conf.setJobName("mo");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
LongWritable.class, Text.class);
MultipleOutputs.addMultiNamedOutput(conf, "sequence",
SequenceFileOutputFormat.class, LongWritable.class, Text.class);
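    // multi named outputs append the part name passed to getCollector() to the
    // output name, which is why sequence_A/sequence_B/sequence_C part files
    // are expected below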
MultipleOutputs.setCountersEnabled(conf, withCounters);
conf.setMapperClass(MOMap.class);
conf.setReducerClass(MOReduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
JobClient jc = new JobClient(conf);
RunningJob job = jc.submitJob(conf);
while (!job.isComplete()) {
Thread.sleep(100);
}
// assert number of named output part files
int namedOutputCount = 0;
FileStatus[] statuses = fs.listStatus(outDir);
for (FileStatus status : statuses) {
if (status.getPath().getName().equals("text-m-00000") ||
status.getPath().getName().equals("text-m-00001") ||
status.getPath().getName().equals("text-r-00000") ||
status.getPath().getName().equals("sequence_A-m-00000") ||
status.getPath().getName().equals("sequence_A-m-00001") ||
status.getPath().getName().equals("sequence_B-m-00000") ||
status.getPath().getName().equals("sequence_B-m-00001") ||
status.getPath().getName().equals("sequence_B-r-00000") ||
status.getPath().getName().equals("sequence_C-r-00000")) {
namedOutputCount++;
}
}
assertEquals(9, namedOutputCount);
// assert TextOutputFormat files correctness
BufferedReader reader = new BufferedReader(
new InputStreamReader(fs.open(
new Path(FileOutputFormat.getOutputPath(conf), "text-r-00000"))));
int count = 0;
String line = reader.readLine();
while (line != null) {
assertTrue(line.endsWith("text"));
line = reader.readLine();
count++;
}
reader.close();
assertFalse(count == 0);
// assert SequenceOutputFormat files correctness
SequenceFile.Reader seqReader =
new SequenceFile.Reader(fs, new Path(FileOutputFormat.getOutputPath(conf),
"sequence_B-r-00000"), conf);
assertEquals(LongWritable.class, seqReader.getKeyClass());
assertEquals(Text.class, seqReader.getValueClass());
count = 0;
LongWritable key = new LongWritable();
Text value = new Text();
while (seqReader.next(key, value)) {
assertEquals("sequence", value.toString());
count++;
}
seqReader.close();
assertFalse(count == 0);
Counters.Group counters =
job.getCounters().getGroup(MultipleOutputs.class.getName());
if (!withCounters) {
assertEquals(0, counters.size());
}
else {
assertEquals(4, counters.size());
assertEquals(4, counters.getCounter("text"));
assertEquals(2, counters.getCounter("sequence_A"));
assertEquals(4, counters.getCounter("sequence_B"));
assertEquals(2, counters.getCounter("sequence_C"));
}
}
@SuppressWarnings({"unchecked"})
public static class MOMap implements Mapper<LongWritable, Text, LongWritable,
Text> {
private MultipleOutputs mos;
public void configure(JobConf conf) {
mos = new MultipleOutputs(conf);
}
public void map(LongWritable key, Text value,
OutputCollector<LongWritable, Text> output,
Reporter reporter)
throws IOException {
if (!value.toString().equals("a")) {
output.collect(key, value);
} else {
mos.getCollector("text", reporter).collect(key, new Text("text"));
mos.getCollector("sequence", "A", reporter).collect(key,
new Text("sequence"));
mos.getCollector("sequence", "B", reporter).collect(key,
new Text("sequence"));
}
}
public void close() throws IOException {
mos.close();
}
}
@SuppressWarnings({"unchecked"})
public static class MOReduce implements Reducer<LongWritable, Text,
LongWritable, Text> {
private MultipleOutputs mos;
public void configure(JobConf conf) {
mos = new MultipleOutputs(conf);
}
public void reduce(LongWritable key, Iterator<Text> values,
OutputCollector<LongWritable, Text> output,
Reporter reporter)
throws IOException {
while (values.hasNext()) {
Text value = values.next();
if (!value.toString().equals("b")) {
output.collect(key, value);
} else {
mos.getCollector("text", reporter).collect(key, new Text("text"));
mos.getCollector("sequence", "B", reporter).collect(key,
new Text("sequence"));
mos.getCollector("sequence", "C", reporter).collect(key,
new Text("sequence"));
}
}
}
public void close() throws IOException {
mos.close();
}
}
@SuppressWarnings({"unchecked"})
public static class MOJavaSerDeMap implements Mapper<LongWritable, Text, Long,
String> {
private MultipleOutputs mos;
public void configure(JobConf conf) {
mos = new MultipleOutputs(conf);
}
public void map(LongWritable key, Text value,
OutputCollector<Long, String> output,
Reporter reporter)
throws IOException {
if (!value.toString().equals("a")) {
output.collect(key.get(), value.toString());
} else {
mos.getCollector("text", reporter).collect(key, "text");
}
}
public void close() throws IOException {
mos.close();
}
}
@SuppressWarnings({"unchecked"})
public static class MOJavaSerDeReduce implements Reducer<Long, String,
Long, String> {
private MultipleOutputs mos;
public void configure(JobConf conf) {
mos = new MultipleOutputs(conf);
}
public void reduce(Long key, Iterator<String> values,
OutputCollector<Long, String> output,
Reporter reporter)
throws IOException {
while (values.hasNext()) {
String value = values.next();
if (!value.equals("b")) {
output.collect(key, value);
} else {
mos.getCollector("text", reporter).collect(key, "text");
}
}
}
public void close() throws IOException {
mos.close();
}
}
}
| 12,600 | 30.036946 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestMultithreadedMapRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Iterator;
public class TestMultithreadedMapRunner extends HadoopTestCase {
public TestMultithreadedMapRunner() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testOKRun() throws Exception {
run(false, false);
}
public void testIOExRun() throws Exception {
run(true, false);
}
public void testRuntimeExRun() throws Exception {
run(false, true);
}
private void run(boolean ioEx, boolean rtEx) throws Exception {
Path inDir = new Path("testing/mt/input");
Path outDir = new Path("testing/mt/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data", "/tmp")
.replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
}
conf.setJobName("mt");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(IDMap.class);
conf.setReducerClass(IDReduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setMapRunnerClass(MultithreadedMapRunner.class);
conf.setInt(MultithreadedMapper.NUM_THREADS, 2);
if (ioEx) {
conf.setBoolean("multithreaded.ioException", true);
}
if (rtEx) {
conf.setBoolean("multithreaded.runtimeException", true);
}
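    // MultithreadedMapRunner drives IDMap.map() from a pool of two threads; an
    // IOException or RuntimeException thrown by any of them must fail the job,
    // which the success/failure asserts below verify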
JobClient jc = new JobClient(conf);
    RunningJob job = jc.submitJob(conf);
while (!job.isComplete()) {
Thread.sleep(100);
}
if (job.isSuccessful()) {
assertFalse(ioEx || rtEx);
}
else {
assertTrue(ioEx || rtEx);
}
}
public static class IDMap implements Mapper<LongWritable, Text,
LongWritable, Text> {
private boolean ioEx = false;
private boolean rtEx = false;
public void configure(JobConf job) {
ioEx = job.getBoolean("multithreaded.ioException", false);
rtEx = job.getBoolean("multithreaded.runtimeException", false);
}
public void map(LongWritable key, Text value,
OutputCollector<LongWritable, Text> output,
Reporter reporter)
throws IOException {
if (ioEx) {
throw new IOException();
}
if (rtEx) {
throw new RuntimeException();
}
output.collect(key, value);
try {
Thread.sleep(100);
} catch (InterruptedException ex) {
throw new RuntimeException(ex);
}
}
public void close() throws IOException {
}
}
public static class IDReduce implements Reducer<LongWritable, Text,
LongWritable, Text> {
public void configure(JobConf job) {
}
public void reduce(LongWritable key, Iterator<Text> values,
OutputCollector<LongWritable, Text> output,
Reporter reporter)
throws IOException {
while (values.hasNext()) {
output.collect(key, values.next());
}
}
public void close() throws IOException {
}
}
}
| 5,049 | 28.360465 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestChainMapReduce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Iterator;
public class TestChainMapReduce extends HadoopTestCase {
private static Path getFlagDir(boolean local) {
Path flagDir = new Path("testing/chain/flags");
// Hack for local FS that does not have the concept of a 'mounting point'
if (local) {
String localPathRoot = System.getProperty("test.build.data", "/tmp")
.replace(' ', '+');
flagDir = new Path(localPathRoot, flagDir);
}
return flagDir;
}
private static void cleanFlags(JobConf conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
fs.delete(getFlagDir(conf.getBoolean("localFS", true)), true);
fs.mkdirs(getFlagDir(conf.getBoolean("localFS", true)));
}
private static void writeFlag(JobConf conf, String flag) throws IOException {
FileSystem fs = FileSystem.get(conf);
if (getFlag(conf, flag)) {
fail("Flag " + flag + " already exists");
}
DataOutputStream file =
fs.create(new Path(getFlagDir(conf.getBoolean("localFS", true)), flag));
file.close();
}
private static boolean getFlag(JobConf conf, String flag) throws IOException {
FileSystem fs = FileSystem.get(conf);
return fs
.exists(new Path(getFlagDir(conf.getBoolean("localFS", true)), flag));
}
public TestChainMapReduce() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testChain() throws Exception {
Path inDir = new Path("testing/chain/input");
Path outDir = new Path("testing/chain/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data", "/tmp")
.replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
JobConf conf = createJobConf();
conf.setBoolean("localFS", isLocalFS());
conf.setInt("mapreduce.job.maps", 1);
cleanFlags(conf);
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("1\n2\n");
file.close();
conf.setJobName("chain");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.set("a", "X");
JobConf mapAConf = new JobConf(false);
mapAConf.set("a", "A");
ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, true, mapAConf);
ChainMapper.addMapper(conf, BMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, false, null);
JobConf reduceConf = new JobConf(false);
reduceConf.set("a", "C");
ChainReducer.setReducer(conf, CReduce.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, true, reduceConf);
ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, false, null);
JobConf mapEConf = new JobConf(false);
mapEConf.set("a", "E");
ChainReducer.addMapper(conf, EMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, true, mapEConf);
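    // the configured pipeline is AMap -> BMap on the map side and
    // CReduce -> DMap -> EMap on the reduce side; the byValue flag of each
    // stage decides whether key/value objects are passed as serialized copies,
    // which IDMap/IDReduce check by mutating the key after collect()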
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
JobClient jc = new JobClient(conf);
RunningJob job = jc.submitJob(conf);
while (!job.isComplete()) {
Thread.sleep(100);
}
assertTrue(getFlag(conf, "configure.A"));
assertTrue(getFlag(conf, "configure.B"));
assertTrue(getFlag(conf, "configure.C"));
assertTrue(getFlag(conf, "configure.D"));
assertTrue(getFlag(conf, "configure.E"));
assertTrue(getFlag(conf, "map.A.value.1"));
assertTrue(getFlag(conf, "map.A.value.2"));
assertTrue(getFlag(conf, "map.B.value.1"));
assertTrue(getFlag(conf, "map.B.value.2"));
assertTrue(getFlag(conf, "reduce.C.value.2"));
assertTrue(getFlag(conf, "reduce.C.value.1"));
assertTrue(getFlag(conf, "map.D.value.1"));
assertTrue(getFlag(conf, "map.D.value.2"));
assertTrue(getFlag(conf, "map.E.value.1"));
assertTrue(getFlag(conf, "map.E.value.2"));
assertTrue(getFlag(conf, "close.A"));
assertTrue(getFlag(conf, "close.B"));
assertTrue(getFlag(conf, "close.C"));
assertTrue(getFlag(conf, "close.D"));
assertTrue(getFlag(conf, "close.E"));
}
public static class AMap extends IDMap {
public AMap() {
super("A", "A", true);
}
}
public static class BMap extends IDMap {
public BMap() {
super("B", "X", false);
}
}
public static class CReduce extends IDReduce {
public CReduce() {
super("C", "C");
}
}
public static class DMap extends IDMap {
public DMap() {
super("D", "X", false);
}
}
public static class EMap extends IDMap {
public EMap() {
super("E", "E", true);
}
}
public static class IDMap
implements Mapper<LongWritable, Text, LongWritable, Text> {
private JobConf conf;
private String name;
private String prop;
private boolean byValue;
public IDMap(String name, String prop, boolean byValue) {
this.name = name;
this.prop = prop;
this.byValue = byValue;
}
public void configure(JobConf conf) {
this.conf = conf;
assertEquals(prop, conf.get("a"));
try {
writeFlag(conf, "configure." + name);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public void map(LongWritable key, Text value,
OutputCollector<LongWritable, Text> output,
Reporter reporter) throws IOException {
writeFlag(conf, "map." + name + ".value." + value);
key.set(10);
output.collect(key, value);
if (byValue) {
assertEquals(10, key.get());
} else {
assertNotSame(10, key.get());
}
key.set(11);
}
public void close() throws IOException {
try {
writeFlag(conf, "close." + name);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
}
public static class IDReduce
implements Reducer<LongWritable, Text, LongWritable, Text> {
private JobConf conf;
private String name;
private String prop;
private boolean byValue = false;
public IDReduce(String name, String prop) {
this.name = name;
this.prop = prop;
}
public void configure(JobConf conf) {
this.conf = conf;
assertEquals(prop, conf.get("a"));
try {
writeFlag(conf, "configure." + name);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public void reduce(LongWritable key, Iterator<Text> values,
OutputCollector<LongWritable, Text> output,
Reporter reporter) throws IOException {
while (values.hasNext()) {
Text value = values.next();
writeFlag(conf, "reduce." + name + ".value." + value);
key.set(10);
output.collect(key, value);
if (byValue) {
assertEquals(10, key.get());
} else {
assertNotSame(10, key.get());
}
key.set(11);
}
}
public void close() throws IOException {
try {
writeFlag(conf, "close." + name);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
}
}
| 8,746 | 29.583916 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.junit.Test;
public class TestKeyFieldBasedPartitioner {
/**
   * Tests that the key-field-based partitioner works with an empty key.
*/
@Test
public void testEmptyKey() throws Exception {
KeyFieldBasedPartitioner<Text, Text> kfbp =
new KeyFieldBasedPartitioner<Text, Text>();
JobConf conf = new JobConf();
conf.setInt("num.key.fields.for.partition", 10);
kfbp.configure(conf);
assertEquals("Empty key should map to 0th partition",
0, kfbp.getPartition(new Text(), new Text(), 10));
}
@Test
public void testMultiConfigure() {
KeyFieldBasedPartitioner<Text, Text> kfbp =
new KeyFieldBasedPartitioner<Text, Text>();
JobConf conf = new JobConf();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k1,1");
kfbp.setConf(conf);
Text key = new Text("foo\tbar");
Text val = new Text("val");
int partNum = kfbp.getPartition(key, val, 4096);
kfbp.configure(conf);
assertEquals(partNum, kfbp.getPartition(key,val, 4096));
}
}
| 1,995 | 35.290909 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
public class TestDelegatingInputFormat extends TestCase {
public void testSplitting() throws Exception {
JobConf conf = new JobConf();
MiniDFSCluster dfs = null;
try {
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4)
.racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
.hosts(new String[] { "host0", "host1", "host2", "host3" })
.build();
FileSystem fs = dfs.getFileSystem();
Path path = getPath("/foo/bar", fs);
Path path2 = getPath("/foo/baz", fs);
Path path3 = getPath("/bar/bar", fs);
Path path4 = getPath("/bar/baz", fs);
final int numSplits = 100;
MultipleInputs.addInputPath(conf, path, TextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(conf, path2, TextInputFormat.class,
MapClass2.class);
MultipleInputs.addInputPath(conf, path3, KeyValueTextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(conf, path4, TextInputFormat.class,
MapClass2.class);
DelegatingInputFormat inFormat = new DelegatingInputFormat();
InputSplit[] splits = inFormat.getSplits(conf, numSplits);
int[] bins = new int[3];
for (InputSplit split : splits) {
assertTrue(split instanceof TaggedInputSplit);
final TaggedInputSplit tis = (TaggedInputSplit) split;
int index = -1;
if (tis.getInputFormatClass().equals(KeyValueTextInputFormat.class)) {
// path3
index = 0;
} else if (tis.getMapperClass().equals(MapClass.class)) {
// path
index = 1;
} else {
// path2 and path4
index = 2;
}
bins[index]++;
}
// Each bin is a unique combination of a Mapper and InputFormat, and
// DelegatingInputFormat should split each bin into numSplits splits,
// regardless of the number of paths that use that Mapper/InputFormat
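      // Concretely, given the setup above: bin 0 is (KeyValueTextInputFormat,
      // MapClass) fed by path3, bin 1 is (TextInputFormat, MapClass) fed by path,
      // and bin 2 is (TextInputFormat, MapClass2) fed by path2 and path4.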
for (int count : bins) {
assertEquals(numSplits, count);
}
assertTrue(true);
} finally {
if (dfs != null) {
dfs.shutdown();
}
}
}
static Path getPath(final String location, final FileSystem fs)
throws IOException {
Path path = new Path(location);
// create a multi-block file on hdfs
DataOutputStream out = fs.create(path, true, 4096, (short) 2, 512, null);
for (int i = 0; i < 1000; ++i) {
out.writeChars("Hello\n");
}
out.close();
return path;
}
static class MapClass implements Mapper<String, String, String, String> {
public void map(String key, String value,
OutputCollector<String, String> output, Reporter reporter)
throws IOException {
}
public void configure(JobConf job) {
}
public void close() throws IOException {
}
}
static class MapClass2 extends MapClass {
}
}
| 4,265 | 31.075188 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestLineInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
public class TestLineInputFormat extends TestCase {
private static int MAX_LENGTH = 200;
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestLineInputFormat");
public void testFormat() throws Exception {
JobConf job = new JobConf();
Path file = new Path(workDir, "test.txt");
int seed = new Random().nextInt();
Random random = new Random(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
int numLinesPerMap = 5;
job.setInt("mapreduce.input.lineinputformat.linespermap", numLinesPerMap);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH/10) + 1) {
// create a file with length entries
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
checkFormat(job, numLinesPerMap);
}
}
// A reporter that does nothing
private static final Reporter voidReporter = Reporter.NULL;
void checkFormat(JobConf job, int expectedN) throws IOException{
NLineInputFormat format = new NLineInputFormat();
format.configure(job);
int ignoredNumSplits = 1;
InputSplit[] splits = format.getSplits(job, ignoredNumSplits);
// check all splits except last one
int count = 0;
for (int j = 0; j < splits.length -1; j++) {
assertEquals("There are no split locations", 0,
splits[j].getLocations().length);
RecordReader<LongWritable, Text> reader =
format.getRecordReader(splits[j], job, voidReporter);
Class readerClass = reader.getClass();
assertEquals("reader class is LineRecordReader.",
LineRecordReader.class, readerClass);
LongWritable key = reader.createKey();
Class keyClass = key.getClass();
assertEquals("Key class is LongWritable.", LongWritable.class, keyClass);
Text value = reader.createValue();
Class valueClass = value.getClass();
assertEquals("Value class is Text.", Text.class, valueClass);
try {
count = 0;
while (reader.next(key, value)) {
count++;
}
} finally {
reader.close();
}
assertEquals("number of lines in split is " + expectedN ,
expectedN, count);
}
}
public static void main(String[] args) throws Exception {
new TestLineInputFormat().testFormat();
}
}
| 3,946 | 32.168067 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestMultipleInputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
/**
* @see TestDelegatingInputFormat
*/
public class TestMultipleInputs extends TestCase {
public void testAddInputPathWithFormat() {
final JobConf conf = new JobConf();
MultipleInputs.addInputPath(conf, new Path("/foo"), TextInputFormat.class);
MultipleInputs.addInputPath(conf, new Path("/bar"),
KeyValueTextInputFormat.class);
final Map<Path, InputFormat> inputs = MultipleInputs
.getInputFormatMap(conf);
assertEquals(TextInputFormat.class, inputs.get(new Path("/foo")).getClass());
assertEquals(KeyValueTextInputFormat.class, inputs.get(new Path("/bar"))
.getClass());
}
public void testAddInputPathWithMapper() {
final JobConf conf = new JobConf();
MultipleInputs.addInputPath(conf, new Path("/foo"), TextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(conf, new Path("/bar"),
KeyValueTextInputFormat.class, MapClass2.class);
final Map<Path, InputFormat> inputs = MultipleInputs
.getInputFormatMap(conf);
final Map<Path, Class<? extends Mapper>> maps = MultipleInputs
.getMapperTypeMap(conf);
assertEquals(TextInputFormat.class, inputs.get(new Path("/foo")).getClass());
assertEquals(KeyValueTextInputFormat.class, inputs.get(new Path("/bar"))
.getClass());
assertEquals(MapClass.class, maps.get(new Path("/foo")));
assertEquals(MapClass2.class, maps.get(new Path("/bar")));
}
static class MapClass implements Mapper<String, String, String, String> {
public void map(String key, String value,
OutputCollector<String, String> output, Reporter reporter)
throws IOException {
}
public void configure(JobConf job) {
}
public void close() throws IOException {
}
}
static class MapClass2 extends MapClass {
}
}
| 3,099 | 35.046512 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestChain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.junit.Assert;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reducer;
import org.junit.Test;
public class TestChain {
@Test
public void testSetReducerWithReducerByValueAsTrue() throws Exception {
JobConf jobConf = new JobConf();
JobConf reducerConf = new JobConf();
Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
Object.class, Object.class, true, reducerConf);
boolean reduceByValue = reducerConf.getBoolean("chain.reducer.byValue",
false);
Assert.assertEquals("It should set chain.reducer.byValue as true "
+ "in reducerConf when we give value as true", true, reduceByValue);
}
@Test
public void testSetReducerWithReducerByValueAsFalse() throws Exception {
JobConf jobConf = new JobConf();
JobConf reducerConf = new JobConf();
Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
Object.class, Object.class, false, reducerConf);
boolean reduceByValue = reducerConf.getBoolean("chain.reducer.byValue",
true);
Assert.assertEquals("It should set chain.reducer.byValue as false "
+ "in reducerConf when we give value as false", false, reduceByValue);
}
interface MyReducer extends Reducer<Object, Object, Object, Object> {
}
}
| 2,160 | 36.912281 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/db/TestConstructQuery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.db;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
public class TestConstructQuery extends TestCase {
private String[] fieldNames = new String[] { "id", "name", "value" };
private String[] nullFieldNames = new String[] { null, null, null };
private String expected = "INSERT INTO hadoop_output (id,name,value) VALUES (?,?,?);";
private String nullExpected = "INSERT INTO hadoop_output VALUES (?,?,?);";
private DBOutputFormat<DBWritable, NullWritable> format
= new DBOutputFormat<DBWritable, NullWritable>();
public void testConstructQuery() {
String actual = format.constructQuery("hadoop_output", fieldNames);
assertEquals(expected, actual);
actual = format.constructQuery("hadoop_output", nullFieldNames);
assertEquals(nullExpected, actual);
}
public void testSetOutput() throws IOException {
JobConf job = new JobConf();
DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);
DBConfiguration dbConf = new DBConfiguration(job);
String actual = format.constructQuery(dbConf.getOutputTableName()
, dbConf.getOutputFieldNames());
assertEquals(expected, actual);
job = new JobConf();
dbConf = new DBConfiguration(job);
DBOutputFormat.setOutput(job, "hadoop_output", nullFieldNames.length);
assertNull(dbConf.getOutputFieldNames());
assertEquals(nullFieldNames.length, dbConf.getOutputFieldCount());
actual = format.constructQuery(dbConf.getOutputTableName()
, new String[dbConf.getOutputFieldCount()]);
assertEquals(nullExpected, actual);
}
}
| 2,543 | 36.411765 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/aggregate/TestAggregates.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.lib.*;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import junit.framework.TestCase;
import java.io.*;
import java.util.*;
import java.text.NumberFormat;
public class TestAggregates extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
public void testAggregates() throws Exception {
launch();
}
public static void launch() throws Exception {
JobConf conf = new JobConf(TestAggregates.class);
FileSystem fs = FileSystem.get(conf);
int numOfInputLines = 20;
Path OUTPUT_DIR = new Path("build/test/output_for_aggregates_test");
Path INPUT_DIR = new Path("build/test/input_for_aggregates_test");
String inputFile = "input.txt";
fs.delete(INPUT_DIR, true);
fs.mkdirs(INPUT_DIR);
fs.delete(OUTPUT_DIR, true);
StringBuffer inputData = new StringBuffer();
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append("max\t19\n");
expectedOutput.append("min\t1\n");
FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
for (int i = 1; i < numOfInputLines; i++) {
expectedOutput.append("count_").append(idFormat.format(i));
expectedOutput.append("\t").append(i).append("\n");
inputData.append(idFormat.format(i));
for (int j = 1; j < i; j++) {
inputData.append(" ").append(idFormat.format(i));
}
inputData.append("\n");
}
expectedOutput.append("value_as_string_max\t9\n");
expectedOutput.append("value_as_string_min\t1\n");
expectedOutput.append("uniq_count\t15\n");
fileOut.write(inputData.toString().getBytes("utf-8"));
fileOut.close();
System.out.println("inputData:");
System.out.println(inputData.toString());
JobConf job = new JobConf(conf, TestAggregates.class);
FileInputFormat.setInputPaths(job, INPUT_DIR);
job.setInputFormat(TextInputFormat.class);
FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
job.setOutputFormat(TextOutputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
job.setMapperClass(ValueAggregatorMapper.class);
job.setReducerClass(ValueAggregatorReducer.class);
job.setCombinerClass(ValueAggregatorCombiner.class);
job.setInt("aggregator.descriptor.num", 1);
job.set("aggregator.descriptor.0",
"UserDefined,org.apache.hadoop.mapred.lib.aggregate.AggregatorTests");
job.setLong("aggregate.max.num.unique.values", 14);
JobClient.runJob(job);
//
// Finally, we compare the reconstructed answer key with the
// original one. Remember, we need to ignore zero-count items
// in the original key.
//
boolean success = true;
Path outPath = new Path(OUTPUT_DIR, "part-00000");
String outdata = MapReduceTestUtil.readOutput(outPath,job);
System.out.println("full out data:");
System.out.println(outdata.toString());
outdata = outdata.substring(0, expectedOutput.toString().length());
assertEquals(expectedOutput.toString(),outdata);
//fs.delete(OUTPUT_DIR);
fs.delete(INPUT_DIR, true);
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
launch();
}
}
| 4,430 | 33.348837 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.aggregate;
import org.apache.hadoop.io.Text;
import java.util.ArrayList;
import java.util.Map.Entry;
public class AggregatorTests extends ValueAggregatorBaseDescriptor {
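  // Worked example (illustrative, not part of the original descriptor): for an
  // input value of "0003 0003", each word "0003" yields the entries
  //   (LONG_VALUE_SUM,   "count_0003", ONE)    -> per-word occurrence count
  //   (LONG_VALUE_MAX,   "max", "0003") and (LONG_VALUE_MIN, "min", "0003")
  //   (STRING_VALUE_MAX, "value_as_string_max", "3") and the _min counterpart
  //   (UNIQ_VALUE_COUNT, "uniq_count", "0003")
  //   (VALUE_HISTOGRAM,  "histogram", "0003")
  // which is the shape of the expected output asserted in TestAggregates above.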
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
String [] words = val.toString().split(" ");
String countType;
String id;
Entry<Text, Text> e;
for (String word: words) {
long numVal = Long.parseLong(word);
countType = LONG_VALUE_SUM;
id = "count_" + word;
e = generateEntry(countType, id, ValueAggregatorDescriptor.ONE);
if (e != null) {
retv.add(e);
}
countType = LONG_VALUE_MAX;
id = "max";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = LONG_VALUE_MIN;
id = "min";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = STRING_VALUE_MAX;
id = "value_as_string_max";
e = generateEntry(countType, id, new Text(""+numVal));
if (e != null) {
retv.add(e);
}
countType = STRING_VALUE_MIN;
id = "value_as_string_min";
e = generateEntry(countType, id, new Text(""+numVal));
if (e != null) {
retv.add(e);
}
countType = UNIQ_VALUE_COUNT;
id = "uniq_count";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = VALUE_HISTOGRAM;
id = "histogram";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
}
return retv;
}
}
| 2,646 | 28.741573 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
/**
* Reducer that accumulates values based on their type.
* <p>
* The type is specified in the key part of the key-value pair
* as a prefix to the key in the following way
* <p>
* <tt>type:key</tt>
* <p>
* The values are accumulated according to the types:
* <ul>
* <li><tt>s:</tt> - string, concatenate</li>
 * <li><tt>f:</tt> - float, sum</li>
 * <li><tt>l:</tt> - long, sum</li>
* </ul>
*
*/
@SuppressWarnings("deprecation")
public class AccumulatingReducer extends MapReduceBase
implements Reducer<Text, Text, Text, Text> {
static final String VALUE_TYPE_LONG = "l:";
static final String VALUE_TYPE_FLOAT = "f:";
static final String VALUE_TYPE_STRING = "s:";
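  // Usage sketch (illustrative, not part of the original class): mappers feeding
  // this reducer prefix each output key with one of the markers above so the
  // reducer knows how to accumulate the values, e.g.
  //   output.collect(new Text(VALUE_TYPE_LONG + "tasks"), new Text("1"));      // summed as long
  //   output.collect(new Text(VALUE_TYPE_FLOAT + "rate"), new Text("12.5"));   // summed as float
  //   output.collect(new Text(VALUE_TYPE_STRING + "host"), new Text("host1")); // concatenated with ';'
  // IOStatMapper.collectStats in DFSCIOTest below emits keys in exactly this form.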
private static final Log LOG = LogFactory.getLog(AccumulatingReducer.class);
protected String hostName;
public AccumulatingReducer () {
try {
hostName = java.net.InetAddress.getLocalHost().getHostName();
} catch(Exception e) {
hostName = "localhost";
}
LOG.info("Starting AccumulatingReducer on " + hostName);
}
public void reduce(Text key,
Iterator<Text> values,
OutputCollector<Text, Text> output,
Reporter reporter
) throws IOException {
String field = key.toString();
reporter.setStatus("starting " + field + " ::host = " + hostName);
// concatenate strings
if (field.startsWith(VALUE_TYPE_STRING)) {
StringBuffer sSum = new StringBuffer();
while (values.hasNext())
sSum.append(values.next().toString()).append(";");
output.collect(key, new Text(sSum.toString()));
reporter.setStatus("finished " + field + " ::host = " + hostName);
return;
}
    // sum float values
if (field.startsWith(VALUE_TYPE_FLOAT)) {
float fSum = 0;
while (values.hasNext())
fSum += Float.parseFloat(values.next().toString());
output.collect(key, new Text(String.valueOf(fSum)));
reporter.setStatus("finished " + field + " ::host = " + hostName);
return;
}
// sum long values
if (field.startsWith(VALUE_TYPE_LONG)) {
long lSum = 0;
while (values.hasNext()) {
lSum += Long.parseLong(values.next().toString());
}
output.collect(key, new Text(String.valueOf(lSum)));
}
reporter.setStatus("finished " + field + " ::host = " + hostName);
}
}
| 3,443 | 33.09901 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestJHLA.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.io.File;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.Before;
/**
* Test Job History Log Analyzer.
*
* @see JHLogAnalyzer
*/
public class TestJHLA extends TestCase {
private static final Log LOG = LogFactory.getLog(JHLogAnalyzer.class);
private String historyLog = System.getProperty("test.build.data",
"build/test/data") + "/history/test.log";
@Before
public void setUp() throws Exception {
File logFile = new File(historyLog);
if(!logFile.getParentFile().exists())
if(!logFile.getParentFile().mkdirs())
LOG.error("Cannot create dirs for history log file: " + historyLog);
if(!logFile.createNewFile())
LOG.error("Cannot create history log file: " + historyLog);
BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(historyLog)));
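    // The synthetic log below holds three job histories separated by
    // "$!!FILE=...!!" markers: job_200903250600_0004 (successful, user hadoop),
    // job_200903250600_0023 (successful, user hadoop2), and
    // job_200903250600_0034 (killed, user hadoop3, with one failed map attempt),
    // which is what the -usersIncluded/-usersExcluded runs in testJHLA exercise.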
writer.write("$!!FILE=file1.log!!"); writer.newLine();
writer.write("Meta VERSION=\"1\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0004\" JOBNAME=\"streamjob21364.jar\" USER=\"hadoop\" SUBMIT_TIME=\"1237962008012\" JOBCONF=\"hdfs:///job_200903250600_0004/job.xml\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0004\" JOB_PRIORITY=\"NORMAL\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0004\" LAUNCH_TIME=\"1237962008712\" TOTAL_MAPS=\"2\" TOTAL_REDUCES=\"0\" JOB_STATUS=\"PREP\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000003\" TASK_TYPE=\"SETUP\" START_TIME=\"1237962008736\" SPLITS=\"\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"SETUP\" TASKID=\"task_200903250600_0004_m_000003\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000003_0\" START_TIME=\"1237962010929\" TRACKER_NAME=\"tracker_50445\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"SETUP\" TASKID=\"task_200903250600_0004_m_000003\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000003_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962012459\" HOSTNAME=\"host.com\" STATE_STRING=\"setup\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000003\" TASK_TYPE=\"SETUP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962023824\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0004\" JOB_STATUS=\"RUNNING\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000000\" TASK_TYPE=\"MAP\" START_TIME=\"1237962024049\" SPLITS=\"host1.com,host2.com,host3.com\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000001\" TASK_TYPE=\"MAP\" START_TIME=\"1237962024065\" SPLITS=\"host1.com,host2.com,host3.com\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0004_m_000000\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000000_0\" START_TIME=\"1237962026157\" TRACKER_NAME=\"tracker_50524\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0004_m_000000\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000000_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962041307\" HOSTNAME=\"host.com\" STATE_STRING=\"Records R/W=2681/1\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(56630)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(28327)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(2681)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(28327)][(MAP_OUTPUT_RECORDS)(Map output records)(2681)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000000\" TASK_TYPE=\"MAP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962054138\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(56630)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(28327)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(2681)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(28327)][(MAP_OUTPUT_RECORDS)(Map output records)(2681)]}\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0004_m_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000001_0\" START_TIME=\"1237962026077\" TRACKER_NAME=\"tracker_50162\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0004_m_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000001_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962041030\" HOSTNAME=\"host.com\" STATE_STRING=\"Records R/W=2634/1\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(28316)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(28303)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(2634)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(28303)][(MAP_OUTPUT_RECORDS)(Map output records)(2634)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000001\" TASK_TYPE=\"MAP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962054187\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(28316)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(28303)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(2634)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(28303)][(MAP_OUTPUT_RECORDS)(Map output records)(2634)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000002\" TASK_TYPE=\"CLEANUP\" START_TIME=\"1237962054187\" SPLITS=\"\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"CLEANUP\" TASKID=\"task_200903250600_0004_m_000002\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000002_0\" START_TIME=\"1237962055578\" TRACKER_NAME=\"tracker_50162\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"CLEANUP\" TASKID=\"task_200903250600_0004_m_000002\" TASK_ATTEMPT_ID=\"attempt_200903250600_0004_m_000002_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962056782\" HOSTNAME=\"host.com\" STATE_STRING=\"cleanup\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0004_m_000002\" TASK_TYPE=\"CLEANUP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237962069193\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0004\" FINISH_TIME=\"1237962069193\" JOB_STATUS=\"SUCCESS\" FINISHED_MAPS=\"2\" FINISHED_REDUCES=\"0\" FAILED_MAPS=\"0\" FAILED_REDUCES=\"0\" COUNTERS=\"{(org.apache.hadoop.mapred.JobInProgress$Counter)(Job Counters )[(TOTAL_LAUNCHED_MAPS)(Launched map tasks)(2)]}{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(84946)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(56630)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(5315)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(56630)][(MAP_OUTPUT_RECORDS)(Map output records)(5315)]}\" ."); writer.newLine();
writer.write("$!!FILE=file2.log!!"); writer.newLine();
writer.write("Meta VERSION=\"1\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0023\" JOBNAME=\"TestJob\" USER=\"hadoop2\" SUBMIT_TIME=\"1237964779799\" JOBCONF=\"hdfs:///job_200903250600_0023/job.xml\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0023\" JOB_PRIORITY=\"NORMAL\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0023\" LAUNCH_TIME=\"1237964780928\" TOTAL_MAPS=\"2\" TOTAL_REDUCES=\"0\" JOB_STATUS=\"PREP\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_r_000001\" TASK_TYPE=\"SETUP\" START_TIME=\"1237964780940\" SPLITS=\"\" ."); writer.newLine();
writer.write("ReduceAttempt TASK_TYPE=\"SETUP\" TASKID=\"task_200903250600_0023_r_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_r_000001_0\" START_TIME=\"1237964720322\" TRACKER_NAME=\"tracker_3065\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("ReduceAttempt TASK_TYPE=\"SETUP\" TASKID=\"task_200903250600_0023_r_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_r_000001_0\" TASK_STATUS=\"SUCCESS\" SHUFFLE_FINISHED=\"1237964722118\" SORT_FINISHED=\"1237964722118\" FINISH_TIME=\"1237964722118\" HOSTNAME=\"host.com\" STATE_STRING=\"setup\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(0)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(0)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_r_000001\" TASK_TYPE=\"SETUP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237964796054\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(0)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(0)]}\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0023\" JOB_STATUS=\"RUNNING\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_m_000000\" TASK_TYPE=\"MAP\" START_TIME=\"1237964796176\" SPLITS=\"\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_m_000001\" TASK_TYPE=\"MAP\" START_TIME=\"1237964796176\" SPLITS=\"\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0023_m_000000\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_m_000000_0\" START_TIME=\"1237964809765\" TRACKER_NAME=\"tracker_50459\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0023_m_000000\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_m_000000_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237964911772\" HOSTNAME=\"host.com\" STATE_STRING=\"\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(500000000)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(5000000)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(5000000)][(MAP_OUTPUT_RECORDS)(Map output records)(5000000)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_m_000000\" TASK_TYPE=\"MAP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237964916534\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(500000000)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(5000000)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(5000000)][(MAP_OUTPUT_RECORDS)(Map output records)(5000000)]}\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0023_m_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_m_000001_0\" START_TIME=\"1237964798169\" TRACKER_NAME=\"tracker_1524\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0023_m_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_m_000001_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237964962960\" HOSTNAME=\"host.com\" STATE_STRING=\"\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(500000000)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(5000000)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(5000000)][(MAP_OUTPUT_RECORDS)(Map output records)(5000000)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_m_000001\" TASK_TYPE=\"MAP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237964976870\" COUNTERS=\"{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(500000000)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(5000000)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(5000000)][(MAP_OUTPUT_RECORDS)(Map output records)(5000000)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_r_000000\" TASK_TYPE=\"CLEANUP\" START_TIME=\"1237964976871\" SPLITS=\"\" ."); writer.newLine();
writer.write("ReduceAttempt TASK_TYPE=\"CLEANUP\" TASKID=\"task_200903250600_0023_r_000000\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_r_000000_0\" START_TIME=\"1237964977208\" TRACKER_NAME=\"tracker_1524\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("ReduceAttempt TASK_TYPE=\"CLEANUP\" TASKID=\"task_200903250600_0023_r_000000\" TASK_ATTEMPT_ID=\"attempt_200903250600_0023_r_000000_0\" TASK_STATUS=\"SUCCESS\" SHUFFLE_FINISHED=\"1237964979031\" SORT_FINISHED=\"1237964979031\" FINISH_TIME=\"1237964979032\" HOSTNAME=\"host.com\" STATE_STRING=\"cleanup\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(0)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(0)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0023_r_000000\" TASK_TYPE=\"CLEANUP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237964991879\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(0)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(0)]}\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0023\" FINISH_TIME=\"1237964991879\" JOB_STATUS=\"SUCCESS\" FINISHED_MAPS=\"2\" FINISHED_REDUCES=\"0\" FAILED_MAPS=\"0\" FAILED_REDUCES=\"0\" COUNTERS=\"{(org.apache.hadoop.mapred.JobInProgress$Counter)(Job Counters )[(TOTAL_LAUNCHED_MAPS)(Launched map tasks)(2)]}{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(1000000000)]}{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(MAP_INPUT_RECORDS)(Map input records)(10000000)][(SPILLED_RECORDS)(Spilled Records)(0)][(MAP_INPUT_BYTES)(Map input bytes)(10000000)][(MAP_OUTPUT_RECORDS)(Map output records)(10000000)]}\" ."); writer.newLine();
writer.write("$!!FILE=file3.log!!"); writer.newLine();
writer.write("Meta VERSION=\"1\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0034\" JOBNAME=\"TestJob\" USER=\"hadoop3\" SUBMIT_TIME=\"1237966370007\" JOBCONF=\"hdfs:///job_200903250600_0034/job.xml\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0034\" JOB_PRIORITY=\"NORMAL\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0034\" LAUNCH_TIME=\"1237966371076\" TOTAL_MAPS=\"2\" TOTAL_REDUCES=\"0\" JOB_STATUS=\"PREP\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0034_m_000003\" TASK_TYPE=\"SETUP\" START_TIME=\"1237966371093\" SPLITS=\"\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"SETUP\" TASKID=\"task_200903250600_0034_m_000003\" TASK_ATTEMPT_ID=\"attempt_200903250600_0034_m_000003_0\" START_TIME=\"1237966371524\" TRACKER_NAME=\"tracker_50118\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"SETUP\" TASKID=\"task_200903250600_0034_m_000003\" TASK_ATTEMPT_ID=\"attempt_200903250600_0034_m_000003_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237966373174\" HOSTNAME=\"host.com\" STATE_STRING=\"setup\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0034_m_000003\" TASK_TYPE=\"SETUP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237966386098\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0034\" JOB_STATUS=\"RUNNING\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0034_m_000000\" TASK_TYPE=\"MAP\" START_TIME=\"1237966386111\" SPLITS=\"\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0034_m_000001\" TASK_TYPE=\"MAP\" START_TIME=\"1237966386124\" SPLITS=\"\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"MAP\" TASKID=\"task_200903250600_0034_m_000001\" TASK_ATTEMPT_ID=\"attempt_200903250600_0034_m_000001_0\" TASK_STATUS=\"FAILED\" FINISH_TIME=\"1237967174546\" HOSTNAME=\"host.com\" ERROR=\"java.io.IOException: Task process exit with nonzero status of 15."); writer.newLine();
writer.write(" at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:424)"); writer.newLine();
writer.write(",java.io.IOException: Task process exit with nonzero status of 15."); writer.newLine();
writer.write(" at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:424)"); writer.newLine();
writer.write("\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0034_m_000002\" TASK_TYPE=\"CLEANUP\" START_TIME=\"1237967170815\" SPLITS=\"\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"CLEANUP\" TASKID=\"task_200903250600_0034_m_000002\" TASK_ATTEMPT_ID=\"attempt_200903250600_0034_m_000002_0\" START_TIME=\"1237967168653\" TRACKER_NAME=\"tracker_3105\" HTTP_PORT=\"50060\" ."); writer.newLine();
writer.write("MapAttempt TASK_TYPE=\"CLEANUP\" TASKID=\"task_200903250600_0034_m_000002\" TASK_ATTEMPT_ID=\"attempt_200903250600_0034_m_000002_0\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237967171301\" HOSTNAME=\"host.com\" STATE_STRING=\"cleanup\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Task TASKID=\"task_200903250600_0034_m_000002\" TASK_TYPE=\"CLEANUP\" TASK_STATUS=\"SUCCESS\" FINISH_TIME=\"1237967185818\" COUNTERS=\"{(org.apache.hadoop.mapred.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}\" ."); writer.newLine();
writer.write("Job JOBID=\"job_200903250600_0034\" FINISH_TIME=\"1237967185818\" JOB_STATUS=\"KILLED\" FINISHED_MAPS=\"0\" FINISHED_REDUCES=\"0\" ."); writer.newLine();
writer.close();
}
@After
public void tearDown() throws Exception {
File logFile = new File(historyLog);
if(!logFile.delete())
LOG.error("Cannot delete history log file: " + historyLog);
if(!logFile.getParentFile().delete())
LOG.error("Cannot delete history log dir: " + historyLog);
}
/**
* Run log analyzer in test mode for file test.log.
*/
public void testJHLA() {
String[] args = {"-test", historyLog, "-jobDelimiter", ".!!FILE=.*!!"};
JHLogAnalyzer.main(args);
args = new String[]{"-test", historyLog, "-jobDelimiter", ".!!FILE=.*!!",
"-usersIncluded", "hadoop,hadoop2"};
JHLogAnalyzer.main(args);
args = new String[]{"-test", historyLog, "-jobDelimiter", ".!!FILE=.*!!",
"-usersExcluded", "hadoop,hadoop3"};
JHLogAnalyzer.main(args);
}
}
| 21,291 | 143.843537 | 709 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DFSCIOTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.util.Date;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.*;
import org.junit.Ignore;
/**
* Distributed i/o benchmark.
* <p>
* This test writes into or reads from a specified number of files.
* File size is specified as a parameter to the test.
* Each file is accessed in a separate map task.
* <p>
* The reducer collects the following statistics:
* <ul>
* <li>number of tasks completed</li>
* <li>number of bytes written/read</li>
* <li>execution time</li>
* <li>io rate</li>
* <li>io rate squared</li>
* </ul>
*
* Finally, the following information is appended to a local file
* <ul>
* <li>read or write test</li>
* <li>date and time the test finished</li>
* <li>number of files</li>
* <li>total number of bytes processed</li>
* <li>throughput in mb/sec (total number of bytes / sum of processing times)</li>
* <li>average i/o rate in mb/sec per file</li>
* <li>standard i/o rate deviation</li>
* </ul>
*/
@Ignore
public class DFSCIOTest extends TestCase {
// Constants
private static final Log LOG = LogFactory.getLog(DFSCIOTest.class);
private static final int TEST_TYPE_READ = 0;
private static final int TEST_TYPE_WRITE = 1;
private static final int TEST_TYPE_CLEANUP = 2;
private static final int DEFAULT_BUFFER_SIZE = 1000000;
private static final String BASE_FILE_NAME = "test_io_";
private static final String DEFAULT_RES_FILE_NAME = "DFSCIOTest_results.log";
private static Configuration fsConfig = new Configuration();
private static final long MEGA = 0x100000;
private static String TEST_ROOT_DIR = System.getProperty("test.build.data","/benchmarks/DFSCIOTest");
private static Path CONTROL_DIR = new Path(TEST_ROOT_DIR, "io_control");
private static Path WRITE_DIR = new Path(TEST_ROOT_DIR, "io_write");
private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read");
private static Path DATA_DIR = new Path(TEST_ROOT_DIR, "io_data");
private static Path HDFS_TEST_DIR = new Path("/tmp/DFSCIOTest");
private static String HDFS_LIB_VERSION = System.getProperty("libhdfs.version", "1");
private static String CHMOD = new String("chmod");
private static Path HDFS_SHLIB = new Path(HDFS_TEST_DIR + "/libhdfs.so." + HDFS_LIB_VERSION);
private static Path HDFS_READ = new Path(HDFS_TEST_DIR + "/hdfs_read");
private static Path HDFS_WRITE = new Path(HDFS_TEST_DIR + "/hdfs_write");
/**
* Run the test with default parameters.
*
* @throws Exception
*/
public void testIOs() throws Exception {
testIOs(10, 10);
}
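  // Usage sketch (parameter values here are illustrative, not from the original
  // test): a smaller run that writes and then reads 5 files of 10 MB each:
  //   DFSCIOTest.testIOs(10, 5);  // fileSize = 10 MB per file, nrFiles = 5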
/**
* Run the test with the specified parameters.
*
* @param fileSize file size
* @param nrFiles number of files
* @throws IOException
*/
public static void testIOs(int fileSize, int nrFiles)
throws IOException {
FileSystem fs = FileSystem.get(fsConfig);
createControlFile(fs, fileSize, nrFiles);
writeTest(fs);
readTest(fs);
}
private static void createControlFile(
FileSystem fs,
int fileSize, // in MB
int nrFiles
) throws IOException {
LOG.info("creating control file: "+fileSize+" mega bytes, "+nrFiles+" files");
fs.delete(CONTROL_DIR, true);
for(int i=0; i < nrFiles; i++) {
String name = getFileName(i);
Path controlFile = new Path(CONTROL_DIR, "in_file_" + name);
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(fs, fsConfig, controlFile,
Text.class, LongWritable.class,
CompressionType.NONE);
writer.append(new Text(name), new LongWritable(fileSize));
} catch(Exception e) {
throw new IOException(e.getLocalizedMessage());
} finally {
if (writer != null)
writer.close();
writer = null;
}
}
LOG.info("created control files for: "+nrFiles+" files");
}
private static String getFileName(int fIdx) {
return BASE_FILE_NAME + Integer.toString(fIdx);
}
/**
* Write/Read mapper base class.
* <p>
* Collects the following statistics per task:
* <ul>
* <li>number of tasks completed</li>
* <li>number of bytes written/read</li>
* <li>execution time</li>
* <li>i/o rate</li>
* <li>i/o rate squared</li>
* </ul>
*/
private abstract static class IOStatMapper extends IOMapperBase<Long> {
IOStatMapper() {
}
void collectStats(OutputCollector<Text, Text> output,
String name,
long execTime,
Long objSize) throws IOException {
long totalSize = objSize.longValue();
float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
LOG.info("Number of bytes processed = " + totalSize);
LOG.info("Exec time = " + execTime);
LOG.info("IO rate = " + ioRateMbSec);
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"),
new Text(String.valueOf(1)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
new Text(String.valueOf(totalSize)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
new Text(String.valueOf(execTime)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
new Text(String.valueOf(ioRateMbSec*1000)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"),
new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000)));
}
}
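  // Sketch of the records a single map task emits through collectStats()
  // (the "l:"/"f:" type prefixes are assumed to be AccumulatingReducer's
  // VALUE_TYPE_LONG / VALUE_TYPE_FLOAT constants):
  //
  //   l:tasks  -> 1
  //   l:size   -> bytes processed by this task
  //   l:time   -> execution time in ms
  //   f:rate   -> ioRateMbSec * 1000
  //   f:sqrate -> ioRateMbSec * ioRateMbSec * 1000
  //
  // The single reducer sums these per key; analyzeResult() then derives
  // throughput, mean io rate and its standard deviation from the sums.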
/**
* Write mapper class.
*/
public static class WriteMapper extends IOStatMapper {
public WriteMapper() {
super();
for(int i=0; i < bufferSize; i++)
buffer[i] = (byte)('0' + i % 50);
}
public Long doIO(Reporter reporter,
String name,
long totalSize
) throws IOException {
// create file
totalSize *= MEGA;
// create instance of local filesystem
FileSystem localFS = FileSystem.getLocal(fsConfig);
try {
// native runtime
Runtime runTime = Runtime.getRuntime();
// copy the dso and executable from dfs and chmod them
synchronized (this) {
localFS.delete(HDFS_TEST_DIR, true);
if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem");
}
}
synchronized (this) {
if (!localFS.exists(HDFS_SHLIB)) {
FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig);
            String chmodCmd = CHMOD + " a+x " + HDFS_SHLIB;
Process process = runTime.exec(chmodCmd);
int exitStatus = process.waitFor();
if (exitStatus != 0) {
throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
}
}
}
synchronized (this) {
if (!localFS.exists(HDFS_WRITE)) {
FileUtil.copy(fs, HDFS_WRITE, localFS, HDFS_WRITE, false, fsConfig);
            String chmodCmd = CHMOD + " a+x " + HDFS_WRITE;
Process process = runTime.exec(chmodCmd);
int exitStatus = process.waitFor();
if (exitStatus != 0) {
throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
}
}
}
// exec the C program
Path outFile = new Path(DATA_DIR, name);
        String writeCmd = HDFS_WRITE + " " + outFile + " " + totalSize + " " + bufferSize;
Process process = runTime.exec(writeCmd, null, new File(HDFS_TEST_DIR.toString()));
int exitStatus = process.waitFor();
if (exitStatus != 0) {
throw new IOException(writeCmd + ": Failed with exitStatus: " + exitStatus);
}
} catch (InterruptedException interruptedException) {
reporter.setStatus(interruptedException.toString());
} finally {
localFS.close();
}
      return Long.valueOf(totalSize);
}
}
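  // For illustration, with the default settings above a write map task for
  // file "test_io_0" of 10 MB ends up exec-ing the native helper roughly as
  //
  //   /tmp/DFSCIOTest/hdfs_write /benchmarks/DFSCIOTest/io_data/test_io_0 10485760 1000000
  //
  // (hypothetical concrete values: 10 MB file, DEFAULT_BUFFER_SIZE buffer;
  // the data directory prefix depends on the test.build.data property).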
private static void writeTest(FileSystem fs)
throws IOException {
fs.delete(DATA_DIR, true);
fs.delete(WRITE_DIR, true);
runIOTest(WriteMapper.class, WRITE_DIR);
}
private static void runIOTest( Class<? extends Mapper> mapperClass,
Path outputDir
) throws IOException {
JobConf job = new JobConf(fsConfig, DFSCIOTest.class);
FileInputFormat.setInputPaths(job, CONTROL_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(mapperClass);
job.setReducerClass(AccumulatingReducer.class);
FileOutputFormat.setOutputPath(job, outputDir);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
/**
* Read mapper class.
*/
public static class ReadMapper extends IOStatMapper {
public ReadMapper() {
super();
}
public Long doIO(Reporter reporter,
String name,
long totalSize
) throws IOException {
totalSize *= MEGA;
// create instance of local filesystem
FileSystem localFS = FileSystem.getLocal(fsConfig);
try {
// native runtime
Runtime runTime = Runtime.getRuntime();
// copy the dso and executable from dfs
synchronized (this) {
localFS.delete(HDFS_TEST_DIR, true);
if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem");
}
}
synchronized (this) {
if (!localFS.exists(HDFS_SHLIB)) {
if (!FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig)) {
throw new IOException("Failed to copy " + HDFS_SHLIB + " to local filesystem");
}
            String chmodCmd = CHMOD + " a+x " + HDFS_SHLIB;
Process process = runTime.exec(chmodCmd);
int exitStatus = process.waitFor();
if (exitStatus != 0) {
throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
}
}
}
synchronized (this) {
if (!localFS.exists(HDFS_READ)) {
if (!FileUtil.copy(fs, HDFS_READ, localFS, HDFS_READ, false, fsConfig)) {
throw new IOException("Failed to copy " + HDFS_READ + " to local filesystem");
}
            String chmodCmd = CHMOD + " a+x " + HDFS_READ;
Process process = runTime.exec(chmodCmd);
int exitStatus = process.waitFor();
if (exitStatus != 0) {
throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
}
}
}
// exec the C program
Path inFile = new Path(DATA_DIR, name);
        String readCmd = HDFS_READ + " " + inFile + " " + totalSize + " " + bufferSize;
Process process = runTime.exec(readCmd, null, new File(HDFS_TEST_DIR.toString()));
int exitStatus = process.waitFor();
if (exitStatus != 0) {
          throw new IOException(readCmd + ": Failed with exitStatus: " + exitStatus);
}
} catch (InterruptedException interruptedException) {
reporter.setStatus(interruptedException.toString());
} finally {
localFS.close();
}
      return Long.valueOf(totalSize);
}
}
private static void readTest(FileSystem fs) throws IOException {
fs.delete(READ_DIR, true);
runIOTest(ReadMapper.class, READ_DIR);
}
private static void sequentialTest(
FileSystem fs,
int testType,
int fileSize,
int nrFiles
) throws Exception {
IOStatMapper ioer = null;
if (testType == TEST_TYPE_READ)
ioer = new ReadMapper();
else if (testType == TEST_TYPE_WRITE)
ioer = new WriteMapper();
else
return;
for(int i=0; i < nrFiles; i++)
      ioer.doIO(Reporter.NULL,
                BASE_FILE_NAME+Integer.toString(i),
                fileSize); // doIO() converts MB to bytes itself; MEGA*fileSize would double-scale
}
public static void main(String[] args) {
int testType = TEST_TYPE_READ;
int bufferSize = DEFAULT_BUFFER_SIZE;
int fileSize = 1;
int nrFiles = 1;
String resFileName = DEFAULT_RES_FILE_NAME;
boolean isSequential = false;
String version="DFSCIOTest.0.0.1";
String usage = "Usage: DFSCIOTest -read | -write | -clean [-nrFiles N] [-fileSize MB] [-resFile resultFileName] [-bufferSize Bytes] ";
System.out.println(version);
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].startsWith("-r")) {
testType = TEST_TYPE_READ;
} else if (args[i].startsWith("-w")) {
testType = TEST_TYPE_WRITE;
} else if (args[i].startsWith("-clean")) {
testType = TEST_TYPE_CLEANUP;
} else if (args[i].startsWith("-seq")) {
isSequential = true;
} else if (args[i].equals("-nrFiles")) {
nrFiles = Integer.parseInt(args[++i]);
} else if (args[i].equals("-fileSize")) {
fileSize = Integer.parseInt(args[++i]);
} else if (args[i].equals("-bufferSize")) {
bufferSize = Integer.parseInt(args[++i]);
} else if (args[i].equals("-resFile")) {
resFileName = args[++i];
}
}
LOG.info("nrFiles = " + nrFiles);
LOG.info("fileSize (MB) = " + fileSize);
LOG.info("bufferSize = " + bufferSize);
try {
fsConfig.setInt("test.io.file.buffer.size", bufferSize);
FileSystem fs = FileSystem.get(fsConfig);
if (testType != TEST_TYPE_CLEANUP) {
fs.delete(HDFS_TEST_DIR, true);
if (!fs.mkdirs(HDFS_TEST_DIR)) {
throw new IOException("Mkdirs failed to create " +
HDFS_TEST_DIR.toString());
}
//Copy the executables over to the remote filesystem
String hadoopHome = System.getenv("HADOOP_PREFIX");
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION),
HDFS_SHLIB);
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ);
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_write"), HDFS_WRITE);
}
if (isSequential) {
long tStart = System.currentTimeMillis();
sequentialTest(fs, testType, fileSize, nrFiles);
long execTime = System.currentTimeMillis() - tStart;
String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
LOG.info(resultLine);
return;
}
if (testType == TEST_TYPE_CLEANUP) {
cleanup(fs);
return;
}
createControlFile(fs, fileSize, nrFiles);
long tStart = System.currentTimeMillis();
if (testType == TEST_TYPE_WRITE)
writeTest(fs);
if (testType == TEST_TYPE_READ)
readTest(fs);
long execTime = System.currentTimeMillis() - tStart;
analyzeResult(fs, testType, execTime, resFileName);
} catch(Exception e) {
System.err.print(e.getLocalizedMessage());
System.exit(-1);
}
}
private static void analyzeResult( FileSystem fs,
int testType,
long execTime,
String resFileName
) throws IOException {
Path reduceFile;
if (testType == TEST_TYPE_WRITE)
reduceFile = new Path(WRITE_DIR, "part-00000");
else
reduceFile = new Path(READ_DIR, "part-00000");
DataInputStream in;
in = new DataInputStream(fs.open(reduceFile));
BufferedReader lines;
lines = new BufferedReader(new InputStreamReader(in));
long tasks = 0;
long size = 0;
long time = 0;
float rate = 0;
float sqrate = 0;
String line;
while((line = lines.readLine()) != null) {
StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
String attr = tokens.nextToken();
if (attr.endsWith(":tasks"))
tasks = Long.parseLong(tokens.nextToken());
else if (attr.endsWith(":size"))
        size = Long.parseLong(tokens.nextToken());
else if (attr.endsWith(":time"))
time = Long.parseLong(tokens.nextToken());
else if (attr.endsWith(":rate"))
rate = Float.parseFloat(tokens.nextToken());
else if (attr.endsWith(":sqrate"))
sqrate = Float.parseFloat(tokens.nextToken());
}
double med = rate / 1000 / tasks;
double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med));
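    // Worked example with illustrative numbers: two tasks, each measuring an
    // io rate of 40 mb/sec, accumulate rate = 2 * 40 * 1000 = 80000 and
    // sqrate = 2 * 40 * 40 * 1000 = 3200000, so
    //   med    = 80000 / 1000 / 2 = 40      (the arithmetic mean, despite the name)
    //   stdDev = sqrt(|3200000 / 1000 / 2 - 40 * 40|) = 0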
    String[] resultLines = {
"----- DFSCIOTest ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" :
(testType == TEST_TYPE_READ) ? "read" :
"unknown"),
" Date & time: " + new Date(System.currentTimeMillis()),
" Number of files: " + tasks,
"Total MBytes processed: " + size/MEGA,
" Throughput mb/sec: " + size * 1000.0 / (time * MEGA),
"Average IO rate mb/sec: " + med,
" Std IO rate deviation: " + stdDev,
" Test exec time sec: " + (float)execTime / 1000,
"" };
PrintStream res = new PrintStream(
new FileOutputStream(
new File(resFileName), true));
for(int i = 0; i < resultLines.length; i++) {
LOG.info(resultLines[i]);
res.println(resultLines[i]);
}
}
private static void cleanup(FileSystem fs) throws Exception {
LOG.info("Cleaning up test files");
fs.delete(new Path(TEST_ROOT_DIR), true);
fs.delete(HDFS_TEST_DIR, true);
}
}
| 19,867 | 34.927667 | 138 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DistributedFSCheck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.util.Date;
import java.util.StringTokenizer;
import java.util.TreeSet;
import java.util.Vector;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.*;
import org.junit.Ignore;
/**
* Distributed checkup of the file system consistency.
* <p>
* Test file system consistency by reading each block of each file
* of the specified file tree.
* Report corrupted blocks and general file statistics.
* <p>
* Optionally displays statistics on read performance.
*
*/
@Ignore
public class DistributedFSCheck extends TestCase {
// Constants
private static final Log LOG = LogFactory.getLog(DistributedFSCheck.class);
private static final int TEST_TYPE_READ = 0;
private static final int TEST_TYPE_CLEANUP = 2;
private static final int DEFAULT_BUFFER_SIZE = 1000000;
private static final String DEFAULT_RES_FILE_NAME = "DistributedFSCheck_results.log";
private static final long MEGA = 0x100000;
private static Configuration fsConfig = new Configuration();
private static Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/benchmarks/DistributedFSCheck"));
private static Path MAP_INPUT_DIR = new Path(TEST_ROOT_DIR, "map_input");
private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read");
private FileSystem fs;
private long nrFiles;
DistributedFSCheck(Configuration conf) throws Exception {
fsConfig = conf;
this.fs = FileSystem.get(conf);
}
/**
* Run distributed checkup for the entire files system.
*
* @throws Exception
*/
public void testFSBlocks() throws Exception {
testFSBlocks("/");
}
/**
* Run distributed checkup for the specified directory.
*
* @param rootName root directory name
* @throws Exception
*/
public void testFSBlocks(String rootName) throws Exception {
createInputFile(rootName);
runDistributedFSCheck();
cleanup(); // clean up after all to restore the system state
}
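  // The same check can be run from the command line via main(); an
  // illustrative invocation (arguments are examples only, see the usage
  // string in main()):
  //
  //   DistributedFSCheck -root /user -stats -resFile DistributedFSCheck_results.log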
private void createInputFile(String rootName) throws IOException {
cleanup(); // clean up if previous run failed
Path inputFile = new Path(MAP_INPUT_DIR, "in_file");
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, fsConfig, inputFile,
Text.class, LongWritable.class, CompressionType.NONE);
try {
nrFiles = 0;
listSubtree(new Path(rootName), writer);
} finally {
writer.close();
}
LOG.info("Created map input files.");
}
private void listSubtree(Path rootFile,
SequenceFile.Writer writer
) throws IOException {
FileStatus rootStatus = fs.getFileStatus(rootFile);
listSubtree(rootStatus, writer);
}
private void listSubtree(FileStatus rootStatus,
SequenceFile.Writer writer
) throws IOException {
Path rootFile = rootStatus.getPath();
if (rootStatus.isFile()) {
nrFiles++;
// For a regular file generate <fName,offset> pairs
long blockSize = fs.getDefaultBlockSize(rootFile);
long fileLength = rootStatus.getLen();
for(long offset = 0; offset < fileLength; offset += blockSize)
writer.append(new Text(rootFile.toString()), new LongWritable(offset));
return;
}
FileStatus [] children = null;
try {
children = fs.listStatus(rootFile);
} catch (FileNotFoundException fnfe ){
throw new IOException("Could not get listing for " + rootFile);
}
for (int i = 0; i < children.length; i++)
listSubtree(children[i], writer);
}
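  // Example of the generated map input (illustrative sizes): for a 300 MB
  // file and a 128 MB default block size, three records are appended,
  //   <"/path/file", 0>, <"/path/file", 134217728>, <"/path/file", 268435456>,
  // so each map task ends up reading one block-sized region of one file.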
/**
* DistributedFSCheck mapper class.
*/
public static class DistributedFSCheckMapper extends IOMapperBase<Object> {
public DistributedFSCheckMapper() {
}
public Object doIO(Reporter reporter,
String name,
long offset
) throws IOException {
// open file
FSDataInputStream in = null;
Path p = new Path(name);
try {
in = fs.open(p);
} catch(IOException e) {
return name + "@(missing)";
}
in.seek(offset);
long actualSize = 0;
try {
long blockSize = fs.getDefaultBlockSize(p);
reporter.setStatus("reading " + name + "@" +
offset + "/" + blockSize);
for( int curSize = bufferSize;
curSize == bufferSize && actualSize < blockSize;
actualSize += curSize) {
curSize = in.read(buffer, 0, bufferSize);
}
} catch(IOException e) {
LOG.info("Corrupted block detected in \"" + name + "\" at " + offset);
return name + "@" + offset;
} finally {
in.close();
}
      return Long.valueOf(actualSize);
}
void collectStats(OutputCollector<Text, Text> output,
String name,
long execTime,
Object corruptedBlock) throws IOException {
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "blocks"),
new Text(String.valueOf(1)));
if (corruptedBlock.getClass().getName().endsWith("String")) {
output.collect(
new Text(AccumulatingReducer.VALUE_TYPE_STRING + "badBlocks"),
new Text((String)corruptedBlock));
return;
}
long totalSize = ((Long)corruptedBlock).longValue();
      float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
LOG.info("Number of bytes processed = " + totalSize);
LOG.info("Exec time = " + execTime);
LOG.info("IO rate = " + ioRateMbSec);
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
new Text(String.valueOf(totalSize)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
new Text(String.valueOf(execTime)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
new Text(String.valueOf(ioRateMbSec*1000)));
}
}
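  // Output sketch for one block: a healthy block contributes "l:blocks",
  // "l:size", "l:time" and "f:rate" records, while a corrupted block
  // contributes an "s:badBlocks" record whose value is "fileName@offset".
  // (The type prefixes are the assumed AccumulatingReducer VALUE_TYPE_*
  // constants; the reducer is expected to join string values with ';',
  // which is why analyzeResult() below splits the badBlocks value on ';'.)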
private void runDistributedFSCheck() throws Exception {
JobConf job = new JobConf(fs.getConf(), DistributedFSCheck.class);
FileInputFormat.setInputPaths(job, MAP_INPUT_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(DistributedFSCheckMapper.class);
job.setReducerClass(AccumulatingReducer.class);
FileOutputFormat.setOutputPath(job, READ_DIR);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
public static void main(String[] args) throws Exception {
int testType = TEST_TYPE_READ;
int bufferSize = DEFAULT_BUFFER_SIZE;
String resFileName = DEFAULT_RES_FILE_NAME;
String rootName = "/";
boolean viewStats = false;
String usage = "Usage: DistributedFSCheck [-root name] [-clean] [-resFile resultFileName] [-bufferSize Bytes] [-stats] ";
if (args.length == 1 && args[0].startsWith("-h")) {
System.err.println(usage);
System.exit(-1);
}
for(int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-root")) {
rootName = args[++i];
} else if (args[i].startsWith("-clean")) {
testType = TEST_TYPE_CLEANUP;
} else if (args[i].equals("-bufferSize")) {
bufferSize = Integer.parseInt(args[++i]);
} else if (args[i].equals("-resFile")) {
resFileName = args[++i];
} else if (args[i].startsWith("-stat")) {
viewStats = true;
}
}
LOG.info("root = " + rootName);
LOG.info("bufferSize = " + bufferSize);
Configuration conf = new Configuration();
conf.setInt("test.io.file.buffer.size", bufferSize);
DistributedFSCheck test = new DistributedFSCheck(conf);
if (testType == TEST_TYPE_CLEANUP) {
test.cleanup();
return;
}
test.createInputFile(rootName);
long tStart = System.currentTimeMillis();
test.runDistributedFSCheck();
long execTime = System.currentTimeMillis() - tStart;
test.analyzeResult(execTime, resFileName, viewStats);
// test.cleanup(); // clean up after all to restore the system state
}
private void analyzeResult(long execTime,
String resFileName,
boolean viewStats
) throws IOException {
    Path reduceFile = new Path(READ_DIR, "part-00000");
DataInputStream in;
in = new DataInputStream(fs.open(reduceFile));
BufferedReader lines;
lines = new BufferedReader(new InputStreamReader(in));
long blocks = 0;
long size = 0;
long time = 0;
float rate = 0;
StringTokenizer badBlocks = null;
long nrBadBlocks = 0;
String line;
while((line = lines.readLine()) != null) {
StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
String attr = tokens.nextToken();
if (attr.endsWith("blocks"))
blocks = Long.parseLong(tokens.nextToken());
else if (attr.endsWith("size"))
size = Long.parseLong(tokens.nextToken());
else if (attr.endsWith("time"))
time = Long.parseLong(tokens.nextToken());
else if (attr.endsWith("rate"))
rate = Float.parseFloat(tokens.nextToken());
else if (attr.endsWith("badBlocks")) {
badBlocks = new StringTokenizer(tokens.nextToken(), ";");
nrBadBlocks = badBlocks.countTokens();
}
}
Vector<String> resultLines = new Vector<String>();
resultLines.add( "----- DistributedFSCheck ----- : ");
resultLines.add( " Date & time: " + new Date(System.currentTimeMillis()));
resultLines.add( " Total number of blocks: " + blocks);
resultLines.add( " Total number of files: " + nrFiles);
resultLines.add( "Number of corrupted blocks: " + nrBadBlocks);
int nrBadFilesPos = resultLines.size();
TreeSet<String> badFiles = new TreeSet<String>();
long nrBadFiles = 0;
if (nrBadBlocks > 0) {
resultLines.add("");
resultLines.add("----- Corrupted Blocks (file@offset) ----- : ");
while(badBlocks.hasMoreTokens()) {
String curBlock = badBlocks.nextToken();
resultLines.add(curBlock);
badFiles.add(curBlock.substring(0, curBlock.indexOf('@')));
}
nrBadFiles = badFiles.size();
}
resultLines.insertElementAt(" Number of corrupted files: " + nrBadFiles, nrBadFilesPos);
if (viewStats) {
resultLines.add("");
resultLines.add("----- Performance ----- : ");
resultLines.add(" Total MBytes read: " + size/MEGA);
resultLines.add(" Throughput mb/sec: " + (float)size * 1000.0 / (time * MEGA));
resultLines.add(" Average IO rate mb/sec: " + rate / 1000 / blocks);
resultLines.add(" Test exec time sec: " + (float)execTime / 1000);
}
PrintStream res = new PrintStream(
new FileOutputStream(
new File(resFileName), true));
for(int i = 0; i < resultLines.size(); i++) {
String cur = resultLines.get(i);
LOG.info(cur);
res.println(cur);
}
}
private void cleanup() throws IOException {
LOG.info("Cleaning up test files");
fs.delete(TEST_ROOT_DIR, true);
}
}
| 12,767 | 34.368421 | 125 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.Random;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;
import java.util.Map;
import java.util.HashMap;
import java.net.InetSocketAddress;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
public class TestFileSystem extends TestCase {
private static final Log LOG = FileSystem.LOG;
private static Configuration conf = new Configuration();
private static int BUFFER_SIZE = conf.getInt("io.file.buffer.size", 4096);
private static final long MEGA = 1024 * 1024;
private static final int SEEKS_PER_FILE = 4;
private static String ROOT = System.getProperty("test.build.data","fs_test");
private static Path CONTROL_DIR = new Path(ROOT, "fs_control");
private static Path WRITE_DIR = new Path(ROOT, "fs_write");
private static Path READ_DIR = new Path(ROOT, "fs_read");
private static Path DATA_DIR = new Path(ROOT, "fs_data");
public void testFs() throws Exception {
testFs(10 * MEGA, 100, 0);
}
public static void testFs(long megaBytes, int numFiles, long seed)
throws Exception {
FileSystem fs = FileSystem.get(conf);
if (seed == 0)
seed = new Random().nextLong();
LOG.info("seed = "+seed);
createControlFile(fs, megaBytes, numFiles, seed);
writeTest(fs, false);
readTest(fs, false);
seekTest(fs, false);
fs.delete(CONTROL_DIR, true);
fs.delete(DATA_DIR, true);
fs.delete(WRITE_DIR, true);
fs.delete(READ_DIR, true);
}
public static void testCommandFormat() throws Exception {
// This should go to TestFsShell.java when it is added.
CommandFormat cf;
    cf = new CommandFormat("copyToLocal", 2, 2, "crc", "ignoreCrc");
assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-");
try {
cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1);
fail("Expected parsing to fail as it should stop at first non-option");
}
catch (Exception e) {
// Expected
}
cf = new CommandFormat("tail", 1, 1, "f");
assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName");
assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName");
cf = new CommandFormat("setrep", 2, 2, "R", "w");
assertEquals(cf.parse(new String[] {"-setrep","-R","2","/foo/bar"}, 1).get(1), "/foo/bar");
cf = new CommandFormat("put", 2, 10000);
assertEquals(cf.parse(new String[] {"-put", "-", "dest"}, 1).get(1), "dest");
}
public static void createControlFile(FileSystem fs,
long megaBytes, int numFiles,
long seed) throws Exception {
LOG.info("creating control file: "+megaBytes+" bytes, "+numFiles+" files");
Path controlFile = new Path(CONTROL_DIR, "files");
fs.delete(controlFile, true);
Random random = new Random(seed);
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, controlFile,
Text.class, LongWritable.class, CompressionType.NONE);
long totalSize = 0;
long maxSize = ((megaBytes / numFiles) * 2) + 1;
try {
while (totalSize < megaBytes) {
Text name = new Text(Long.toString(random.nextLong()));
long size = random.nextLong();
if (size < 0)
size = -size;
size = size % maxSize;
//LOG.info(" adding: name="+name+" size="+size);
writer.append(name, new LongWritable(size));
totalSize += size;
}
} finally {
writer.close();
}
LOG.info("created control file for: "+totalSize+" bytes");
}
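  // Worked example with illustrative numbers: for megaBytes = 10*MEGA and
  // numFiles = 100, maxSize = (10485760 / 100) * 2 + 1 = 209715, so the
  // generated file sizes are roughly uniform in [0, 209714] bytes, averaging
  // about 100 KB each, and the loop keeps appending files until the total
  // reaches the requested 10 MB.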
public static class WriteMapper extends Configured
implements Mapper<Text, LongWritable, Text, LongWritable> {
private Random random = new Random();
private byte[] buffer = new byte[BUFFER_SIZE];
private FileSystem fs;
private boolean fastCheck;
// a random suffix per task
private String suffix = "-"+random.nextLong();
{
try {
fs = FileSystem.get(conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public WriteMapper() { super(null); }
public WriteMapper(Configuration conf) { super(conf); }
public void configure(JobConf job) {
setConf(job);
fastCheck = job.getBoolean("fs.test.fastCheck", false);
}
public void map(Text key, LongWritable value,
OutputCollector<Text, LongWritable> collector,
Reporter reporter)
throws IOException {
String name = key.toString();
long size = value.get();
long seed = Long.parseLong(name);
random.setSeed(seed);
reporter.setStatus("creating " + name);
// write to temp file initially to permit parallel execution
Path tempFile = new Path(DATA_DIR, name+suffix);
OutputStream out = fs.create(tempFile);
long written = 0;
try {
while (written < size) {
if (fastCheck) {
Arrays.fill(buffer, (byte)random.nextInt(Byte.MAX_VALUE));
} else {
random.nextBytes(buffer);
}
long remains = size - written;
int length = (remains<=buffer.length) ? (int)remains : buffer.length;
out.write(buffer, 0, length);
written += length;
reporter.setStatus("writing "+name+"@"+written+"/"+size);
}
} finally {
out.close();
}
// rename to final location
fs.rename(tempFile, new Path(DATA_DIR, name));
collector.collect(new Text("bytes"), new LongWritable(written));
reporter.setStatus("wrote " + name);
}
public void close() {
}
}
public static void writeTest(FileSystem fs, boolean fastCheck)
throws Exception {
fs.delete(DATA_DIR, true);
fs.delete(WRITE_DIR, true);
JobConf job = new JobConf(conf, TestFileSystem.class);
job.setBoolean("fs.test.fastCheck", fastCheck);
FileInputFormat.setInputPaths(job, CONTROL_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(WriteMapper.class);
job.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(job, WRITE_DIR);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
public static class ReadMapper extends Configured
implements Mapper<Text, LongWritable, Text, LongWritable> {
private Random random = new Random();
private byte[] buffer = new byte[BUFFER_SIZE];
private byte[] check = new byte[BUFFER_SIZE];
private FileSystem fs;
private boolean fastCheck;
{
try {
fs = FileSystem.get(conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public ReadMapper() { super(null); }
public ReadMapper(Configuration conf) { super(conf); }
public void configure(JobConf job) {
setConf(job);
fastCheck = job.getBoolean("fs.test.fastCheck", false);
}
public void map(Text key, LongWritable value,
OutputCollector<Text, LongWritable> collector,
Reporter reporter)
throws IOException {
String name = key.toString();
long size = value.get();
long seed = Long.parseLong(name);
random.setSeed(seed);
reporter.setStatus("opening " + name);
DataInputStream in =
new DataInputStream(fs.open(new Path(DATA_DIR, name)));
long read = 0;
try {
while (read < size) {
long remains = size - read;
int n = (remains<=buffer.length) ? (int)remains : buffer.length;
in.readFully(buffer, 0, n);
read += n;
if (fastCheck) {
Arrays.fill(check, (byte)random.nextInt(Byte.MAX_VALUE));
} else {
random.nextBytes(check);
}
if (n != buffer.length) {
Arrays.fill(buffer, n, buffer.length, (byte)0);
Arrays.fill(check, n, check.length, (byte)0);
}
assertTrue(Arrays.equals(buffer, check));
reporter.setStatus("reading "+name+"@"+read+"/"+size);
}
} finally {
in.close();
}
collector.collect(new Text("bytes"), new LongWritable(read));
reporter.setStatus("read " + name);
}
public void close() {
}
}
public static void readTest(FileSystem fs, boolean fastCheck)
throws Exception {
fs.delete(READ_DIR, true);
JobConf job = new JobConf(conf, TestFileSystem.class);
job.setBoolean("fs.test.fastCheck", fastCheck);
FileInputFormat.setInputPaths(job, CONTROL_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(ReadMapper.class);
job.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(job, READ_DIR);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
public static class SeekMapper<K> extends Configured
implements Mapper<Text, LongWritable, K, LongWritable> {
private Random random = new Random();
private byte[] check = new byte[BUFFER_SIZE];
private FileSystem fs;
private boolean fastCheck;
{
try {
fs = FileSystem.get(conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public SeekMapper() { super(null); }
public SeekMapper(Configuration conf) { super(conf); }
public void configure(JobConf job) {
setConf(job);
fastCheck = job.getBoolean("fs.test.fastCheck", false);
}
public void map(Text key, LongWritable value,
OutputCollector<K, LongWritable> collector,
Reporter reporter)
throws IOException {
String name = key.toString();
long size = value.get();
long seed = Long.parseLong(name);
if (size == 0) return;
reporter.setStatus("opening " + name);
FSDataInputStream in = fs.open(new Path(DATA_DIR, name));
try {
for (int i = 0; i < SEEKS_PER_FILE; i++) {
// generate a random position
long position = Math.abs(random.nextLong()) % size;
// seek file to that position
reporter.setStatus("seeking " + name);
in.seek(position);
byte b = in.readByte();
// check that byte matches
byte checkByte = 0;
// advance random state to that position
random.setSeed(seed);
for (int p = 0; p <= position; p+= check.length) {
reporter.setStatus("generating data for " + name);
if (fastCheck) {
checkByte = (byte)random.nextInt(Byte.MAX_VALUE);
} else {
random.nextBytes(check);
checkByte = check[(int)(position % check.length)];
}
}
assertEquals(b, checkByte);
}
} finally {
in.close();
}
}
public void close() {
}
}
public static void seekTest(FileSystem fs, boolean fastCheck)
throws Exception {
fs.delete(READ_DIR, true);
JobConf job = new JobConf(conf, TestFileSystem.class);
job.setBoolean("fs.test.fastCheck", fastCheck);
FileInputFormat.setInputPaths(job,CONTROL_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(SeekMapper.class);
job.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(job, READ_DIR);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
public static void main(String[] args) throws Exception {
int megaBytes = 10;
int files = 100;
boolean noRead = false;
boolean noWrite = false;
boolean noSeek = false;
boolean fastCheck = false;
long seed = new Random().nextLong();
String usage = "Usage: TestFileSystem -files N -megaBytes M [-noread] [-nowrite] [-noseek] [-fastcheck]";
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-files")) {
files = Integer.parseInt(args[++i]);
} else if (args[i].equals("-megaBytes")) {
megaBytes = Integer.parseInt(args[++i]);
} else if (args[i].equals("-noread")) {
noRead = true;
} else if (args[i].equals("-nowrite")) {
noWrite = true;
} else if (args[i].equals("-noseek")) {
noSeek = true;
} else if (args[i].equals("-fastcheck")) {
fastCheck = true;
}
}
LOG.info("seed = "+seed);
LOG.info("files = " + files);
LOG.info("megaBytes = " + megaBytes);
FileSystem fs = FileSystem.get(conf);
if (!noWrite) {
createControlFile(fs, megaBytes*MEGA, files, seed);
writeTest(fs, fastCheck);
}
if (!noRead) {
readTest(fs, fastCheck);
}
if (!noSeek) {
seekTest(fs, fastCheck);
}
}
public void testFsCache() throws Exception {
{
long now = System.currentTimeMillis();
String[] users = new String[]{"foo","bar"};
final Configuration conf = new Configuration();
FileSystem[] fs = new FileSystem[users.length];
for(int i = 0; i < users.length; i++) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(users[i]);
fs[i] = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException {
return FileSystem.get(conf);
}});
for(int j = 0; j < i; j++) {
assertFalse(fs[j] == fs[i]);
}
}
FileSystem.closeAll();
}
{
try {
runTestCache(NameNode.DEFAULT_PORT);
} catch(java.net.BindException be) {
LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
+ NameNode.DEFAULT_PORT + ")", be);
}
runTestCache(0);
}
}
static void runTestCache(int port) throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(port)
.numDataNodes(2).build();
URI uri = cluster.getFileSystem().getUri();
LOG.info("uri=" + uri);
{
FileSystem fs = FileSystem.get(uri, new Configuration());
checkPath(cluster, fs);
for(int i = 0; i < 100; i++) {
assertTrue(fs == FileSystem.get(uri, new Configuration()));
}
}
if (port == NameNode.DEFAULT_PORT) {
//test explicit default port
URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
uri.getQuery(), uri.getFragment());
LOG.info("uri2=" + uri2);
FileSystem fs = FileSystem.get(uri2, conf);
checkPath(cluster, fs);
for(int i = 0; i < 100; i++) {
assertTrue(fs == FileSystem.get(uri2, new Configuration()));
}
}
} finally {
if (cluster != null) cluster.shutdown();
}
}
static void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOException {
InetSocketAddress add = cluster.getNameNode().getNameNodeAddress();
// Test upper/lower case
fileSys.checkPath(new Path("hdfs://"
+ StringUtils.toUpperCase(add.getHostName()) + ":" + add.getPort()));
}
public void testFsClose() throws Exception {
{
Configuration conf = new Configuration();
new Path("file:///").getFileSystem(conf);
FileSystem.closeAll();
}
{
Configuration conf = new Configuration();
new Path("hftp://localhost:12345/").getFileSystem(conf);
FileSystem.closeAll();
}
{
Configuration conf = new Configuration();
FileSystem fs = new Path("hftp://localhost:12345/").getFileSystem(conf);
FileSystem.closeAll();
}
}
public void testFsShutdownHook() throws Exception {
final Set<FileSystem> closed = Collections.synchronizedSet(new HashSet<FileSystem>());
Configuration conf = new Configuration();
Configuration confNoAuto = new Configuration();
conf.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
confNoAuto.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
confNoAuto.setBoolean("fs.automatic.close", false);
TestShutdownFileSystem fsWithAuto =
(TestShutdownFileSystem)(new Path("test://a/").getFileSystem(conf));
TestShutdownFileSystem fsWithoutAuto =
(TestShutdownFileSystem)(new Path("test://b/").getFileSystem(confNoAuto));
fsWithAuto.setClosedSet(closed);
fsWithoutAuto.setClosedSet(closed);
// Different URIs should result in different FS instances
assertNotSame(fsWithAuto, fsWithoutAuto);
FileSystem.CACHE.closeAll(true);
assertEquals(1, closed.size());
assertTrue(closed.contains(fsWithAuto));
closed.clear();
FileSystem.closeAll();
assertEquals(1, closed.size());
assertTrue(closed.contains(fsWithoutAuto));
}
public void testCacheKeysAreCaseInsensitive()
throws Exception
{
Configuration conf = new Configuration();
// check basic equality
FileSystem.Cache.Key lowercaseCachekey1 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf);
FileSystem.Cache.Key lowercaseCachekey2 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf);
assertEquals( lowercaseCachekey1, lowercaseCachekey2 );
// check insensitive equality
FileSystem.Cache.Key uppercaseCachekey = new FileSystem.Cache.Key(new URI("HFTP://Localhost:12345/"), conf);
assertEquals( lowercaseCachekey2, uppercaseCachekey );
// check behaviour with collections
List<FileSystem.Cache.Key> list = new ArrayList<FileSystem.Cache.Key>();
list.add(uppercaseCachekey);
assertTrue(list.contains(uppercaseCachekey));
assertTrue(list.contains(lowercaseCachekey2));
Set<FileSystem.Cache.Key> set = new HashSet<FileSystem.Cache.Key>();
set.add(uppercaseCachekey);
assertTrue(set.contains(uppercaseCachekey));
assertTrue(set.contains(lowercaseCachekey2));
Map<FileSystem.Cache.Key, String> map = new HashMap<FileSystem.Cache.Key, String>();
map.put(uppercaseCachekey, "");
assertTrue(map.containsKey(uppercaseCachekey));
assertTrue(map.containsKey(lowercaseCachekey2));
}
public static void testFsUniqueness(long megaBytes, int numFiles, long seed)
throws Exception {
// multiple invocations of FileSystem.get return the same object.
FileSystem fs1 = FileSystem.get(conf);
FileSystem fs2 = FileSystem.get(conf);
assertTrue(fs1 == fs2);
// multiple invocations of FileSystem.newInstance return different objects
fs1 = FileSystem.newInstance(conf);
fs2 = FileSystem.newInstance(conf);
assertTrue(fs1 != fs2 && !fs1.equals(fs2));
fs1.close();
fs2.close();
}
public static class TestShutdownFileSystem extends RawLocalFileSystem {
private Set<FileSystem> closedSet;
public void setClosedSet(Set<FileSystem> closedSet) {
this.closedSet = closedSet;
}
public void close() throws IOException {
if (closedSet != null) {
closedSet.add(this);
}
super.close();
}
}
}
| 21,233 | 30.318584 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
/**
* Base mapper class for IO operations.
* <p>
 * Two abstract methods, {@link #doIO(Reporter, String, long)} and
 * {@link #collectStats(OutputCollector,String,long,Object)}, should be
 * overridden in derived classes to define the IO operation and the
* statistics data to be collected by subsequent reducers.
*
*/
public abstract class IOMapperBase<T> extends Configured
implements Mapper<Text, LongWritable, Text, Text> {
protected byte[] buffer;
protected int bufferSize;
protected FileSystem fs;
protected String hostName;
protected Closeable stream;
public IOMapperBase() {
}
public void configure(JobConf conf) {
setConf(conf);
try {
fs = FileSystem.get(conf);
} catch (Exception e) {
throw new RuntimeException("Cannot create file system.", e);
}
bufferSize = conf.getInt("test.io.file.buffer.size", 4096);
buffer = new byte[bufferSize];
try {
hostName = InetAddress.getLocalHost().getHostName();
} catch(Exception e) {
hostName = "localhost";
}
}
public void close() throws IOException {
}
/**
* Perform io operation, usually read or write.
*
* @param reporter
* @param name file name
* @param value offset within the file
* @return object that is passed as a parameter to
* {@link #collectStats(OutputCollector,String,long,Object)}
* @throws IOException
*/
abstract T doIO(Reporter reporter,
String name,
long value) throws IOException;
/**
* Create an input or output stream based on the specified file.
* Subclasses should override this method to provide an actual stream.
*
* @param name file name
* @return the stream
* @throws IOException
*/
public Closeable getIOStream(String name) throws IOException {
return null;
}
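  // Minimal sketch of a concrete subclass (illustrative only, not part of
  // the framework): it performs no real IO and simply reports the requested
  // byte count back to the reducer.
  //
  //   public static class NoopMapper extends IOMapperBase<Long> {
  //     @Override
  //     Long doIO(Reporter reporter, String name, long value) throws IOException {
  //       return Long.valueOf(value);              // pretend 'value' bytes were processed
  //     }
  //     @Override
  //     void collectStats(OutputCollector<Text, Text> output, String name,
  //                       long execTime, Long result) throws IOException {
  //       output.collect(new Text("size:" + name), new Text(result.toString()));
  //     }
  //   }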
/**
* Collect stat data to be combined by a subsequent reducer.
*
* @param output
* @param name file name
* @param execTime IO execution time
* @param doIOReturnValue value returned by {@link #doIO(Reporter,String,long)}
* @throws IOException
*/
abstract void collectStats(OutputCollector<Text, Text> output,
String name,
long execTime,
T doIOReturnValue) throws IOException;
/**
* Map file name and offset into statistical data.
* <p>
* The map task is to get the
* <tt>key</tt>, which contains the file name, and the
* <tt>value</tt>, which is the offset within the file.
*
* The parameters are passed to the abstract method
* {@link #doIO(Reporter,String,long)}, which performs the io operation,
* usually read or write data, and then
* {@link #collectStats(OutputCollector,String,long,Object)}
* is called to prepare stat data for a subsequent reducer.
*/
public void map(Text key,
LongWritable value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
String name = key.toString();
long longValue = value.get();
reporter.setStatus("starting " + name + " ::host = " + hostName);
this.stream = getIOStream(name);
T statValue = null;
long tStart = System.currentTimeMillis();
try {
statValue = doIO(reporter, name, longValue);
} finally {
if(stream != null) stream.close();
}
long tEnd = System.currentTimeMillis();
long execTime = tEnd - tStart;
collectStats(output, name, execTime, statValue);
reporter.setStatus("finished " + name + " ::host = " + hostName);
}
}
| 4,732 | 31.641379 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintStream;
import java.util.Date;
import java.util.Random;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Distributed i/o benchmark.
* <p>
* This test writes into or reads from a specified number of files.
* Number of bytes to write or read is specified as a parameter to the test.
* Each file is accessed in a separate map task.
* <p>
* The reducer collects the following statistics:
* <ul>
* <li>number of tasks completed</li>
* <li>number of bytes written/read</li>
* <li>execution time</li>
* <li>io rate</li>
* <li>io rate squared</li>
* </ul>
*
* Finally, the following information is appended to a local file
* <ul>
* <li>read or write test</li>
* <li>date and time the test finished</li>
* <li>number of files</li>
* <li>total number of bytes processed</li>
* <li>throughput in mb/sec (total number of bytes / sum of processing times)</li>
* <li>average i/o rate in mb/sec per file</li>
* <li>standard deviation of i/o rate </li>
* </ul>
*/
public class TestDFSIO implements Tool {
// Constants
private static final Log LOG = LogFactory.getLog(TestDFSIO.class);
private static final int DEFAULT_BUFFER_SIZE = 1000000;
private static final String BASE_FILE_NAME = "test_io_";
private static final String DEFAULT_RES_FILE_NAME = "TestDFSIO_results.log";
private static final long MEGA = ByteMultiple.MB.value();
private static final int DEFAULT_NR_BYTES = 128;
private static final int DEFAULT_NR_FILES = 4;
private static final String USAGE =
"Usage: " + TestDFSIO.class.getSimpleName() +
" [genericOptions]" +
" -read [-random | -backward | -skip [-skipSize Size]] |" +
" -write | -append | -truncate | -clean" +
" [-compression codecClassName]" +
" [-nrFiles N]" +
" [-size Size[B|KB|MB|GB|TB]]" +
" [-resFile resultFileName] [-bufferSize Bytes]" +
" [-rootDir]";
private Configuration config;
static{
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
Configuration.addDefaultResource("mapred-default.xml");
Configuration.addDefaultResource("mapred-site.xml");
}
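  // Typical invocations (illustrative arguments only; see USAGE above):
  //
  //   TestDFSIO -write -nrFiles 4 -size 128MB
  //   TestDFSIO -read  -nrFiles 4 -size 128MB
  //   TestDFSIO -clean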
private static enum TestType {
TEST_TYPE_READ("read"),
TEST_TYPE_WRITE("write"),
TEST_TYPE_CLEANUP("cleanup"),
TEST_TYPE_APPEND("append"),
TEST_TYPE_READ_RANDOM("random read"),
TEST_TYPE_READ_BACKWARD("backward read"),
TEST_TYPE_READ_SKIP("skip read"),
TEST_TYPE_TRUNCATE("truncate");
private String type;
private TestType(String t) {
type = t;
}
@Override // String
public String toString() {
return type;
}
}
static enum ByteMultiple {
B(1L),
KB(0x400L),
MB(0x100000L),
GB(0x40000000L),
TB(0x10000000000L);
private long multiplier;
private ByteMultiple(long mult) {
multiplier = mult;
}
long value() {
return multiplier;
}
static ByteMultiple parseString(String sMultiple) {
if(sMultiple == null || sMultiple.isEmpty()) // MB by default
return MB;
String sMU = StringUtils.toUpperCase(sMultiple);
if(StringUtils.toUpperCase(B.name()).endsWith(sMU))
return B;
if(StringUtils.toUpperCase(KB.name()).endsWith(sMU))
return KB;
if(StringUtils.toUpperCase(MB.name()).endsWith(sMU))
return MB;
if(StringUtils.toUpperCase(GB.name()).endsWith(sMU))
return GB;
if(StringUtils.toUpperCase(TB.name()).endsWith(sMU))
return TB;
throw new IllegalArgumentException("Unsupported ByteMultiple "+sMultiple);
}
}
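  // Example (illustrative): a "-size 2GB" argument resolves its suffix via
  // ByteMultiple.parseString("GB"), giving 2 * 0x40000000L = 2147483648
  // bytes; an empty or missing suffix falls back to MB (0x100000L bytes).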
public TestDFSIO() {
this.config = new Configuration();
}
private static String getBaseDir(Configuration conf) {
return conf.get("test.build.data","/benchmarks/TestDFSIO");
}
private static Path getControlDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_control");
}
private static Path getWriteDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_write");
}
private static Path getReadDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_read");
}
private static Path getAppendDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_append");
}
private static Path getRandomReadDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_random_read");
}
private static Path getTruncateDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_truncate");
}
private static Path getDataDir(Configuration conf) {
return new Path(getBaseDir(conf), "io_data");
}
private static MiniDFSCluster cluster;
private static TestDFSIO bench;
@BeforeClass
public static void beforeClass() throws Exception {
bench = new TestDFSIO();
bench.getConf().setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
bench.getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
cluster = new MiniDFSCluster.Builder(bench.getConf())
.numDataNodes(2)
.format(true)
.build();
FileSystem fs = cluster.getFileSystem();
bench.createControlFile(fs, DEFAULT_NR_BYTES, DEFAULT_NR_FILES);
/** Check write here, as it is required for other tests */
testWrite();
}
@AfterClass
public static void afterClass() throws Exception {
if(cluster == null)
return;
FileSystem fs = cluster.getFileSystem();
bench.cleanup(fs);
cluster.shutdown();
}
public static void testWrite() throws Exception {
FileSystem fs = cluster.getFileSystem();
long tStart = System.currentTimeMillis();
bench.writeTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_WRITE, execTime);
}
@Test (timeout = 3000)
public void testRead() throws Exception {
FileSystem fs = cluster.getFileSystem();
long tStart = System.currentTimeMillis();
bench.readTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_READ, execTime);
}
@Test (timeout = 3000)
public void testReadRandom() throws Exception {
FileSystem fs = cluster.getFileSystem();
long tStart = System.currentTimeMillis();
bench.getConf().setLong("test.io.skip.size", 0);
bench.randomReadTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_READ_RANDOM, execTime);
}
@Test (timeout = 3000)
public void testReadBackward() throws Exception {
FileSystem fs = cluster.getFileSystem();
long tStart = System.currentTimeMillis();
bench.getConf().setLong("test.io.skip.size", -DEFAULT_BUFFER_SIZE);
bench.randomReadTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_READ_BACKWARD, execTime);
}
@Test (timeout = 3000)
public void testReadSkip() throws Exception {
FileSystem fs = cluster.getFileSystem();
long tStart = System.currentTimeMillis();
bench.getConf().setLong("test.io.skip.size", 1);
bench.randomReadTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_READ_SKIP, execTime);
}
@Test (timeout = 6000)
public void testAppend() throws Exception {
FileSystem fs = cluster.getFileSystem();
long tStart = System.currentTimeMillis();
bench.appendTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_APPEND, execTime);
}
@Test (timeout = 60000)
public void testTruncate() throws Exception {
FileSystem fs = cluster.getFileSystem();
bench.createControlFile(fs, DEFAULT_NR_BYTES / 2, DEFAULT_NR_FILES);
long tStart = System.currentTimeMillis();
bench.truncateTest(fs);
long execTime = System.currentTimeMillis() - tStart;
bench.analyzeResult(fs, TestType.TEST_TYPE_TRUNCATE, execTime);
}
@SuppressWarnings("deprecation")
private void createControlFile(FileSystem fs,
long nrBytes, // in bytes
int nrFiles
) throws IOException {
LOG.info("creating control file: "+nrBytes+" bytes, "+nrFiles+" files");
Path controlDir = getControlDir(config);
fs.delete(controlDir, true);
for(int i=0; i < nrFiles; i++) {
String name = getFileName(i);
Path controlFile = new Path(controlDir, "in_file_" + name);
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(fs, config, controlFile,
Text.class, LongWritable.class,
CompressionType.NONE);
writer.append(new Text(name), new LongWritable(nrBytes));
} catch(Exception e) {
throw new IOException(e.getLocalizedMessage());
} finally {
if (writer != null)
writer.close();
writer = null;
}
}
LOG.info("created control files for: "+nrFiles+" files");
}
private static String getFileName(int fIdx) {
return BASE_FILE_NAME + Integer.toString(fIdx);
}
/**
* Write/Read mapper base class.
* <p>
* Collects the following statistics per task:
* <ul>
* <li>number of tasks completed</li>
* <li>number of bytes written/read</li>
* <li>execution time</li>
* <li>i/o rate</li>
* <li>i/o rate squared</li>
* </ul>
*/
private abstract static class IOStatMapper extends IOMapperBase<Long> {
protected CompressionCodec compressionCodec;
IOStatMapper() {
}
@Override // Mapper
public void configure(JobConf conf) {
super.configure(conf);
// grab compression
String compression = getConf().get("test.io.compression.class", null);
Class<? extends CompressionCodec> codec;
// try to initialize codec
try {
codec = (compression == null) ? null :
Class.forName(compression).asSubclass(CompressionCodec.class);
} catch(Exception e) {
throw new RuntimeException("Compression codec not found: ", e);
}
if(codec != null) {
compressionCodec = (CompressionCodec)
ReflectionUtils.newInstance(codec, getConf());
}
}
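    // To exercise the compression path, the job can be submitted with e.g.
    //   -D test.io.compression.class=org.apache.hadoop.io.compress.GzipCodec
    // (GzipCodec is only an illustrative codec choice; any CompressionCodec
    // implementation on the classpath should work).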
@Override // IOMapperBase
void collectStats(OutputCollector<Text, Text> output,
String name,
long execTime,
Long objSize) throws IOException {
long totalSize = objSize.longValue();
float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
LOG.info("Number of bytes processed = " + totalSize);
LOG.info("Exec time = " + execTime);
LOG.info("IO rate = " + ioRateMbSec);
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"),
new Text(String.valueOf(1)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
new Text(String.valueOf(totalSize)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
new Text(String.valueOf(execTime)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
new Text(String.valueOf(ioRateMbSec*1000)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"),
new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000)));
}
}
/**
* Write mapper class.
*/
public static class WriteMapper extends IOStatMapper {
public WriteMapper() {
for(int i=0; i < bufferSize; i++)
buffer[i] = (byte)('0' + i % 50);
}
@Override // IOMapperBase
public Closeable getIOStream(String name) throws IOException {
// create file
OutputStream out =
fs.create(new Path(getDataDir(getConf()), name), true, bufferSize);
if(compressionCodec != null)
out = compressionCodec.createOutputStream(out);
LOG.info("out = " + out.getClass().getName());
return out;
}
@Override // IOMapperBase
public Long doIO(Reporter reporter,
String name,
long totalSize // in bytes
) throws IOException {
OutputStream out = (OutputStream)this.stream;
// write to the file
long nrRemaining;
for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining;
out.write(buffer, 0, curSize);
reporter.setStatus("writing " + name + "@" +
(totalSize - nrRemaining) + "/" + totalSize
+ " ::host = " + hostName);
}
return Long.valueOf(totalSize);
}
}
private void writeTest(FileSystem fs) throws IOException {
Path writeDir = getWriteDir(config);
fs.delete(getDataDir(config), true);
fs.delete(writeDir, true);
runIOTest(WriteMapper.class, writeDir);
}
private void runIOTest(
Class<? extends Mapper<Text, LongWritable, Text, Text>> mapperClass,
Path outputDir) throws IOException {
JobConf job = new JobConf(config, TestDFSIO.class);
FileInputFormat.setInputPaths(job, getControlDir(config));
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(mapperClass);
job.setReducerClass(AccumulatingReducer.class);
FileOutputFormat.setOutputPath(job, outputDir);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
JobClient.runJob(job);
}
/**
* Append mapper class.
*/
public static class AppendMapper extends IOStatMapper {
public AppendMapper() {
for(int i=0; i < bufferSize; i++)
buffer[i] = (byte)('0' + i % 50);
}
@Override // IOMapperBase
public Closeable getIOStream(String name) throws IOException {
// open file for append
OutputStream out =
fs.append(new Path(getDataDir(getConf()), name), bufferSize);
if(compressionCodec != null)
out = compressionCodec.createOutputStream(out);
LOG.info("out = " + out.getClass().getName());
return out;
}
@Override // IOMapperBase
public Long doIO(Reporter reporter,
String name,
long totalSize // in bytes
) throws IOException {
OutputStream out = (OutputStream)this.stream;
// write to the file
long nrRemaining;
for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining;
out.write(buffer, 0, curSize);
reporter.setStatus("writing " + name + "@" +
(totalSize - nrRemaining) + "/" + totalSize
+ " ::host = " + hostName);
}
return Long.valueOf(totalSize);
}
}
private void appendTest(FileSystem fs) throws IOException {
Path appendDir = getAppendDir(config);
fs.delete(appendDir, true);
runIOTest(AppendMapper.class, appendDir);
}
/**
* Read mapper class.
*/
public static class ReadMapper extends IOStatMapper {
public ReadMapper() {
}
@Override // IOMapperBase
public Closeable getIOStream(String name) throws IOException {
// open file
InputStream in = fs.open(new Path(getDataDir(getConf()), name));
if(compressionCodec != null)
in = compressionCodec.createInputStream(in);
LOG.info("in = " + in.getClass().getName());
return in;
}
@Override // IOMapperBase
public Long doIO(Reporter reporter,
String name,
long totalSize // in bytes
) throws IOException {
InputStream in = (InputStream)this.stream;
long actualSize = 0;
while (actualSize < totalSize) {
int curSize = in.read(buffer, 0, bufferSize);
if(curSize < 0) break;
actualSize += curSize;
reporter.setStatus("reading " + name + "@" +
actualSize + "/" + totalSize
+ " ::host = " + hostName);
}
return Long.valueOf(actualSize);
}
}
private void readTest(FileSystem fs) throws IOException {
Path readDir = getReadDir(config);
fs.delete(readDir, true);
runIOTest(ReadMapper.class, readDir);
}
/**
* Mapper class for random reads.
* The mapper chooses a position in the file and reads bufferSize
* bytes starting at the chosen position.
* It stops after reading the totalSize bytes, specified by -size.
*
   * There are three types of reads.
* 1) Random read always chooses a random position to read from: skipSize = 0
* 2) Backward read reads file in reverse order : skipSize < 0
* 3) Skip-read skips skipSize bytes after every read : skipSize > 0
*/
public static class RandomReadMapper extends IOStatMapper {
private Random rnd;
private long fileSize;
private long skipSize;
@Override // Mapper
public void configure(JobConf conf) {
super.configure(conf);
skipSize = conf.getLong("test.io.skip.size", 0);
}
public RandomReadMapper() {
rnd = new Random();
}
@Override // IOMapperBase
public Closeable getIOStream(String name) throws IOException {
Path filePath = new Path(getDataDir(getConf()), name);
this.fileSize = fs.getFileStatus(filePath).getLen();
InputStream in = fs.open(filePath);
if(compressionCodec != null)
in = new FSDataInputStream(compressionCodec.createInputStream(in));
LOG.info("in = " + in.getClass().getName());
LOG.info("skipSize = " + skipSize);
return in;
}
@Override // IOMapperBase
public Long doIO(Reporter reporter,
String name,
long totalSize // in bytes
) throws IOException {
PositionedReadable in = (PositionedReadable)this.stream;
long actualSize = 0;
for(long pos = nextOffset(-1);
actualSize < totalSize; pos = nextOffset(pos)) {
int curSize = in.read(pos, buffer, 0, bufferSize);
if(curSize < 0) break;
actualSize += curSize;
reporter.setStatus("reading " + name + "@" +
actualSize + "/" + totalSize
+ " ::host = " + hostName);
}
return Long.valueOf(actualSize);
}
/**
* Get next offset for reading.
* If current < 0 then choose initial offset according to the read type.
*
     * @param current the current offset, or a negative value to request the initial offset
     * @return the next offset to read from
*/
private long nextOffset(long current) {
if(skipSize == 0)
return rnd.nextInt((int)(fileSize));
if(skipSize > 0)
return (current < 0) ? 0 : (current + bufferSize + skipSize);
// skipSize < 0
return (current < 0) ? Math.max(0, fileSize - bufferSize) :
Math.max(0, current + skipSize);
}
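    // For illustration, how nextOffset() drives the three read modes
    // described in the class comment above:
    //   skipSize == 0 (random):   every offset is rnd.nextInt(fileSize)
    //   skipSize > 0  (skip):     0, bufferSize+skipSize, 2*(bufferSize+skipSize), ...
    //   skipSize < 0  (backward): starts at fileSize-bufferSize and steps back by |skipSize|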
}
private void randomReadTest(FileSystem fs) throws IOException {
Path readDir = getRandomReadDir(config);
fs.delete(readDir, true);
runIOTest(RandomReadMapper.class, readDir);
}
/**
* Truncate mapper class.
* The mapper truncates given file to the newLength, specified by -size.
*/
public static class TruncateMapper extends IOStatMapper {
private static final long DELAY = 100L;
private Path filePath;
private long fileSize;
@Override // IOMapperBase
public Closeable getIOStream(String name) throws IOException {
filePath = new Path(getDataDir(getConf()), name);
fileSize = fs.getFileStatus(filePath).getLen();
return null;
}
@Override // IOMapperBase
public Long doIO(Reporter reporter,
String name,
long newLength // in bytes
) throws IOException {
boolean isClosed = fs.truncate(filePath, newLength);
reporter.setStatus("truncating " + name + " to newLength " +
newLength + " ::host = " + hostName);
for(int i = 0; !isClosed; i++) {
try {
Thread.sleep(DELAY);
} catch (InterruptedException ignored) {}
FileStatus status = fs.getFileStatus(filePath);
assert status != null : "status is null";
isClosed = (status.getLen() == newLength);
reporter.setStatus("truncate recover for " + name + " to newLength " +
newLength + " attempt " + i + " ::host = " + hostName);
}
return Long.valueOf(fileSize - newLength);
}
}
private void truncateTest(FileSystem fs) throws IOException {
    Path truncateDir = getTruncateDir(config);
    fs.delete(truncateDir, true);
    runIOTest(TruncateMapper.class, truncateDir);
}
private void sequentialTest(FileSystem fs,
TestType testType,
long fileSize, // in bytes
int nrFiles
) throws IOException {
IOStatMapper ioer = null;
switch(testType) {
case TEST_TYPE_READ:
ioer = new ReadMapper();
break;
case TEST_TYPE_WRITE:
ioer = new WriteMapper();
break;
case TEST_TYPE_APPEND:
ioer = new AppendMapper();
break;
case TEST_TYPE_READ_RANDOM:
case TEST_TYPE_READ_BACKWARD:
case TEST_TYPE_READ_SKIP:
ioer = new RandomReadMapper();
break;
case TEST_TYPE_TRUNCATE:
ioer = new TruncateMapper();
break;
default:
return;
}
for(int i=0; i < nrFiles; i++)
ioer.doIO(Reporter.NULL,
BASE_FILE_NAME+Integer.toString(i),
fileSize);
}
public static void main(String[] args) {
TestDFSIO bench = new TestDFSIO();
int res = -1;
try {
res = ToolRunner.run(bench, args);
} catch(Exception e) {
System.err.print(StringUtils.stringifyException(e));
res = -2;
}
if(res == -1)
System.err.print(USAGE);
System.exit(res);
}
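  // For illustration, some command lines accepted by run() below
  // (file counts and sizes are arbitrary examples):
  //   -write -nrFiles 10 -size 128MB          write 10 files of 128 MB each
  //   -read -nrFiles 10 -size 128MB           sequentially read the files back
  //   -read -random -nrFiles 10 -size 128MB   random-read test (-random must follow -read)
  //   -clean                                  delete all generated test data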
@Override // Tool
public int run(String[] args) throws IOException {
TestType testType = null;
int bufferSize = DEFAULT_BUFFER_SIZE;
long nrBytes = 1*MEGA;
int nrFiles = 1;
long skipSize = 0;
String resFileName = DEFAULT_RES_FILE_NAME;
String compressionClass = null;
boolean isSequential = false;
String version = TestDFSIO.class.getSimpleName() + ".1.8";
LOG.info(version);
if (args.length == 0) {
System.err.println("Missing arguments.");
return -1;
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].startsWith("-read")) {
testType = TestType.TEST_TYPE_READ;
} else if (args[i].equals("-write")) {
testType = TestType.TEST_TYPE_WRITE;
} else if (args[i].equals("-append")) {
testType = TestType.TEST_TYPE_APPEND;
} else if (args[i].equals("-random")) {
if(testType != TestType.TEST_TYPE_READ) return -1;
testType = TestType.TEST_TYPE_READ_RANDOM;
} else if (args[i].equals("-backward")) {
if(testType != TestType.TEST_TYPE_READ) return -1;
testType = TestType.TEST_TYPE_READ_BACKWARD;
} else if (args[i].equals("-skip")) {
if(testType != TestType.TEST_TYPE_READ) return -1;
testType = TestType.TEST_TYPE_READ_SKIP;
} else if (args[i].equalsIgnoreCase("-truncate")) {
testType = TestType.TEST_TYPE_TRUNCATE;
} else if (args[i].equals("-clean")) {
testType = TestType.TEST_TYPE_CLEANUP;
} else if (args[i].startsWith("-seq")) {
isSequential = true;
} else if (args[i].startsWith("-compression")) {
compressionClass = args[++i];
} else if (args[i].equals("-nrFiles")) {
nrFiles = Integer.parseInt(args[++i]);
} else if (args[i].equals("-fileSize") || args[i].equals("-size")) {
nrBytes = parseSize(args[++i]);
} else if (args[i].equals("-skipSize")) {
skipSize = parseSize(args[++i]);
} else if (args[i].equals("-bufferSize")) {
bufferSize = Integer.parseInt(args[++i]);
} else if (args[i].equals("-resFile")) {
resFileName = args[++i];
} else {
System.err.println("Illegal argument: " + args[i]);
return -1;
}
}
if(testType == null)
return -1;
if(testType == TestType.TEST_TYPE_READ_BACKWARD)
skipSize = -bufferSize;
else if(testType == TestType.TEST_TYPE_READ_SKIP && skipSize == 0)
skipSize = bufferSize;
LOG.info("nrFiles = " + nrFiles);
LOG.info("nrBytes (MB) = " + toMB(nrBytes));
LOG.info("bufferSize = " + bufferSize);
if(skipSize > 0)
LOG.info("skipSize = " + skipSize);
LOG.info("baseDir = " + getBaseDir(config));
if(compressionClass != null) {
config.set("test.io.compression.class", compressionClass);
LOG.info("compressionClass = " + compressionClass);
}
config.setInt("test.io.file.buffer.size", bufferSize);
config.setLong("test.io.skip.size", skipSize);
config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
FileSystem fs = FileSystem.get(config);
if (isSequential) {
long tStart = System.currentTimeMillis();
sequentialTest(fs, testType, nrBytes, nrFiles);
long execTime = System.currentTimeMillis() - tStart;
String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
LOG.info(resultLine);
return 0;
}
if (testType == TestType.TEST_TYPE_CLEANUP) {
cleanup(fs);
return 0;
}
createControlFile(fs, nrBytes, nrFiles);
long tStart = System.currentTimeMillis();
switch(testType) {
case TEST_TYPE_WRITE:
writeTest(fs);
break;
case TEST_TYPE_READ:
readTest(fs);
break;
case TEST_TYPE_APPEND:
appendTest(fs);
break;
case TEST_TYPE_READ_RANDOM:
case TEST_TYPE_READ_BACKWARD:
case TEST_TYPE_READ_SKIP:
randomReadTest(fs);
break;
case TEST_TYPE_TRUNCATE:
truncateTest(fs);
break;
default:
}
long execTime = System.currentTimeMillis() - tStart;
analyzeResult(fs, testType, execTime, resFileName);
return 0;
}
@Override // Configurable
public Configuration getConf() {
return this.config;
}
@Override // Configurable
public void setConf(Configuration conf) {
this.config = conf;
}
/**
* Returns size in bytes.
*
   * @param arg a size specification of the form {d}[B|KB|MB|GB|TB]
   * @return the size in bytes
*/
static long parseSize(String arg) {
String[] args = arg.split("\\D", 2); // get digits
assert args.length <= 2;
long nrBytes = Long.parseLong(args[0]);
String bytesMult = arg.substring(args[0].length()); // get byte multiple
return nrBytes * ByteMultiple.parseString(bytesMult).value();
}
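  // For illustration of parseSize(), assuming ByteMultiple resolves "MB" to 2^20:
  // parseSize("128MB") splits into the digits "128" and the multiple "MB"
  // and returns 128L * 1024 * 1024 bytes.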
static float toMB(long bytes) {
return ((float)bytes)/MEGA;
}
private void analyzeResult( FileSystem fs,
TestType testType,
long execTime,
String resFileName
) throws IOException {
Path reduceFile = getReduceFilePath(testType);
long tasks = 0;
long size = 0;
long time = 0;
float rate = 0;
float sqrate = 0;
DataInputStream in = null;
BufferedReader lines = null;
try {
in = new DataInputStream(fs.open(reduceFile));
lines = new BufferedReader(new InputStreamReader(in));
String line;
while((line = lines.readLine()) != null) {
StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
String attr = tokens.nextToken();
if (attr.endsWith(":tasks"))
tasks = Long.parseLong(tokens.nextToken());
else if (attr.endsWith(":size"))
size = Long.parseLong(tokens.nextToken());
else if (attr.endsWith(":time"))
time = Long.parseLong(tokens.nextToken());
else if (attr.endsWith(":rate"))
rate = Float.parseFloat(tokens.nextToken());
else if (attr.endsWith(":sqrate"))
sqrate = Float.parseFloat(tokens.nextToken());
}
} finally {
if(in != null) in.close();
if(lines != null) lines.close();
}
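    // rate and sqrate were accumulated by collectStats() as
    // sum(ioRateMbSec * 1000) and sum(ioRateMbSec^2 * 1000) over all tasks,
    // so med below is the mean per-task IO rate in MB/s and stdDev its
    // standard deviation.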
double med = rate / 1000 / tasks;
double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med));
String resultLines[] = {
"----- TestDFSIO ----- : " + testType,
" Date & time: " + new Date(System.currentTimeMillis()),
" Number of files: " + tasks,
"Total MBytes processed: " + toMB(size),
" Throughput mb/sec: " + size * 1000.0 / (time * MEGA),
"Average IO rate mb/sec: " + med,
" IO rate std deviation: " + stdDev,
" Test exec time sec: " + (float)execTime / 1000,
"" };
PrintStream res = null;
try {
res = new PrintStream(new FileOutputStream(new File(resFileName), true));
for(int i = 0; i < resultLines.length; i++) {
LOG.info(resultLines[i]);
res.println(resultLines[i]);
}
} finally {
if(res != null) res.close();
}
}
private Path getReduceFilePath(TestType testType) {
switch(testType) {
case TEST_TYPE_WRITE:
return new Path(getWriteDir(config), "part-00000");
case TEST_TYPE_APPEND:
return new Path(getAppendDir(config), "part-00000");
case TEST_TYPE_READ:
return new Path(getReadDir(config), "part-00000");
case TEST_TYPE_READ_RANDOM:
case TEST_TYPE_READ_BACKWARD:
case TEST_TYPE_READ_SKIP:
return new Path(getRandomReadDir(config), "part-00000");
case TEST_TYPE_TRUNCATE:
return new Path(getTruncateDir(config), "part-00000");
default:
}
return null;
}
private void analyzeResult(FileSystem fs, TestType testType, long execTime)
throws IOException {
String dir = System.getProperty("test.build.dir", "target/test-dir");
analyzeResult(fs, testType, execTime, dir + "/" + DEFAULT_RES_FILE_NAME);
}
private void cleanup(FileSystem fs)
throws IOException {
LOG.info("Cleaning up test files");
fs.delete(new Path(getBaseDir(config)), true);
}
}
| 32,651 | 32.906542 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.HashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
/**
* Job History Log Analyzer.
*
* <h3>Description.</h3>
 * This is a tool for parsing and analyzing history logs of map-reduce jobs.
* History logs contain information about execution of jobs, tasks, and
* attempts. This tool focuses on submission, launch, start, and finish times,
* as well as the success or failure of jobs, tasks, and attempts.
* <p>
* The analyzer calculates <em>per hour slot utilization</em> for the cluster
* as follows.
* For each task attempt it divides the time segment from the start of the
* attempt t<sub>S</sub> to the finish t<sub>F</sub> into whole hours
* [t<sub>0</sub>, ..., t<sub>n</sub>], where t<sub>0</sub> <= t<sub>S</sub>
* is the maximal whole hour preceding t<sub>S</sub>, and
* t<sub>n</sub> >= t<sub>F</sub> is the minimal whole hour after t<sub>F</sub>.
* Thus, [t<sub>0</sub>, ..., t<sub>n</sub>] covers the segment
* [t<sub>S</sub>, t<sub>F</sub>], during which the attempt was executed.
* Each interval [t<sub>i</sub>, t<sub>i+1</sub>] fully contained in
* [t<sub>S</sub>, t<sub>F</sub>] corresponds to exactly one slot on
* a map-reduce cluster (usually MAP-slot or REDUCE-slot).
* If interval [t<sub>i</sub>, t<sub>i+1</sub>] only intersects with
* [t<sub>S</sub>, t<sub>F</sub>] then we say that the task
* attempt used just a fraction of the slot during this hour.
* The fraction equals the size of the intersection.
* Let slotTime(A, h) denote the number of slots calculated that way for a
* specific attempt A during hour h.
* The tool then sums all slots for all attempts for every hour.
* The result is the slot hour utilization of the cluster:
* <tt>slotTime(h) = SUM<sub>A</sub> slotTime(A,h)</tt>.
* <p>
* Log analyzer calculates slot hours for <em>MAP</em> and <em>REDUCE</em>
* attempts separately.
* <p>
* Log analyzer distinguishes between <em>successful</em> and <em>failed</em>
* attempts. Task attempt is considered successful if its own status is SUCCESS
* and the statuses of the task and the job it is a part of are also SUCCESS.
* Otherwise the task attempt is considered failed.
* <p>
* Map-reduce clusters are usually configured to have a fixed number of MAP
* and REDUCE slots per node. Thus the maximal possible number of slots on
* the cluster is <tt>total_slots = total_nodes * slots_per_node</tt>.
* Effective slot hour cannot exceed <tt>total_slots</tt> for successful
* attempts.
* <p>
* <em>Pending time</em> characterizes the wait time of attempts.
* It is calculated similarly to the slot hour except that the wait interval
* starts when the job is submitted and ends when an attempt starts execution.
* In addition to that pending time also includes intervals between attempts
* of the same task if it was re-executed.
* <p>
 * History log analyzer calculates two pending time variations. The first is
 * based on the job submission time as described above; the second starts the
 * wait interval when the job is launched rather than submitted.
*
* <h3>Input.</h3>
* The following input parameters can be specified in the argument string
* to the job log analyzer:
* <ul>
* <li><tt>-historyDir inputDir</tt> specifies the location of the directory
* where analyzer will be looking for job history log files.</li>
* <li><tt>-resFile resultFile</tt> the name of the result file.</li>
* <li><tt>-usersIncluded | -usersExcluded userList</tt> slot utilization and
* pending time can be calculated for all or for all but the specified users.
* <br>
* <tt>userList</tt> is a comma or semicolon separated list of users.</li>
* <li><tt>-gzip</tt> is used if history log files are compressed.
* Only {@link GzipCodec} is currently supported.</li>
* <li><tt>-jobDelimiter pattern</tt> one can concatenate original log files into
* larger file(s) with the specified delimiter to recognize the end of the log
* for one job from the next one.<br>
* <tt>pattern</tt> is a java regular expression
* {@link java.util.regex.Pattern}, which should match only the log delimiters.
* <br>
* E.g. pattern <tt>".!!FILE=.*!!"</tt> matches delimiters, which contain
* the original history log file names in the following form:<br>
* <tt>"$!!FILE=my.job.tracker.com_myJobId_user_wordcount.log!!"</tt></li>
* <li><tt>-clean</tt> cleans up default directories used by the analyzer.</li>
* <li><tt>-test</tt> test one file locally and exit;
* does not require map-reduce.</li>
* <li><tt>-help</tt> print usage.</li>
* </ul>
*
* <h3>Output.</h3>
* The output file is formatted as a tab separated table consisting of four
* columns: <tt>SERIES, PERIOD, TYPE, SLOT_HOUR</tt>.
* <ul>
* <li><tt>SERIES</tt> one of the four statistical series;</li>
* <li><tt>PERIOD</tt> the start of the time interval in the following format:
* <tt>"yyyy-mm-dd hh:mm:ss"</tt>;</li>
* <li><tt>TYPE</tt> the slot type, e.g. MAP or REDUCE;</li>
* <li><tt>SLOT_HOUR</tt> the value of the slot usage during this
* time interval.</li>
* </ul>
*/
@SuppressWarnings("deprecation")
public class JHLogAnalyzer {
private static final Log LOG = LogFactory.getLog(JHLogAnalyzer.class);
// Constants
private static final String JHLA_ROOT_DIR =
System.getProperty("test.build.data", "stats/JHLA");
private static final Path INPUT_DIR = new Path(JHLA_ROOT_DIR, "jhla_input");
private static final String BASE_INPUT_FILE_NAME = "jhla_in_";
private static final Path OUTPUT_DIR = new Path(JHLA_ROOT_DIR, "jhla_output");
private static final Path RESULT_FILE =
new Path(JHLA_ROOT_DIR, "jhla_result.txt");
private static final Path DEFAULT_HISTORY_DIR = new Path("history");
private static final int DEFAULT_TIME_INTERVAL_MSEC = 1000*60*60; // 1 hour
static{
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
}
static enum StatSeries {
STAT_ALL_SLOT_TIME
(AccumulatingReducer.VALUE_TYPE_LONG + "allSlotTime"),
STAT_FAILED_SLOT_TIME
(AccumulatingReducer.VALUE_TYPE_LONG + "failedSlotTime"),
STAT_SUBMIT_PENDING_SLOT_TIME
(AccumulatingReducer.VALUE_TYPE_LONG + "submitPendingSlotTime"),
STAT_LAUNCHED_PENDING_SLOT_TIME
(AccumulatingReducer.VALUE_TYPE_LONG + "launchedPendingSlotTime");
private String statName = null;
private StatSeries(String name) {this.statName = name;}
public String toString() {return statName;}
}
private static class FileCreateDaemon extends Thread {
private static final int NUM_CREATE_THREADS = 10;
private static volatile int numFinishedThreads;
private static volatile int numRunningThreads;
private static FileStatus[] jhLogFiles;
FileSystem fs;
int start;
int end;
FileCreateDaemon(FileSystem fs, int start, int end) {
this.fs = fs;
this.start = start;
this.end = end;
}
public void run() {
try {
for(int i=start; i < end; i++) {
String name = getFileName(i);
Path controlFile = new Path(INPUT_DIR, "in_file_" + name);
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
Text.class, LongWritable.class,
CompressionType.NONE);
String logFile = jhLogFiles[i].getPath().toString();
writer.append(new Text(logFile), new LongWritable(0));
} catch(Exception e) {
throw new IOException(e);
} finally {
if (writer != null)
writer.close();
writer = null;
}
}
} catch(IOException ex) {
LOG.error("FileCreateDaemon failed.", ex);
}
numFinishedThreads++;
}
private static void createControlFile(FileSystem fs, Path jhLogDir
) throws IOException {
fs.delete(INPUT_DIR, true);
jhLogFiles = fs.listStatus(jhLogDir);
numFinishedThreads = 0;
try {
int start = 0;
int step = jhLogFiles.length / NUM_CREATE_THREADS
+ ((jhLogFiles.length % NUM_CREATE_THREADS) > 0 ? 1 : 0);
FileCreateDaemon[] daemons = new FileCreateDaemon[NUM_CREATE_THREADS];
numRunningThreads = 0;
for(int tIdx=0; tIdx < NUM_CREATE_THREADS && start < jhLogFiles.length; tIdx++) {
int end = Math.min(start + step, jhLogFiles.length);
daemons[tIdx] = new FileCreateDaemon(fs, start, end);
start += step;
numRunningThreads++;
}
for(int tIdx=0; tIdx < numRunningThreads; tIdx++) {
daemons[tIdx].start();
}
} finally {
int prevValue = 0;
while(numFinishedThreads < numRunningThreads) {
if(prevValue < numFinishedThreads) {
LOG.info("Finished " + numFinishedThreads + " threads out of " + numRunningThreads);
prevValue = numFinishedThreads;
}
try {Thread.sleep(500);} catch (InterruptedException e) {}
}
}
}
}
private static void createControlFile(FileSystem fs, Path jhLogDir
) throws IOException {
LOG.info("creating control file: JH log dir = " + jhLogDir);
FileCreateDaemon.createControlFile(fs, jhLogDir);
LOG.info("created control file: JH log dir = " + jhLogDir);
}
private static String getFileName(int fIdx) {
return BASE_INPUT_FILE_NAME + Integer.toString(fIdx);
}
/**
* If keyVal is of the form KEY="VALUE", then this will return [KEY, VALUE]
*/
private static String [] getKeyValue(String t) throws IOException {
String[] keyVal = t.split("=\"*|\"");
return keyVal;
}
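  // For illustration: getKeyValue("JOB_STATUS=\"SUCCESS\"") splits on the
  // '="' separator and the closing quote, returning ["JOB_STATUS", "SUCCESS"].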
/**
* JobHistory log record.
*/
private static class JobHistoryLog {
String JOBID;
String JOB_STATUS;
long SUBMIT_TIME;
long LAUNCH_TIME;
long FINISH_TIME;
long TOTAL_MAPS;
long TOTAL_REDUCES;
long FINISHED_MAPS;
long FINISHED_REDUCES;
String USER;
Map<String, TaskHistoryLog> tasks;
boolean isSuccessful() {
return (JOB_STATUS != null) && JOB_STATUS.equals("SUCCESS");
}
void parseLine(String line) throws IOException {
StringTokenizer tokens = new StringTokenizer(line);
if(!tokens.hasMoreTokens())
return;
String what = tokens.nextToken();
// Line should start with one of the following:
// Job, Task, MapAttempt, ReduceAttempt
if(what.equals("Job"))
updateJob(tokens);
else if(what.equals("Task"))
updateTask(tokens);
else if(what.indexOf("Attempt") >= 0)
updateTaskAttempt(tokens);
}
private void updateJob(StringTokenizer tokens) throws IOException {
while(tokens.hasMoreTokens()) {
String t = tokens.nextToken();
String[] keyVal = getKeyValue(t);
if(keyVal.length < 2) continue;
if(keyVal[0].equals("JOBID")) {
if(JOBID == null)
JOBID = new String(keyVal[1]);
else if(!JOBID.equals(keyVal[1])) {
LOG.error("Incorrect JOBID: "
+ keyVal[1].substring(0, Math.min(keyVal[1].length(), 100))
+ " expect " + JOBID);
return;
}
}
else if(keyVal[0].equals("JOB_STATUS"))
JOB_STATUS = new String(keyVal[1]);
else if(keyVal[0].equals("SUBMIT_TIME"))
SUBMIT_TIME = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("LAUNCH_TIME"))
LAUNCH_TIME = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("FINISH_TIME"))
FINISH_TIME = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("TOTAL_MAPS"))
TOTAL_MAPS = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("TOTAL_REDUCES"))
TOTAL_REDUCES = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("FINISHED_MAPS"))
FINISHED_MAPS = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("FINISHED_REDUCES"))
FINISHED_REDUCES = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("USER"))
USER = new String(keyVal[1]);
}
}
private void updateTask(StringTokenizer tokens) throws IOException {
// unpack
TaskHistoryLog task = new TaskHistoryLog().parse(tokens);
if(task.TASKID == null) {
LOG.error("TASKID = NULL for job " + JOBID);
return;
}
// update or insert
if(tasks == null)
tasks = new HashMap<String, TaskHistoryLog>((int)(TOTAL_MAPS + TOTAL_REDUCES));
TaskHistoryLog existing = tasks.get(task.TASKID);
if(existing == null)
tasks.put(task.TASKID, task);
else
existing.updateWith(task);
}
private void updateTaskAttempt(StringTokenizer tokens) throws IOException {
// unpack
TaskAttemptHistoryLog attempt = new TaskAttemptHistoryLog();
String taskID = attempt.parse(tokens);
if(taskID == null) return;
if(tasks == null)
tasks = new HashMap<String, TaskHistoryLog>((int)(TOTAL_MAPS + TOTAL_REDUCES));
TaskHistoryLog existing = tasks.get(taskID);
if(existing == null) {
existing = new TaskHistoryLog(taskID);
tasks.put(taskID, existing);
}
existing.updateWith(attempt);
}
}
/**
* TaskHistory log record.
*/
private static class TaskHistoryLog {
String TASKID;
String TASK_TYPE; // MAP, REDUCE, SETUP, CLEANUP
String TASK_STATUS;
long START_TIME;
long FINISH_TIME;
Map<String, TaskAttemptHistoryLog> attempts;
TaskHistoryLog() {}
TaskHistoryLog(String taskID) {
TASKID = taskID;
}
boolean isSuccessful() {
return (TASK_STATUS != null) && TASK_STATUS.equals("SUCCESS");
}
TaskHistoryLog parse(StringTokenizer tokens) throws IOException {
while(tokens.hasMoreTokens()) {
String t = tokens.nextToken();
String[] keyVal = getKeyValue(t);
if(keyVal.length < 2) continue;
if(keyVal[0].equals("TASKID")) {
if(TASKID == null)
TASKID = new String(keyVal[1]);
else if(!TASKID.equals(keyVal[1])) {
LOG.error("Incorrect TASKID: "
+ keyVal[1].substring(0, Math.min(keyVal[1].length(), 100))
+ " expect " + TASKID);
continue;
}
}
else if(keyVal[0].equals("TASK_TYPE"))
TASK_TYPE = new String(keyVal[1]);
else if(keyVal[0].equals("TASK_STATUS"))
TASK_STATUS = new String(keyVal[1]);
else if(keyVal[0].equals("START_TIME"))
START_TIME = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("FINISH_TIME"))
FINISH_TIME = Long.parseLong(keyVal[1]);
}
return this;
}
/**
* Update with non-null fields of the same task log record.
*/
void updateWith(TaskHistoryLog from) throws IOException {
if(TASKID == null)
TASKID = from.TASKID;
else if(!TASKID.equals(from.TASKID)) {
throw new IOException("Incorrect TASKID: " + from.TASKID
+ " expect " + TASKID);
}
if(TASK_TYPE == null)
TASK_TYPE = from.TASK_TYPE;
else if(! TASK_TYPE.equals(from.TASK_TYPE)) {
LOG.error(
"Incorrect TASK_TYPE: " + from.TASK_TYPE + " expect " + TASK_TYPE
+ " for task " + TASKID);
return;
}
if(from.TASK_STATUS != null)
TASK_STATUS = from.TASK_STATUS;
if(from.START_TIME > 0)
START_TIME = from.START_TIME;
if(from.FINISH_TIME > 0)
FINISH_TIME = from.FINISH_TIME;
}
/**
* Update with non-null fields of the task attempt log record.
*/
void updateWith(TaskAttemptHistoryLog attempt) throws IOException {
if(attempt.TASK_ATTEMPT_ID == null) {
LOG.error("Unexpected TASK_ATTEMPT_ID = null for task " + TASKID);
return;
}
if(attempts == null)
attempts = new HashMap<String, TaskAttemptHistoryLog>();
TaskAttemptHistoryLog existing = attempts.get(attempt.TASK_ATTEMPT_ID);
if(existing == null)
attempts.put(attempt.TASK_ATTEMPT_ID, attempt);
else
existing.updateWith(attempt);
// update task start time
if(attempt.START_TIME > 0 &&
(this.START_TIME == 0 || this.START_TIME > attempt.START_TIME))
START_TIME = attempt.START_TIME;
}
}
/**
* TaskAttemptHistory log record.
*/
private static class TaskAttemptHistoryLog {
String TASK_ATTEMPT_ID;
String TASK_STATUS; // this task attempt status
long START_TIME;
long FINISH_TIME;
long HDFS_BYTES_READ;
long HDFS_BYTES_WRITTEN;
long FILE_BYTES_READ;
long FILE_BYTES_WRITTEN;
/**
* Task attempt is considered successful iff all three statuses
* of the attempt, the task, and the job equal "SUCCESS".
*/
boolean isSuccessful() {
return (TASK_STATUS != null) && TASK_STATUS.equals("SUCCESS");
}
String parse(StringTokenizer tokens) throws IOException {
String taskID = null;
while(tokens.hasMoreTokens()) {
String t = tokens.nextToken();
String[] keyVal = getKeyValue(t);
if(keyVal.length < 2) continue;
if(keyVal[0].equals("TASKID")) {
if(taskID == null)
taskID = new String(keyVal[1]);
else if(!taskID.equals(keyVal[1])) {
LOG.error("Incorrect TASKID: " + keyVal[1] + " expect " + taskID);
continue;
}
}
else if(keyVal[0].equals("TASK_ATTEMPT_ID")) {
if(TASK_ATTEMPT_ID == null)
TASK_ATTEMPT_ID = new String(keyVal[1]);
else if(!TASK_ATTEMPT_ID.equals(keyVal[1])) {
LOG.error("Incorrect TASKID: " + keyVal[1] + " expect " + taskID);
continue;
}
}
else if(keyVal[0].equals("TASK_STATUS"))
TASK_STATUS = new String(keyVal[1]);
else if(keyVal[0].equals("START_TIME"))
START_TIME = Long.parseLong(keyVal[1]);
else if(keyVal[0].equals("FINISH_TIME"))
FINISH_TIME = Long.parseLong(keyVal[1]);
}
return taskID;
}
/**
* Update with non-null fields of the same task attempt log record.
*/
void updateWith(TaskAttemptHistoryLog from) throws IOException {
if(TASK_ATTEMPT_ID == null)
TASK_ATTEMPT_ID = from.TASK_ATTEMPT_ID;
else if(! TASK_ATTEMPT_ID.equals(from.TASK_ATTEMPT_ID)) {
throw new IOException(
"Incorrect TASK_ATTEMPT_ID: " + from.TASK_ATTEMPT_ID
+ " expect " + TASK_ATTEMPT_ID);
}
if(from.TASK_STATUS != null)
TASK_STATUS = from.TASK_STATUS;
if(from.START_TIME > 0)
START_TIME = from.START_TIME;
if(from.FINISH_TIME > 0)
FINISH_TIME = from.FINISH_TIME;
if(from.HDFS_BYTES_READ > 0)
HDFS_BYTES_READ = from.HDFS_BYTES_READ;
if(from.HDFS_BYTES_WRITTEN > 0)
HDFS_BYTES_WRITTEN = from.HDFS_BYTES_WRITTEN;
if(from.FILE_BYTES_READ > 0)
FILE_BYTES_READ = from.FILE_BYTES_READ;
if(from.FILE_BYTES_WRITTEN > 0)
FILE_BYTES_WRITTEN = from.FILE_BYTES_WRITTEN;
}
}
/**
* Key = statName*date-time*taskType
   * Value = number of msec for the hour
*/
private static class IntervalKey {
static final String KEY_FIELD_DELIMITER = "*";
String statName;
String dateTime;
String taskType;
IntervalKey(String stat, long timeMSec, String taskType) {
statName = stat;
SimpleDateFormat dateF = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
dateTime = dateF.format(new Date(timeMSec));
this.taskType = taskType;
}
IntervalKey(String key) {
StringTokenizer keyTokens = new StringTokenizer(key, KEY_FIELD_DELIMITER);
if(!keyTokens.hasMoreTokens()) return;
statName = keyTokens.nextToken();
if(!keyTokens.hasMoreTokens()) return;
dateTime = keyTokens.nextToken();
if(!keyTokens.hasMoreTokens()) return;
taskType = keyTokens.nextToken();
}
void setStatName(String stat) {
statName = stat;
}
String getStringKey() {
return statName + KEY_FIELD_DELIMITER +
dateTime + KEY_FIELD_DELIMITER +
taskType;
}
Text getTextKey() {
return new Text(getStringKey());
}
public String toString() {
return getStringKey();
}
}
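  // For illustration, assuming AccumulatingReducer.VALUE_TYPE_LONG is the
  // "l:" prefix (see the attr.substring(2) call in analyzeResult), the key
  // for STAT_ALL_SLOT_TIME of a MAP attempt in the hour starting
  // 2009-08-01 10:00:00 would be "l:allSlotTime*2009-08-01 10:00:00*MAP".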
/**
* Mapper class.
*/
private static class JHLAMapper extends IOMapperBase<Object> {
/**
* A line pattern, which delimits history logs of different jobs,
* if multiple job logs are written in the same file.
* Null value means only one job log per file is expected.
* The pattern should be a regular expression as in
* {@link String#matches(String)}.
*/
String jobDelimiterPattern;
int maxJobDelimiterLineLength;
/** Count only these users jobs */
Collection<String> usersIncluded;
/** Exclude jobs of the following users */
Collection<String> usersExcluded;
/** Type of compression for compressed files: gzip */
Class<? extends CompressionCodec> compressionClass;
JHLAMapper() throws IOException {
}
JHLAMapper(Configuration conf) throws IOException {
configure(new JobConf(conf));
}
public void configure(JobConf conf) {
super.configure(conf );
usersIncluded = getUserList(conf.get("jhla.users.included", null));
usersExcluded = getUserList(conf.get("jhla.users.excluded", null));
String zipClassName = conf.get("jhla.compression.class", null);
try {
compressionClass = (zipClassName == null) ? null :
Class.forName(zipClassName).asSubclass(CompressionCodec.class);
} catch(Exception e) {
throw new RuntimeException("Compression codec not found: ", e);
}
jobDelimiterPattern = conf.get("jhla.job.delimiter.pattern", null);
maxJobDelimiterLineLength = conf.getInt("jhla.job.delimiter.length", 512);
}
@Override
public void map(Text key,
LongWritable value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
String name = key.toString();
long longValue = value.get();
reporter.setStatus("starting " + name + " ::host = " + hostName);
long tStart = System.currentTimeMillis();
parseLogFile(fs, new Path(name), longValue, output, reporter);
long tEnd = System.currentTimeMillis();
long execTime = tEnd - tStart;
reporter.setStatus("finished " + name + " ::host = " + hostName +
" in " + execTime/1000 + " sec.");
}
public Object doIO(Reporter reporter,
String path, // full path of history log file
long offset // starting offset within the file
) throws IOException {
return null;
}
void collectStats(OutputCollector<Text, Text> output,
String name,
long execTime,
Object jobObjects) throws IOException {
}
private boolean isEndOfJobLog(String line) {
if(jobDelimiterPattern == null)
return false;
return line.matches(jobDelimiterPattern);
}
/**
* Collect information about one job.
*
* @param fs - file system
* @param filePath - full path of a history log file
* @param offset - starting offset in the history log file
* @throws IOException
*/
public void parseLogFile(FileSystem fs,
Path filePath,
long offset,
OutputCollector<Text, Text> output,
Reporter reporter
) throws IOException {
InputStream in = null;
try {
// open file & seek
FSDataInputStream stm = fs.open(filePath);
stm.seek(offset);
in = stm;
LOG.info("Opened " + filePath);
reporter.setStatus("Opened " + filePath);
// get a compression filter if specified
if(compressionClass != null) {
CompressionCodec codec = (CompressionCodec)
ReflectionUtils.newInstance(compressionClass, new Configuration());
in = codec.createInputStream(stm);
LOG.info("Codec created " + filePath);
reporter.setStatus("Codec created " + filePath);
}
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
LOG.info("Reader created " + filePath);
// skip to the next job log start
long processed = 0L;
if(jobDelimiterPattern != null) {
for(String line = reader.readLine();
line != null; line = reader.readLine()) {
if((stm.getPos() - processed) > 100000) {
processed = stm.getPos();
reporter.setStatus("Processing " + filePath + " at " + processed);
}
if(isEndOfJobLog(line))
break;
}
}
// parse lines and update job history
JobHistoryLog jh = new JobHistoryLog();
int jobLineCount = 0;
for(String line = readLine(reader);
line != null; line = readLine(reader)) {
jobLineCount++;
if((stm.getPos() - processed) > 20000) {
processed = stm.getPos();
long numTasks = (jh.tasks == null ? 0 : jh.tasks.size());
String txt = "Processing " + filePath + " at " + processed
+ " # tasks = " + numTasks;
reporter.setStatus(txt);
LOG.info(txt);
}
if(isEndOfJobLog(line)) {
if(jh.JOBID != null) {
LOG.info("Finished parsing job: " + jh.JOBID
+ " line count = " + jobLineCount);
collectJobStats(jh, output, reporter);
LOG.info("Collected stats for job: " + jh.JOBID);
}
jh = new JobHistoryLog();
jobLineCount = 0;
} else
jh.parseLine(line);
}
if(jh.JOBID == null) {
LOG.error("JOBID = NULL in " + filePath + " at " + processed);
return;
}
collectJobStats(jh, output, reporter);
} catch(Exception ie) {
// parsing errors can happen if the file has been truncated
LOG.error("JHLAMapper.parseLogFile", ie);
reporter.setStatus("JHLAMapper.parseLogFile failed "
+ StringUtils.stringifyException(ie));
throw new IOException("Job failed.", ie);
} finally {
if(in != null) in.close();
}
}
/**
* Read lines until one ends with a " ." or "\" "
*/
private StringBuffer resBuffer = new StringBuffer();
private String readLine(BufferedReader reader) throws IOException {
resBuffer.setLength(0);
reader.mark(maxJobDelimiterLineLength);
for(String line = reader.readLine();
line != null; line = reader.readLine()) {
if(isEndOfJobLog(line)) {
if(resBuffer.length() == 0)
resBuffer.append(line);
else
reader.reset();
break;
}
if(resBuffer.length() == 0)
resBuffer.append(line);
else if(resBuffer.length() < 32000)
resBuffer.append(line);
if(line.endsWith(" .") || line.endsWith("\" ")) {
break;
}
reader.mark(maxJobDelimiterLineLength);
}
String result = resBuffer.length() == 0 ? null : resBuffer.toString();
resBuffer.setLength(0);
return result;
}
private void collectPerIntervalStats(OutputCollector<Text, Text> output,
long start, long finish, String taskType,
StatSeries ... stats) throws IOException {
long curInterval = (start / DEFAULT_TIME_INTERVAL_MSEC)
* DEFAULT_TIME_INTERVAL_MSEC;
long curTime = start;
long accumTime = 0;
while(curTime < finish) {
// how much of the task time belonged to current interval
long nextInterval = curInterval + DEFAULT_TIME_INTERVAL_MSEC;
long intervalTime = ((finish < nextInterval) ?
finish : nextInterval) - curTime;
IntervalKey key = new IntervalKey("", curInterval, taskType);
Text val = new Text(String.valueOf(intervalTime));
for(StatSeries statName : stats) {
key.setStatName(statName.toString());
output.collect(key.getTextKey(), val);
}
curTime = curInterval = nextInterval;
accumTime += intervalTime;
}
// For the pending stat speculative attempts may intersect.
// Only one of them is considered pending.
assert accumTime == finish - start || finish < start;
}
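    // For illustration of collectPerIntervalStats(): with the default 1-hour
    // interval, an attempt running from 10:40 to 12:10 contributes 20 minutes
    // to the 10:00 interval, 60 minutes to the 11:00 interval, and 10 minutes
    // to the 12:00 interval; analyzeResult() later divides these millisecond
    // totals by DEFAULT_TIME_INTERVAL_MSEC to report slot hours.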
private void collectJobStats(JobHistoryLog jh,
OutputCollector<Text, Text> output,
Reporter reporter
) throws IOException {
if(jh == null)
return;
if(jh.tasks == null)
return;
if(jh.SUBMIT_TIME <= 0)
throw new IOException("Job " + jh.JOBID
+ " SUBMIT_TIME = " + jh.SUBMIT_TIME);
if(usersIncluded != null && !usersIncluded.contains(jh.USER))
return;
if(usersExcluded != null && usersExcluded.contains(jh.USER))
return;
int numAttempts = 0;
long totalTime = 0;
boolean jobSuccess = jh.isSuccessful();
long jobWaitTime = jh.LAUNCH_TIME - jh.SUBMIT_TIME;
// attemptSubmitTime is the job's SUBMIT_TIME,
// or the previous attempt FINISH_TIME for all subsequent attempts
for(TaskHistoryLog th : jh.tasks.values()) {
if(th.attempts == null)
continue;
// Task is successful iff both the task and the job are a "SUCCESS"
long attemptSubmitTime = jh.LAUNCH_TIME;
boolean taskSuccess = jobSuccess && th.isSuccessful();
for(TaskAttemptHistoryLog tah : th.attempts.values()) {
// Task attempt is considered successful iff all three statuses
// of the attempt, the task, and the job equal "SUCCESS"
boolean success = taskSuccess && tah.isSuccessful();
if(tah.START_TIME == 0) {
LOG.error("Start time 0 for task attempt " + tah.TASK_ATTEMPT_ID);
continue;
}
if(tah.FINISH_TIME < tah.START_TIME) {
LOG.error("Finish time " + tah.FINISH_TIME + " is less than " +
"Start time " + tah.START_TIME + " for task attempt " +
tah.TASK_ATTEMPT_ID);
tah.FINISH_TIME = tah.START_TIME;
}
if(!"MAP".equals(th.TASK_TYPE) && !"REDUCE".equals(th.TASK_TYPE) &&
!"CLEANUP".equals(th.TASK_TYPE) && !"SETUP".equals(th.TASK_TYPE)) {
LOG.error("Unexpected TASK_TYPE = " + th.TASK_TYPE
+ " for attempt " + tah.TASK_ATTEMPT_ID);
}
collectPerIntervalStats(output,
attemptSubmitTime, tah.START_TIME, th.TASK_TYPE,
StatSeries.STAT_LAUNCHED_PENDING_SLOT_TIME);
collectPerIntervalStats(output,
attemptSubmitTime - jobWaitTime, tah.START_TIME, th.TASK_TYPE,
StatSeries.STAT_SUBMIT_PENDING_SLOT_TIME);
if(success)
collectPerIntervalStats(output,
tah.START_TIME, tah.FINISH_TIME, th.TASK_TYPE,
StatSeries.STAT_ALL_SLOT_TIME);
else
collectPerIntervalStats(output,
tah.START_TIME, tah.FINISH_TIME, th.TASK_TYPE,
StatSeries.STAT_ALL_SLOT_TIME,
StatSeries.STAT_FAILED_SLOT_TIME);
totalTime += (tah.FINISH_TIME - tah.START_TIME);
numAttempts++;
if(numAttempts % 500 == 0) {
reporter.setStatus("Processing " + jh.JOBID + " at " + numAttempts);
}
attemptSubmitTime = tah.FINISH_TIME;
}
}
LOG.info("Total Maps = " + jh.TOTAL_MAPS
+ " Reduces = " + jh.TOTAL_REDUCES);
LOG.info("Finished Maps = " + jh.FINISHED_MAPS
+ " Reduces = " + jh.FINISHED_REDUCES);
LOG.info("numAttempts = " + numAttempts);
LOG.info("totalTime = " + totalTime);
LOG.info("averageAttemptTime = "
+ (numAttempts==0 ? 0 : totalTime/numAttempts));
LOG.info("jobTotalTime = " + (jh.FINISH_TIME <= jh.SUBMIT_TIME? 0 :
jh.FINISH_TIME - jh.SUBMIT_TIME));
}
}
public static class JHLAPartitioner implements Partitioner<Text, Text> {
static final int NUM_REDUCERS = 9;
public void configure(JobConf conf) {}
public int getPartition(Text key, Text value, int numPartitions) {
IntervalKey intKey = new IntervalKey(key.toString());
if(intKey.statName.equals(StatSeries.STAT_ALL_SLOT_TIME.toString())) {
if(intKey.taskType.equals("MAP"))
return 0;
else if(intKey.taskType.equals("REDUCE"))
return 1;
} else if(intKey.statName.equals(
StatSeries.STAT_SUBMIT_PENDING_SLOT_TIME.toString())) {
if(intKey.taskType.equals("MAP"))
return 2;
else if(intKey.taskType.equals("REDUCE"))
return 3;
} else if(intKey.statName.equals(
StatSeries.STAT_LAUNCHED_PENDING_SLOT_TIME.toString())) {
if(intKey.taskType.equals("MAP"))
return 4;
else if(intKey.taskType.equals("REDUCE"))
return 5;
} else if(intKey.statName.equals(
StatSeries.STAT_FAILED_SLOT_TIME.toString())) {
if(intKey.taskType.equals("MAP"))
return 6;
else if(intKey.taskType.equals("REDUCE"))
return 7;
}
return 8;
}
}
private static void runJHLA(
Class<? extends Mapper<Text, LongWritable, Text, Text>> mapperClass,
Path outputDir,
Configuration fsConfig) throws IOException {
JobConf job = new JobConf(fsConfig, JHLogAnalyzer.class);
job.setPartitionerClass(JHLAPartitioner.class);
FileInputFormat.setInputPaths(job, INPUT_DIR);
job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(mapperClass);
job.setReducerClass(AccumulatingReducer.class);
FileOutputFormat.setOutputPath(job, outputDir);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(JHLAPartitioner.NUM_REDUCERS);
JobClient.runJob(job);
}
private static class LoggingCollector implements OutputCollector<Text, Text> {
public void collect(Text key, Text value) throws IOException {
LOG.info(key + " == " + value);
}
}
/**
* Run job history log analyser.
*/
public static void main(String[] args) {
Path resFileName = RESULT_FILE;
Configuration conf = new Configuration();
try {
conf.setInt("test.io.file.buffer.size", 0);
Path historyDir = DEFAULT_HISTORY_DIR;
String testFile = null;
boolean cleanup = false;
boolean initControlFiles = true;
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equalsIgnoreCase("-historyDir")) {
historyDir = new Path(args[++i]);
} else if (args[i].equalsIgnoreCase("-resFile")) {
resFileName = new Path(args[++i]);
} else if (args[i].equalsIgnoreCase("-usersIncluded")) {
conf.set("jhla.users.included", args[++i]);
} else if (args[i].equalsIgnoreCase("-usersExcluded")) {
conf.set("jhla.users.excluded", args[++i]);
} else if (args[i].equalsIgnoreCase("-gzip")) {
conf.set("jhla.compression.class", GzipCodec.class.getCanonicalName());
} else if (args[i].equalsIgnoreCase("-jobDelimiter")) {
conf.set("jhla.job.delimiter.pattern", args[++i]);
} else if (args[i].equalsIgnoreCase("-jobDelimiterLength")) {
conf.setInt("jhla.job.delimiter.length", Integer.parseInt(args[++i]));
} else if(args[i].equalsIgnoreCase("-noInit")) {
initControlFiles = false;
} else if(args[i].equalsIgnoreCase("-test")) {
testFile = args[++i];
} else if(args[i].equalsIgnoreCase("-clean")) {
cleanup = true;
} else if(args[i].equalsIgnoreCase("-jobQueue")) {
conf.set("mapred.job.queue.name", args[++i]);
} else if(args[i].startsWith("-Xmx")) {
conf.set("mapred.child.java.opts", args[i]);
} else {
printUsage();
}
}
if(cleanup) {
cleanup(conf);
return;
}
if(testFile != null) {
LOG.info("Start JHLA test ============ ");
LocalFileSystem lfs = FileSystem.getLocal(conf);
conf.set("fs.defaultFS", "file:///");
JHLAMapper map = new JHLAMapper(conf);
map.parseLogFile(lfs, new Path(testFile), 0L,
new LoggingCollector(), Reporter.NULL);
return;
}
FileSystem fs = FileSystem.get(conf);
if(initControlFiles)
createControlFile(fs, historyDir);
long tStart = System.currentTimeMillis();
runJHLA(JHLAMapper.class, OUTPUT_DIR, conf);
long execTime = System.currentTimeMillis() - tStart;
analyzeResult(fs, 0, execTime, resFileName);
} catch(IOException e) {
System.err.print(StringUtils.stringifyException(e));
System.exit(-1);
}
}
private static void printUsage() {
String className = JHLogAnalyzer.class.getSimpleName();
System.err.println("Usage: " + className
+ "\n\t[-historyDir inputDir] | [-resFile resultFile] |"
+ "\n\t[-usersIncluded | -usersExcluded userList] |"
+ "\n\t[-gzip] | [-jobDelimiter pattern] |"
+ "\n\t[-help | -clean | -test testFile]");
System.exit(-1);
}
private static Collection<String> getUserList(String users) {
if(users == null)
return null;
StringTokenizer tokens = new StringTokenizer(users, ",;");
Collection<String> userList = new ArrayList<String>(tokens.countTokens());
while(tokens.hasMoreTokens())
userList.add(tokens.nextToken());
return userList;
}
/**
   * Result is combined from all reduce output files and is written to
   * RESULT_FILE as a tab-separated table with the columns
   * SERIES, PERIOD, TYPE, and SLOT_HOUR.
*/
private static void analyzeResult( FileSystem fs,
int testType,
long execTime,
Path resFileName
) throws IOException {
LOG.info("Analyzing results ...");
DataOutputStream out = null;
BufferedWriter writer = null;
try {
out = new DataOutputStream(fs.create(resFileName));
writer = new BufferedWriter(new OutputStreamWriter(out));
writer.write("SERIES\tPERIOD\tTYPE\tSLOT_HOUR\n");
FileStatus[] reduceFiles = fs.listStatus(OUTPUT_DIR);
assert reduceFiles.length == JHLAPartitioner.NUM_REDUCERS;
for(int i = 0; i < JHLAPartitioner.NUM_REDUCERS; i++) {
DataInputStream in = null;
BufferedReader lines = null;
try {
in = fs.open(reduceFiles[i].getPath());
lines = new BufferedReader(new InputStreamReader(in));
String line;
while((line = lines.readLine()) != null) {
StringTokenizer tokens = new StringTokenizer(line, "\t*");
String attr = tokens.nextToken();
String dateTime = tokens.nextToken();
String taskType = tokens.nextToken();
double val = Long.parseLong(tokens.nextToken()) /
(double)DEFAULT_TIME_INTERVAL_MSEC;
writer.write(attr.substring(2)); // skip the stat type "l:"
writer.write("\t");
writer.write(dateTime);
writer.write("\t");
writer.write(taskType);
writer.write("\t");
writer.write(String.valueOf((float)val));
writer.newLine();
}
} finally {
if(lines != null) lines.close();
if(in != null) in.close();
}
}
} finally {
if(writer != null) writer.close();
if(out != null) out.close();
}
LOG.info("Analyzing results ... done.");
}
private static void cleanup(Configuration conf) throws IOException {
LOG.info("Cleaning up test files");
FileSystem fs = FileSystem.get(conf);
fs.delete(new Path(JHLA_ROOT_DIR), true);
}
}
| 42,514 | 36.623894 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.loadGenerator;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.PrintStream;
import java.net.UnknownHostException;
import java.util.EnumSet;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.ToolRunner;
/** The load generator is a tool for testing NameNode behavior under
* different client loads.
 * The main code is in Hadoop Common; see {@link LoadGenerator}. This class, LoadGeneratorMR,
* lets you run that LoadGenerator as a MapReduce job.
*
* The synopsis of the command is
* java LoadGeneratorMR
* -mr <numMapJobs> <outputDir> : results in outputDir/Results
* the rest of the args are the same as the original LoadGenerator.
*
*/
public class LoadGeneratorMR extends LoadGenerator {
public static final Log LOG = LogFactory.getLog(LoadGenerator.class);
private static int numMapTasks = 1;
private String mrOutDir;
final private static String USAGE_CMD = "java LoadGeneratorMR\n";
final private static String USAGE = USAGE_CMD
+ "-mr <numMapJobs> <outputDir> [MUST be first 3 args] \n" + USAGE_ARGS ;
// Constant "keys" used to communicate between map and reduce
final private static Text OPEN_EXECTIME = new Text("OpenExecutionTime");
final private static Text NUMOPS_OPEN = new Text("NumOpsOpen");
final private static Text LIST_EXECTIME = new Text("ListExecutionTime");
final private static Text NUMOPS_LIST = new Text("NumOpsList");
final private static Text DELETE_EXECTIME = new Text("DeletionExecutionTime");
final private static Text NUMOPS_DELETE = new Text("NumOpsDelete");
final private static Text CREATE_EXECTIME = new Text("CreateExecutionTime");
final private static Text NUMOPS_CREATE = new Text("NumOpsCreate");
final private static Text WRITE_CLOSE_EXECTIME = new Text("WriteCloseExecutionTime");
final private static Text NUMOPS_WRITE_CLOSE = new Text("NumOpsWriteClose");
final private static Text ELAPSED_TIME = new Text("ElapsedTime");
final private static Text TOTALOPS = new Text("TotalOps");
// Config keys to pass args from Main to the Job
final private static String LG_ROOT = "LG.root";
final private static String LG_SCRIPTFILE = "LG.scriptFile";
final private static String LG_MAXDELAYBETWEENOPS = "LG.maxDelayBetweenOps";
final private static String LG_NUMOFTHREADS = "LG.numOfThreads";
final private static String LG_READPR = "LG.readPr";
final private static String LG_WRITEPR = "LG.writePr";
final private static String LG_SEED = "LG.r";
final private static String LG_NUMMAPTASKS = "LG.numMapTasks";
final private static String LG_ELAPSEDTIME = "LG.elapsedTime";
final private static String LG_STARTTIME = "LG.startTime";
final private static String LG_FLAGFILE = "LG.flagFile";
/** Constructor */
public LoadGeneratorMR() throws IOException, UnknownHostException {
super();
}
public LoadGeneratorMR(Configuration conf) throws IOException, UnknownHostException {
this();
setConf(conf);
}
/** Main function called by tool runner.
* It first initializes data by parsing the command line arguments.
* It then calls the loadGenerator
*/
@Override
public int run(String[] args) throws Exception {
int exitCode = parseArgsMR(args);
if (exitCode != 0) {
return exitCode;
}
System.out.println("Running LoadGeneratorMR against fileSystem: " +
FileContext.getFileContext().getDefaultFileSystem().getUri());
return submitAsMapReduce(); // reducer will print the results
}
/**
* Parse the command line arguments and initialize the data.
* Only parse the first arg: -mr <numMapTasks> <mrOutDir> (MUST be first three Args)
* The rest are parsed by the Parent LoadGenerator
**/
private int parseArgsMR(String[] args) throws IOException {
try {
if (args.length >= 3 && args[0].equals("-mr")) {
numMapTasks = Integer.parseInt(args[1]);
mrOutDir = args[2];
if (mrOutDir.startsWith("-")) {
System.err.println("Missing output file parameter, instead got: "
+ mrOutDir);
System.err.println(USAGE);
return -1;
}
} else {
System.err.println(USAGE);
ToolRunner.printGenericCommandUsage(System.err);
return -1;
}
String[] strippedArgs = new String[args.length - 3];
for (int i = 0; i < strippedArgs.length; i++) {
strippedArgs[i] = args[i + 3];
}
super.parseArgs(true, strippedArgs); // Parse normal LoadGenerator args
} catch (NumberFormatException e) {
System.err.println("Illegal parameter: " + e.getLocalizedMessage());
System.err.println(USAGE);
return -1;
}
return 0;
}
/** Main program
*
* @param args command line arguments
* @throws Exception
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new LoadGeneratorMR(), args);
System.exit(res);
}
  // The following methods are only used when LoadGenerator is run as an MR job
/**
* Based on args we submit the LoadGenerator as MR job.
* Number of MapTasks is numMapTasks
* @return exitCode for job submission
*/
private int submitAsMapReduce() {
System.out.println("Running as a MapReduce job with " +
numMapTasks + " mapTasks; Output to file " + mrOutDir);
Configuration conf = new Configuration(getConf());
// First set all the args of LoadGenerator as Conf vars to pass to MR tasks
conf.set(LG_ROOT , root.toString());
conf.setInt(LG_MAXDELAYBETWEENOPS, maxDelayBetweenOps);
conf.setInt(LG_NUMOFTHREADS, numOfThreads);
conf.set(LG_READPR, readProbs[0]+""); //Pass Double as string
conf.set(LG_WRITEPR, writeProbs[0]+""); //Pass Double as string
    conf.setLong(LG_SEED, seed); // Random seed shared with the map tasks
conf.setInt(LG_NUMMAPTASKS, numMapTasks);
    if (scriptFile == null && durations[0] <= 0) {
      System.err.println("When run as a MapReduce job, an elapsed time or a script file must be specified");
System.exit(-1);
}
conf.setLong(LG_ELAPSEDTIME, durations[0]);
conf.setLong(LG_STARTTIME, startTime);
if (scriptFile != null) {
conf.set(LG_SCRIPTFILE , scriptFile);
}
conf.set(LG_FLAGFILE, flagFile.toString());
// Now set the necessary conf variables that apply to run MR itself.
JobConf jobConf = new JobConf(conf, LoadGenerator.class);
jobConf.setJobName("NNLoadGeneratorViaMR");
jobConf.setNumMapTasks(numMapTasks);
jobConf.setNumReduceTasks(1); // 1 reducer to collect the results
jobConf.setOutputKeyClass(Text.class);
jobConf.setOutputValueClass(IntWritable.class);
jobConf.setMapperClass(MapperThatRunsNNLoadGenerator.class);
jobConf.setReducerClass(ReducerThatCollectsLGdata.class);
jobConf.setInputFormat(DummyInputFormat.class);
jobConf.setOutputFormat(TextOutputFormat.class);
// Explicitly set number of max map attempts to 1.
jobConf.setMaxMapAttempts(1);
// Explicitly turn off speculative execution
jobConf.setSpeculativeExecution(false);
// This mapReduce job has no input but has output
FileOutputFormat.setOutputPath(jobConf, new Path(mrOutDir));
try {
JobClient.runJob(jobConf);
} catch (IOException e) {
System.err.println("Failed to run job: " + e.getMessage());
return -1;
}
return 0;
}
// Each split is empty
public static class EmptySplit implements InputSplit {
public void write(DataOutput out) throws IOException {}
public void readFields(DataInput in) throws IOException {}
public long getLength() {return 0L;}
public String[] getLocations() {return new String[0];}
}
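  // The job has no real input: EmptySplit and DummyInputFormat fabricate
  // numMapTasks empty splits, so each map task runs one LoadGenerator
  // instance driven purely by configuration.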
  // Dummy InputFormat that sends one record per map task - number of splits is numMapTasks
public static class DummyInputFormat extends Configured implements
InputFormat<LongWritable, Text> {
public InputSplit[] getSplits(JobConf conf, int numSplits) {
numSplits = conf.getInt("LG.numMapTasks", 1);
InputSplit[] ret = new InputSplit[numSplits];
for (int i = 0; i < numSplits; ++i) {
ret[i] = new EmptySplit();
}
return ret;
}
public RecordReader<LongWritable, Text> getRecordReader(
InputSplit ignored, JobConf conf, Reporter reporter) throws IOException {
return new RecordReader<LongWritable, Text>() {
boolean sentOneRecord = false;
public boolean next(LongWritable key, Text value)
throws IOException {
key.set(1);
value.set("dummy");
          if (!sentOneRecord) { // first call
sentOneRecord = true;
return true;
}
return false; // we have sent one record - we are done
}
public LongWritable createKey() {
return new LongWritable();
}
public Text createValue() {
return new Text();
}
public long getPos() throws IOException {
return 1;
}
public void close() throws IOException {
}
public float getProgress() throws IOException {
return 1;
}
};
}
}
public static class MapperThatRunsNNLoadGenerator extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
private JobConf jobConf;
@Override
public void configure(JobConf job) {
this.jobConf = job;
getArgsFromConfiguration(jobConf);
}
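    // Background thread that calls reporter.progress() every 30 seconds so the
    // task is not killed for inactivity while the long-running load generation executes.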
private class ProgressThread extends Thread {
      volatile boolean keepGoing; // while this is true, the thread keeps reporting progress
private Reporter reporter;
public ProgressThread(final Reporter r) {
this.reporter = r;
this.keepGoing = true;
}
public void run() {
while (keepGoing) {
if (!ProgressThread.interrupted()) {
try {
sleep(30 * 1000);
} catch (InterruptedException e) {
}
}
reporter.progress();
}
}
}
public void map(LongWritable key, Text value,
OutputCollector<Text, IntWritable> output, Reporter reporter)
throws IOException {
ProgressThread progressThread = new ProgressThread(reporter);
progressThread.start();
try {
new LoadGenerator(jobConf).generateLoadOnNN();
System.out
.println("Finished generating load on NN, sending results to the reducer");
printResults(System.out);
progressThread.keepGoing = false;
progressThread.join();
// Send results to Reducer
output.collect(OPEN_EXECTIME,
new IntWritable((int) executionTime[OPEN]));
output.collect(NUMOPS_OPEN, new IntWritable((int) numOfOps[OPEN]));
output.collect(LIST_EXECTIME,
new IntWritable((int) executionTime[LIST]));
output.collect(NUMOPS_LIST, new IntWritable((int) numOfOps[LIST]));
output.collect(DELETE_EXECTIME, new IntWritable(
(int) executionTime[DELETE]));
output.collect(NUMOPS_DELETE, new IntWritable((int) numOfOps[DELETE]));
output.collect(CREATE_EXECTIME, new IntWritable(
(int) executionTime[CREATE]));
output.collect(NUMOPS_CREATE, new IntWritable((int) numOfOps[CREATE]));
output.collect(WRITE_CLOSE_EXECTIME, new IntWritable(
(int) executionTime[WRITE_CLOSE]));
output.collect(NUMOPS_WRITE_CLOSE, new IntWritable(
(int) numOfOps[WRITE_CLOSE]));
output.collect(TOTALOPS, new IntWritable((int) totalOps));
output.collect(ELAPSED_TIME, new IntWritable((int) totalTime));
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void getArgsFromConfiguration(Configuration conf) {
maxDelayBetweenOps = conf.getInt(LG_MAXDELAYBETWEENOPS,
maxDelayBetweenOps);
numOfThreads = conf.getInt(LG_NUMOFTHREADS, numOfThreads);
readProbs[0] = Double.parseDouble(conf.get(LG_READPR, readProbs[0] + ""));
writeProbs[0] = Double.parseDouble(conf.get(LG_WRITEPR, writeProbs[0]
+ ""));
seed = conf.getLong(LG_SEED, seed);
numMapTasks = conf.getInt(LG_NUMMAPTASKS, numMapTasks);
root = new Path(conf.get(LG_ROOT, root.toString()));
durations[0] = conf.getLong(LG_ELAPSEDTIME, 0);
startTime = conf.getLong(LG_STARTTIME, 0);
scriptFile = conf.get(LG_SCRIPTFILE, null);
flagFile = new Path(conf.get(LG_FLAGFILE, FLAGFILE_DEFAULT));
if (durations[0] > 0 && scriptFile != null) {
System.err.println("Cannot specify both ElapsedTime and ScriptFile, exiting");
System.exit(-1);
}
try {
if (scriptFile != null && loadScriptFile(scriptFile, false) < 0) {
System.err.println("Error in scriptFile, exiting");
System.exit(-1);
}
} catch (IOException e) {
System.err.println("Error loading script file " + scriptFile);
e.printStackTrace();
}
if (durations[0] <= 0) {
System.err.println("A duration of zero or less is not allowed when running via MapReduce.");
System.exit(-1);
}
}
}
public static class ReducerThatCollectsLGdata extends MapReduceBase implements
Reducer<Text, IntWritable, Text, IntWritable> {
private IntWritable result = new IntWritable();
private JobConf jobConf;
@Override
public void configure(JobConf job) {
this.jobConf = job;
}
@Override
public void reduce(Text key, Iterator<IntWritable> values,
OutputCollector<Text, IntWritable> output, Reporter reporter)
throws IOException {
int sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
if (key.equals(OPEN_EXECTIME)){
executionTime[OPEN] = sum;
} else if (key.equals(NUMOPS_OPEN)){
numOfOps[OPEN] = sum;
} else if (key.equals(LIST_EXECTIME)){
executionTime[LIST] = sum;
} else if (key.equals(NUMOPS_LIST)){
numOfOps[LIST] = sum;
} else if (key.equals(DELETE_EXECTIME)){
executionTime[DELETE] = sum;
} else if (key.equals(NUMOPS_DELETE)){
numOfOps[DELETE] = sum;
} else if (key.equals(CREATE_EXECTIME)){
executionTime[CREATE] = sum;
} else if (key.equals(NUMOPS_CREATE)){
numOfOps[CREATE] = sum;
} else if (key.equals(WRITE_CLOSE_EXECTIME)){
System.out.println(WRITE_CLOSE_EXECTIME + " = " + sum);
executionTime[WRITE_CLOSE]= sum;
} else if (key.equals(NUMOPS_WRITE_CLOSE)){
numOfOps[WRITE_CLOSE] = sum;
} else if (key.equals(TOTALOPS)){
totalOps = sum;
} else if (key.equals(ELAPSED_TIME)){
totalTime = sum;
}
result.set(sum);
output.collect(key, result);
// System.out.println("Key = " + key + " Sum is =" + sum);
// printResults(System.out);
}
@Override
public void close() throws IOException {
// Output the result to a file Results in the output dir
FileContext fc;
try {
fc = FileContext.getFileContext(jobConf);
} catch (IOException ioe) {
System.err.println("Can not initialize the file system: " +
ioe.getLocalizedMessage());
return;
}
FSDataOutputStream o = fc.create(FileOutputFormat.getTaskOutputPath(jobConf, "Results"),
EnumSet.of(CreateFlag.CREATE));
PrintStream out = new PrintStream(o);
printResults(out);
out.close();
o.close();
}
}
}
| 17,312 | 34.770661 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Formatter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.text.DecimalFormat;
import java.text.NumberFormat;
/**
* Simple class that holds the number formatters used in the slive application
*/
class Formatter {
private static final String NUMBER_FORMAT = "###.###";
private static NumberFormat decFormatter = null;
private static NumberFormat percFormatter = null;
/**
* No construction allowed - only simple static accessor functions
*/
private Formatter() {
}
/**
* Gets a decimal formatter that has 3 decimal point precision
*
* @return NumberFormat formatter
*/
static synchronized NumberFormat getDecimalFormatter() {
if (decFormatter == null) {
decFormatter = new DecimalFormat(NUMBER_FORMAT);
}
return decFormatter;
}
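  // For example (illustrative): getDecimalFormatter().format(12.34567)
  // returns "12.346" under DecimalFormat's default HALF_EVEN rounding.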
/**
* Gets a percent formatter that has 3 decimal point precision
*
* @return NumberFormat formatter
*/
static synchronized NumberFormat getPercentFormatter() {
if (percFormatter == null) {
percFormatter = NumberFormat.getPercentInstance();
percFormatter.setMaximumFractionDigits(3);
}
return percFormatter;
}
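  // For example (illustrative, assuming an English locale):
  // getPercentFormatter().format(0.12345) returns "12.345%".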
}
| 1,946 | 27.632353 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Constants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.hadoop.util.StringUtils;
/**
* Constants used in various places in slive
*/
class Constants {
/**
   * This class should contain static members only - no construction allowed
*/
private Constants() {
}
/**
   * The distributions supported (or that may be supported)
*/
enum Distribution {
BEG, END, UNIFORM, MID;
String lowerName() {
return StringUtils.toLowerCase(this.name());
}
}
/**
* Allowed operation types
*/
enum OperationType {
READ, APPEND, RENAME, LS, MKDIR, DELETE, CREATE, TRUNCATE;
String lowerName() {
return StringUtils.toLowerCase(this.name());
}
}
// program info
static final String PROG_NAME = SliveTest.class.getSimpleName();
static final String PROG_VERSION = "0.1.0";
// useful constants
static final int MEGABYTES = 1048576;
// must be a multiple of
// BYTES_PER_LONG - used for reading and writing buffer sizes
static final int BUFFERSIZE = 64 * 1024;
// 8 bytes per long
static final int BYTES_PER_LONG = 8;
// used for finding the reducer file for a given number
static final String REDUCER_FILE = "part-%s";
// this is used to ensure the blocksize is a multiple of this config setting
static final String BYTES_PER_CHECKSUM = "io.bytes.per.checksum";
// min replication setting for verification
static final String MIN_REPLICATION = "dfs.namenode.replication.min";
// used for getting an option description given a set of distributions
// to substitute
static final String OP_DESCR = "pct,distribution where distribution is one of %s";
// keys for looking up a specific operation in the hadoop config
static final String OP_PERCENT = "slive.op.%s.pct";
static final String OP = "slive.op.%s";
static final String OP_DISTR = "slive.op.%s.dist";
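  // For example (illustrative): String.format(OP_PERCENT,
  // OperationType.READ.lowerName()) yields "slive.op.read.pct".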
// path constants
static final String BASE_DIR = "slive";
static final String DATA_DIR = "data";
static final String OUTPUT_DIR = "output";
// whether whenever data is written a flush should occur
static final boolean FLUSH_WRITES = false;
}
| 2,913 | 29.673684 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/RenameOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
 * Operation which selects two random files and attempts to rename the first
 * file to the second file.
 *
 * On success, this operation captures the time taken to rename and the number
 * of successful renames; on failure or error, it captures the number of
 * failures and the time taken to fail.
*/
class RenameOp extends Operation {
/**
* Class that holds the src and target for renames
*/
protected static class SrcTarget {
private Path src, target;
SrcTarget(Path src, Path target) {
this.src = src;
this.target = target;
}
Path getSrc() {
return src;
}
Path getTarget() {
return target;
}
}
private static final Log LOG = LogFactory.getLog(RenameOp.class);
RenameOp(ConfigExtractor cfg, Random rnd) {
super(RenameOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the file names to rename
*
* @return SrcTarget
*/
protected SrcTarget getRenames() {
Path src = getFinder().getFile();
Path target = getFinder().getFile();
return new SrcTarget(src, target);
}
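  // Note: src and target are chosen independently at random, so they may
  // occasionally refer to the same path.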
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
try {
// find the files to modify
SrcTarget targets = getRenames();
Path src = targets.getSrc();
Path target = targets.getTarget();
// capture results
boolean renamedOk = false;
long timeTaken = 0;
{
// rename it
long startTime = Timer.now();
renamedOk = fs.rename(src, target);
timeTaken = Timer.elapsed(startTime);
}
if (renamedOk) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
LOG.info("Renamed " + src + " to " + target);
} else {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Could not rename " + src + " to " + target);
}
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with renaming", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with renaming", e);
}
return out;
}
}
| 3,750 | 30.258333 | 80 |
java
|