repo (stringlengths 1–191, ⌀) | file (stringlengths 23–351) | code (stringlengths 0–5.32M) | file_length (int64 0–5.32M) | avg_line_length (float64 0–2.9k) | max_line_length (int64 0–288k) | extension_type (stringclasses 1)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.doReturn;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.*;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.Test;
/** Unit tests for using Job Token over RPC.
*
* System properties required:
* -Djava.security.krb5.conf=.../hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/test-classes/krb5.conf
* -Djava.net.preferIPv4Stack=true
*/
public class TestUmbilicalProtocolWithJobToken {
private static final String ADDRESS = "0.0.0.0";
public static final Log LOG = LogFactory
.getLog(TestUmbilicalProtocolWithJobToken.class);
private static Configuration conf;
static {
conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
}
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
}
@Test
public void testJobTokenRpc() throws Exception {
TaskUmbilicalProtocol mockTT = mock(TaskUmbilicalProtocol.class);
doReturn(TaskUmbilicalProtocol.versionID)
.when(mockTT).getProtocolVersion(anyString(), anyLong());
doReturn(ProtocolSignature.getProtocolSignature(
mockTT, TaskUmbilicalProtocol.class.getName(),
TaskUmbilicalProtocol.versionID, 0))
.when(mockTT).getProtocolSignature(anyString(), anyLong(), anyInt());
JobTokenSecretManager sm = new JobTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TaskUmbilicalProtocol.class).setInstance(mockTT)
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
String jobId = current.getUserName();
JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(tokenId, sm);
sm.addTokenForJob(jobId, token);
SecurityUtil.setTokenService(token, addr);
LOG.info("Service address for token is " + token.getService());
current.addToken(token);
current.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
TaskUmbilicalProtocol proxy = null;
try {
proxy = (TaskUmbilicalProtocol) RPC.getProxy(
TaskUmbilicalProtocol.class, TaskUmbilicalProtocol.versionID,
addr, conf);
proxy.ping(null);
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
return null;
}
});
}
}
| 5,090 | 37.862595 | 146 |
java
|
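The row above shows how a job token authenticates a client to a TaskUmbilicalProtocol server over SASL RPC. Stripped of the Mockito scaffolding, the client-side pattern is sketched below; it reuses the imports of TestUmbilicalProtocolWithJobToken and assumes a server built with the JobTokenSecretManager sm is already listening at addr (a sketch, not part of the dataset row).

// Sketch only: sm is the secret manager the RPC server was built with, addr its connect address.
static void pingWithJobToken(final Configuration conf, final InetSocketAddress addr,
    JobTokenSecretManager sm, String jobId) throws Exception {
  JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
  Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(tokenId, sm);
  sm.addTokenForJob(jobId, token);             // the server must know the job's secret
  SecurityUtil.setTokenService(token, addr);   // bind the token to the server address
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  ugi.addToken(token);                         // SASL picks the token up from the UGI
  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TaskUmbilicalProtocol proxy = (TaskUmbilicalProtocol) RPC.getProxy(
          TaskUmbilicalProtocol.class, TaskUmbilicalProtocol.versionID, addr, conf);
      try {
        proxy.ping(null);                      // any call made under the token's identity
      } finally {
        RPC.stopProxy(proxy);
      }
      return null;
    }
  });
}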
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestMRCredentials.java
|
package org.apache.hadoop.mapreduce.security;
/** Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests whether a protected secret passed from JobClient is
* available to the child task
*/
public class TestMRCredentials {
static final int NUM_OF_KEYS = 10;
private static MiniMRClientCluster mrCluster;
private static MiniDFSCluster dfsCluster;
private static int numSlaves = 1;
private static JobConf jConf;
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
System.setProperty("hadoop.log.dir", "logs");
Configuration conf = new Configuration();
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
.build();
jConf = new JobConf(conf);
FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
createKeysAsJson("keys.json");
}
@AfterClass
public static void tearDown() throws Exception {
if(mrCluster != null)
mrCluster.stop();
mrCluster = null;
if(dfsCluster != null)
dfsCluster.shutdown();
dfsCluster = null;
new File("keys.json").delete();
}
public static void createKeysAsJson (String fileName)
throws FileNotFoundException, IOException{
StringBuilder jsonString = new StringBuilder();
jsonString.append("{");
for(int i=0; i<NUM_OF_KEYS; i++) {
String keyName = "alias"+i;
String password = "password"+i;
jsonString.append("\""+ keyName +"\":"+ "\""+password+"\"" );
if (i < (NUM_OF_KEYS-1)){
jsonString.append(",");
}
}
jsonString.append("}");
FileOutputStream fos = new FileOutputStream(fileName);
fos.write(jsonString.toString().getBytes());
fos.close();
}
/**
* run a distributed job and verify that TokenCache is available
* @throws IOException
*/
@Test
public void test () throws IOException {
// make sure JT starts
Configuration jobConf = new JobConf(mrCluster.getConfig());
// provide namenodes names for the job to get the delegation tokens for
//String nnUri = dfsCluster.getNameNode().getUri(namenode).toString();
NameNode nn = dfsCluster.getNameNode();
URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());
jobConf.set("mapreduce.job.credentials.json" , "keys.json");
// using argument to pass the file name
String[] args = {
"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
};
int res = -1;
try {
res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
} catch (Exception e) {
System.out.println("Job failed with" + e.getLocalizedMessage());
e.printStackTrace(System.out);
fail("Job failed");
}
assertEquals("dist job res is not 0", res, 0);
}
}
| 4,405 | 30.697842 | 85 |
java
|
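TestMRCredentials above ships NUM_OF_KEYS alias/password pairs to the tasks through the mapreduce.job.credentials.json file; the actual verification happens inside CredentialsTestJob, which is not part of this row. As a hedged illustration of the task-side half, the sketch below shows how a mapper could check that one of those secrets arrived; the class name and the choice of alias0 are illustrative only.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
// Sketch: fails the task if the secret stored under "alias0" did not reach it.
public class SecretCheckingMapper
    extends Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
  @Override
  protected void map(IntWritable key, IntWritable value, Context context)
      throws IOException, InterruptedException {
    byte[] secret = context.getCredentials().getSecretKey(new Text("alias0"));
    if (secret == null) {
      throw new IOException("Secret for alias0 did not reach the task");
    }
    context.write(key, value);
  }
}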
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.hs.HistoryClientService;
import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService;
import org.apache.hadoop.mapreduce.v2.hs.JHSDelegationTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.Test;
public class TestJHSSecurity {
private static final Log LOG = LogFactory.getLog(TestJHSSecurity.class);
@Test
public void testDelegationToken() throws IOException, InterruptedException {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
final YarnConfiguration conf = new YarnConfiguration(new JobConf());
// Just a random principal
conf.set(JHAdminConfig.MR_HISTORY_PRINCIPAL,
"RandomOrc/[email protected]");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
final long initialInterval = 10000l;
final long maxLifetime= 20000l;
final long renewInterval = 10000l;
JobHistoryServer jobHistoryServer = null;
MRClientProtocol clientUsingDT = null;
long tokenFetchTime;
try {
jobHistoryServer = new JobHistoryServer() {
protected void doSecureLogin(Configuration conf) throws IOException {
// no keytab based login
};
@Override
protected JHSDelegationTokenSecretManager createJHSSecretManager(
Configuration conf, HistoryServerStateStoreService store) {
return new JHSDelegationTokenSecretManager(initialInterval,
maxLifetime, renewInterval, 3600000, store);
}
@Override
protected HistoryClientService createHistoryClientService() {
return new HistoryClientService(historyContext,
this.jhsDTSecretManager) {
@Override
protected void initializeWebApp(Configuration conf) {
// Don't need it, skip.
}
};
}
};
// final JobHistoryServer jobHistoryServer = jhServer;
jobHistoryServer.init(conf);
jobHistoryServer.start();
final MRClientProtocol hsService = jobHistoryServer.getClientService()
.getClientHandler();
// Fake the authentication-method
UserGroupInformation loggedInUser = UserGroupInformation
.createRemoteUser("[email protected]");
Assert.assertEquals("testrenewer", loggedInUser.getShortUserName());
// Default realm is APACHE.ORG
loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
Token token = getDelegationToken(loggedInUser, hsService,
loggedInUser.getShortUserName());
tokenFetchTime = System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
// Now try talking to JHS using the delegation token
clientUsingDT = getMRClientProtocol(token, jobHistoryServer
.getClientService().getBindAddress(), "TheDarkLord", conf);
GetJobReportRequest jobReportRequest =
Records.newRecord(GetJobReportRequest.class);
jobReportRequest.setJobId(MRBuilderUtils.newJobId(123456, 1, 1));
try {
clientUsingDT.getJobReport(jobReportRequest);
} catch (IOException e) {
Assert.assertEquals("Unknown job job_123456_0001", e.getMessage());
}
// Renew after 50% of token age.
while(System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) {
Thread.sleep(500l);
}
long nextExpTime = renewDelegationToken(loggedInUser, hsService, token);
long renewalTime = System.currentTimeMillis();
LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "
+ nextExpTime);
// Wait for first expiry, but before renewed expiry.
while (System.currentTimeMillis() < tokenFetchTime + initialInterval
&& System.currentTimeMillis() < nextExpTime) {
Thread.sleep(500l);
}
Thread.sleep(50l);
// Valid token because of renewal.
try {
clientUsingDT.getJobReport(jobReportRequest);
} catch (IOException e) {
Assert.assertEquals("Unknown job job_123456_0001", e.getMessage());
}
// Wait for expiry.
while(System.currentTimeMillis() < renewalTime + renewInterval) {
Thread.sleep(500l);
}
Thread.sleep(50l);
LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid");
// Token should have expired.
try {
clientUsingDT.getJobReport(jobReportRequest);
fail("Should not have succeeded with an expired token");
} catch (IOException e) {
assertTrue(e.getCause().getMessage().contains("is expired"));
}
// Test cancellation
// Stop the existing proxy, start another.
if (clientUsingDT != null) {
// RPC.stopProxy(clientUsingDT);
clientUsingDT = null;
}
token = getDelegationToken(loggedInUser, hsService,
loggedInUser.getShortUserName());
tokenFetchTime = System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
// Now try talking to HSService using the delegation token
clientUsingDT = getMRClientProtocol(token, jobHistoryServer
.getClientService().getBindAddress(), "loginuser2", conf);
try {
clientUsingDT.getJobReport(jobReportRequest);
} catch (IOException e) {
fail("Unexpected exception" + e);
}
cancelDelegationToken(loggedInUser, hsService, token);
// Testing the token with different renewer to cancel the token
Token tokenWithDifferentRenewer = getDelegationToken(loggedInUser,
hsService, "yarn");
cancelDelegationToken(loggedInUser, hsService, tokenWithDifferentRenewer);
if (clientUsingDT != null) {
// RPC.stopProxy(clientUsingDT);
clientUsingDT = null;
}
// Creating a new connection.
clientUsingDT = getMRClientProtocol(token, jobHistoryServer
.getClientService().getBindAddress(), "loginuser2", conf);
LOG.info("Cancelled delegation token at: " + System.currentTimeMillis());
// Verify cancellation worked.
try {
clientUsingDT.getJobReport(jobReportRequest);
fail("Should not have succeeded with a cancelled delegation token");
} catch (IOException e) {
// Expected: the cancelled token must be rejected by the server.
}
} finally {
jobHistoryServer.stop();
}
}
private Token getDelegationToken(
final UserGroupInformation loggedInUser,
final MRClientProtocol hsService, final String renewerString)
throws IOException, InterruptedException {
// Get the delegation token directly as it is a little difficult to setup
// the kerberos based rpc.
Token token = loggedInUser
.doAs(new PrivilegedExceptionAction<Token>() {
@Override
public Token run() throws IOException {
GetDelegationTokenRequest request = Records
.newRecord(GetDelegationTokenRequest.class);
request.setRenewer(renewerString);
return hsService.getDelegationToken(request).getDelegationToken();
}
});
return token;
}
private long renewDelegationToken(final UserGroupInformation loggedInUser,
final MRClientProtocol hsService, final Token dToken)
throws IOException, InterruptedException {
long nextExpTime = loggedInUser.doAs(new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws IOException {
RenewDelegationTokenRequest request = Records
.newRecord(RenewDelegationTokenRequest.class);
request.setDelegationToken(dToken);
return hsService.renewDelegationToken(request).getNextExpirationTime();
}
});
return nextExpTime;
}
private void cancelDelegationToken(final UserGroupInformation loggedInUser,
final MRClientProtocol hsService, final Token dToken)
throws IOException, InterruptedException {
loggedInUser.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
CancelDelegationTokenRequest request = Records
.newRecord(CancelDelegationTokenRequest.class);
request.setDelegationToken(dToken);
hsService.cancelDelegationToken(request);
return null;
}
});
}
private MRClientProtocol getMRClientProtocol(Token token,
final InetSocketAddress hsAddress, String user, final Configuration conf) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
ugi.addToken(ConverterUtils.convertFromYarn(token, hsAddress));
final YarnRPC rpc = YarnRPC.create(conf);
MRClientProtocol hsWithDT = ugi
.doAs(new PrivilegedAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() {
return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
hsAddress, conf);
}
});
return hsWithDT;
}
}
| 11,482 | 37.276667 | 87 |
java
|
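TestJHSSecurity above walks the whole delegation-token lifecycle against a JobHistoryServer: obtain, use, renew before the initial interval elapses, let the token expire, and finally cancel. Condensed into the three private helpers defined at the bottom of that file, the lifecycle reads roughly as follows (a sketch that reuses loggedInUser, hsService, LOG, and the imports from the test above):

// Sketch: lifecycle of a JHS delegation token, using the helpers defined in TestJHSSecurity.
private void delegationTokenLifecycle(UserGroupInformation loggedInUser,
    MRClientProtocol hsService) throws IOException, InterruptedException {
  // 1. Obtain a token, naming ourselves as the renewer.
  Token token = getDelegationToken(loggedInUser, hsService,
      loggedInUser.getShortUserName());
  // 2. Renew before it expires; the server answers with the next expiration time.
  long nextExpiry = renewDelegationToken(loggedInUser, hsService, token);
  LOG.info("Token renewed, now valid until " + nextExpiry);
  // 3. Cancel; any later RPC made with this token must be rejected by the server.
  cancelDelegationToken(loggedInUser, hsService, token);
}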
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java
|
/** Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestBinaryTokenFile {
private static final String KEY_SECURITY_TOKEN_FILE_NAME = "key-security-token-file";
private static final String DELEGATION_TOKEN_KEY = "Hdfs";
// my sleep class
static class MySleepMapper extends SleepJob.SleepMapper {
/**
* attempts to access tokenCache as from client
*/
@Override
public void map(IntWritable key, IntWritable value, Context context)
throws IOException, InterruptedException {
// get context token storage:
final Credentials contextCredentials = context.getCredentials();
final Collection<Token<? extends TokenIdentifier>> contextTokenCollection = contextCredentials.getAllTokens();
for (Token<? extends TokenIdentifier> t : contextTokenCollection) {
System.out.println("Context token: [" + t + "]");
}
if (contextTokenCollection.size() != 2) { // one job token and one delegation token
// fail the test:
throw new RuntimeException("Exactly 2 tokens are expected in the contextTokenCollection: " +
"one job token and one delegation token, but was found " + contextTokenCollection.size() + " tokens.");
}
final Token<? extends TokenIdentifier> dt = contextCredentials.getToken(new Text(DELEGATION_TOKEN_KEY));
if (dt == null) {
throw new RuntimeException("Token for key ["+DELEGATION_TOKEN_KEY+"] not found in the job context.");
}
String tokenFile0 = context.getConfiguration().get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
if (tokenFile0 != null) {
throw new RuntimeException("Token file key ["+MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY+"] found in the configuration. It should have been removed from the configuration.");
}
final String tokenFile = context.getConfiguration().get(KEY_SECURITY_TOKEN_FILE_NAME);
if (tokenFile == null) {
throw new RuntimeException("Token file key ["+KEY_SECURITY_TOKEN_FILE_NAME+"] not found in the job configuration.");
}
final Credentials binaryCredentials = new Credentials();
binaryCredentials.readTokenStorageStream(new DataInputStream(new FileInputStream(
tokenFile)));
final Collection<Token<? extends TokenIdentifier>> binaryTokenCollection = binaryCredentials.getAllTokens();
if (binaryTokenCollection.size() != 1) {
throw new RuntimeException("The token collection read from file ["+tokenFile+"] must have size = 1.");
}
final Token<? extends TokenIdentifier> binTok = binaryTokenCollection.iterator().next();
System.out.println("The token read from binary file: t = [" + binTok + "]");
// Verify that dt is same as the token in the file:
if (!dt.equals(binTok)) {
throw new RuntimeException(
"Delegation token in job is not same as the token passed in file:"
+ " tokenInFile=[" + binTok + "], dt=[" + dt + "].");
}
// Now test the user tokens.
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
// Print all the UGI tokens for diagnostic purposes:
final Collection<Token<? extends TokenIdentifier>> ugiTokenCollection = ugi.getTokens();
for (Token<? extends TokenIdentifier> t: ugiTokenCollection) {
System.out.println("UGI token: [" + t + "]");
}
final Token<? extends TokenIdentifier> ugiToken
= ugi.getCredentials().getToken(new Text(DELEGATION_TOKEN_KEY));
if (ugiToken == null) {
throw new RuntimeException("Token for key ["+DELEGATION_TOKEN_KEY+"] not found among the UGI tokens.");
}
if (!ugiToken.equals(binTok)) {
throw new RuntimeException(
"UGI token is not same as the token passed in binary file:"
+ " tokenInBinFile=[" + binTok + "], ugiTok=[" + ugiToken + "].");
}
super.map(key, value, context);
}
}
class MySleepJob extends SleepJob {
@Override
public Job createJob(int numMapper, int numReducer,
long mapSleepTime, int mapSleepCount,
long reduceSleepTime, int reduceSleepCount)
throws IOException {
Job job = super.createJob(numMapper, numReducer,
mapSleepTime, mapSleepCount,
reduceSleepTime, reduceSleepCount);
job.setMapperClass(MySleepMapper.class);
//Populate tokens here because security is disabled.
setupBinaryTokenFile(job);
return job;
}
private void setupBinaryTokenFile(Job job) {
// Credentials in the job will not have delegation tokens
// because security is disabled. Fetch delegation tokens
// and store in binary token file.
createBinaryTokenFile(job.getConfiguration());
job.getConfiguration().set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY,
binaryTokenFileName.toString());
// NB: the MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY
// key now gets deleted from config,
// so it's not accessible in the job's config. So,
// we use another key to pass the file name into the job configuration:
job.getConfiguration().set(KEY_SECURITY_TOKEN_FILE_NAME,
binaryTokenFileName.toString());
}
}
private static MiniMRYarnCluster mrCluster;
private static MiniDFSCluster dfsCluster;
private static final Path TEST_DIR =
new Path(System.getProperty("test.build.data","/tmp"));
private static final Path binaryTokenFileName = new Path(TEST_DIR, "tokenFile.binary");
private static final int numSlaves = 1; // num of data nodes
private static final int noOfNMs = 1;
private static Path p1;
@BeforeClass
public static void setUp() throws Exception {
final Configuration conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
builder.checkExitOnShutdown(true);
builder.numDataNodes(numSlaves);
builder.format(true);
builder.racks(null);
dfsCluster = builder.build();
mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
mrCluster.init(conf);
mrCluster.start();
NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
FileSystem fs = dfsCluster.getFileSystem();
p1 = new Path("file1");
p1 = fs.makeQualified(p1);
}
@AfterClass
public static void tearDown() throws Exception {
if(mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
if(dfsCluster != null) {
dfsCluster.shutdown();
dfsCluster = null;
}
}
private static void createBinaryTokenFile(Configuration conf) {
// Fetch delegation tokens and store in binary token file.
try {
Credentials cred1 = new Credentials();
Credentials cred2 = new Credentials();
TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 },
conf);
for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) {
cred2.addToken(new Text(DELEGATION_TOKEN_KEY), t);
}
DataOutputStream os = new DataOutputStream(new FileOutputStream(
binaryTokenFileName.toString()));
try {
cred2.writeTokenStorageToStream(os);
} finally {
os.close();
}
} catch (IOException e) {
Assert.fail("Exception " + e);
}
}
/**
* run a distributed job and verify that TokenCache is available
* @throws IOException
*/
@Test
public void testBinaryTokenFile() throws IOException {
Configuration conf = mrCluster.getConfig();
// provide namenodes names for the job to get the delegation tokens for
final String nnUri = dfsCluster.getURI(0).toString();
conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
// using argument to pass the file name
final String[] args = {
"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
};
int res = -1;
try {
res = ToolRunner.run(conf, new MySleepJob(), args);
} catch (Exception e) {
System.out.println("Job failed with " + e.getLocalizedMessage());
e.printStackTrace(System.out);
fail("Job failed");
}
assertEquals("dist job res is not 0:", 0, res);
}
/**
* run a distributed job with -tokenCacheFile option parameter and
* verify that no exception happens.
* @throws IOException
*/
@Test
public void testTokenCacheFile() throws IOException {
Configuration conf = mrCluster.getConfig();
createBinaryTokenFile(conf);
// provide namenodes names for the job to get the delegation tokens for
final String nnUri = dfsCluster.getURI(0).toString();
conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
// using argument to pass the file name
final String[] args = {
"-tokenCacheFile", binaryTokenFileName.toString(),
"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
};
int res = -1;
try {
res = ToolRunner.run(conf, new SleepJob(), args);
} catch (Exception e) {
System.out.println("Job failed with " + e.getLocalizedMessage());
e.printStackTrace(System.out);
fail("Job failed");
}
assertEquals("dist job res is not 0:", 0, res);
}
}
| 11,523 | 38.465753 | 184 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/token/delegation/TestDelegationToken.java
|
/** Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.token.delegation;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
@Ignore
public class TestDelegationToken {
private MiniMRCluster cluster;
private UserGroupInformation user1;
private UserGroupInformation user2;
@Before
public void setup() throws Exception {
user1 = UserGroupInformation.createUserForTesting("alice",
new String[]{"users"});
user2 = UserGroupInformation.createUserForTesting("bob",
new String[]{"users"});
cluster = new MiniMRCluster(0,0,1,"file:///",1);
}
@SuppressWarnings("deprecation")
@Test
public void testDelegationToken() throws Exception {
final JobClient client;
client = user1.doAs(new PrivilegedExceptionAction<JobClient>(){
@Override
public JobClient run() throws Exception {
return new JobClient(cluster.createJobConf());
}
});
final JobClient bobClient;
bobClient = user2.doAs(new PrivilegedExceptionAction<JobClient>(){
@Override
public JobClient run() throws Exception {
return new JobClient(cluster.createJobConf());
}
});
final Token<DelegationTokenIdentifier> token =
client.getDelegationToken(new Text(user1.getUserName()));
DataInputBuffer inBuf = new DataInputBuffer();
byte[] bytes = token.getIdentifier();
inBuf.reset(bytes, bytes.length);
DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
ident.readFields(inBuf);
assertEquals("alice", ident.getUser().getUserName());
long createTime = ident.getIssueDate();
long maxTime = ident.getMaxDate();
long currentTime = System.currentTimeMillis();
System.out.println("create time: " + createTime);
System.out.println("current time: " + currentTime);
System.out.println("max time: " + maxTime);
assertTrue("createTime < current", createTime < currentTime);
assertTrue("current < maxTime", currentTime < maxTime);
// renew should work as user alice
user1.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
client.renewDelegationToken(token);
client.renewDelegationToken(token);
return null;
}
});
// bob should fail to renew
user2.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
bobClient.renewDelegationToken(token);
Assert.fail("bob renew");
} catch (AccessControlException ace) {
// PASS
}
return null;
}
});
// bob should fail to cancel
user2.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
bobClient.cancelDelegationToken(token);
Assert.fail("bob cancel");
} catch (AccessControlException ace) {
// PASS
}
return null;
}
});
// alice should be able to cancel but only cancel once
user1.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
client.cancelDelegationToken(token);
try {
client.cancelDelegationToken(token);
Assert.fail("second alice cancel");
} catch (InvalidToken it) {
// PASS
}
return null;
}
});
}
}
| 4,904 | 33.0625 | 77 |
java
|
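The assertions in TestDelegationToken decode the token identifier by hand with a DataInputBuffer. A small helper capturing just that decoding step (a sketch assumed to live in the same ...token.delegation package as the test; it is not part of the JobClient API):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.Token;
// Sketch: deserializes the identifier carried inside a MapReduce delegation token.
public final class DelegationTokenDecoder {
  private DelegationTokenDecoder() {}
  public static DelegationTokenIdentifier decode(Token<DelegationTokenIdentifier> token)
      throws IOException {
    DataInputBuffer in = new DataInputBuffer();
    byte[] bytes = token.getIdentifier();
    in.reset(bytes, bytes.length);
    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
    ident.readFields(in);
    return ident;
  }
}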
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security.ssl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Assert;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
public class TestEncryptedShuffle {
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestEncryptedShuffle.class.getSimpleName();
private String classpathDir;
@BeforeClass
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
}
@Before
public void createCustomYarnClasspath() throws Exception {
classpathDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
new File(classpathDir, "core-site.xml").delete();
}
@After
public void cleanUpMiniClusterSpecialConfig() throws Exception {
new File(classpathDir, "core-site.xml").delete();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, classpathDir);
}
private MiniDFSCluster dfsCluster = null;
private MiniMRClientCluster mrCluster = null;
private void startCluster(Configuration conf) throws Exception {
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", "target/test-dir");
}
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
StringUtils.join(",",
YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
+ File.pathSeparator + classpathDir;
conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
dfsCluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSystem = dfsCluster.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
fileSystem.setPermission(
new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(
new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(
new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
FileSystem.setDefaultUri(conf, fileSystem.getUri());
mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
// so the minicluster conf is available to the containers.
Writer writer = new FileWriter(classpathDir + "/core-site.xml");
mrCluster.getConfig().writeXml(writer);
writer.close();
}
private void stopCluster() throws Exception {
if (mrCluster != null) {
mrCluster.stop();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
protected JobConf getJobConf() throws IOException {
return new JobConf(mrCluster.getConfig());
}
private void encryptedShuffleWithCerts(boolean useClientCerts)
throws Exception {
try {
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir =
KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
useClientCerts);
conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
startCluster(conf);
FileSystem fs = FileSystem.get(getJobConf());
Path inputDir = new Path("input");
fs.mkdirs(inputDir);
Writer writer =
new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
writer.write("hello");
writer.close();
Path outputDir = new Path("output", "output");
JobConf jobConf = new JobConf(getJobConf());
jobConf.setInt("mapred.map.tasks", 1);
jobConf.setInt("mapred.map.max.attempts", 1);
jobConf.setInt("mapred.reduce.max.attempts", 1);
jobConf.set("mapred.input.dir", inputDir.toString());
jobConf.set("mapred.output.dir", outputDir.toString());
JobClient jobClient = new JobClient(jobConf);
RunningJob runJob = jobClient.submitJob(jobConf);
runJob.waitForCompletion();
Assert.assertTrue(runJob.isComplete());
Assert.assertTrue(runJob.isSuccessful());
} finally {
stopCluster();
}
}
@Test
public void encryptedShuffleWithClientCerts() throws Exception {
encryptedShuffleWithCerts(true);
}
@Test
public void encryptedShuffleWithoutClientCerts() throws Exception {
encryptedShuffleWithCerts(false);
}
}
| 6,374 | 35.016949 | 81 |
java
|
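In TestEncryptedShuffle the only MapReduce-side switch is MRConfig.SHUFFLE_SSL_ENABLED_KEY; the keystores and the ssl-client.xml/ssl-server.xml files come from KeyStoreTestUtil.setupSSLConfig. The SSL-specific part of the setup, distilled into one helper (a sketch that reuses BASEDIR and the imports of the test above):

// Sketch: produce a Configuration with encrypted shuffle enabled, as in
// encryptedShuffleWithCerts above.
private static Configuration sslShuffleConf(boolean useClientCerts) throws Exception {
  Configuration conf = new Configuration();
  String keystoresDir = new File(BASEDIR).getAbsolutePath();
  String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
  // Writes keystores/truststores plus ssl-client.xml and ssl-server.xml onto the
  // test classpath; useClientCerts decides whether client certificates are required.
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, useClientCerts);
  // Switch the shuffle transport from HTTP to HTTPS.
  conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
  return conf;
}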
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/TestMRAsyncDiskService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
import org.junit.Test;
/**
* A test for MRAsyncDiskService.
*/
public class TestMRAsyncDiskService extends TestCase {
public static final Log LOG = LogFactory.getLog(TestMRAsyncDiskService.class);
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString();
@Override
protected void setUp() {
FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
}
/**
* Given 'pathname', compute an equivalent path relative to the cwd.
* @param pathname the path to a directory.
* @return the path to that same directory, relative to ${user.dir}.
*/
private String relativeToWorking(String pathname) {
String cwd = System.getProperty("user.dir", "/");
// normalize pathname and cwd into full directory paths.
pathname = (new Path(pathname)).toUri().getPath();
cwd = (new Path(cwd)).toUri().getPath();
String [] cwdParts = cwd.split(Path.SEPARATOR);
String [] pathParts = pathname.split(Path.SEPARATOR);
// There are three possible cases:
// 1) pathname and cwd are equal. Return '.'
// 2) pathname is under cwd. Return the components that are under it.
// e.g., cwd = /a/b, path = /a/b/c, return 'c'
// 3) pathname is outside of cwd. Find the common components, if any,
// and subtract them from the returned path, then return enough '..'
// components to "undo" the non-common components of cwd, then all
// the remaining parts of pathname.
// e.g., cwd = /a/b, path = /a/c, return '../c'
if (cwd.equals(pathname)) {
LOG.info("relative to working: " + pathname + " -> .");
return "."; // They match exactly.
}
// Determine how many path components are in common between cwd and path.
int common = 0;
for (int i = 0; i < Math.min(cwdParts.length, pathParts.length); i++) {
if (cwdParts[i].equals(pathParts[i])) {
common++;
} else {
break;
}
}
// output path stringbuilder.
StringBuilder sb = new StringBuilder();
// For everything in cwd that isn't in pathname, add a '..' to undo it.
int parentDirsRequired = cwdParts.length - common;
for (int i = 0; i < parentDirsRequired; i++) {
sb.append("..");
sb.append(Path.SEPARATOR);
}
// Then append all non-common parts of 'pathname' itself.
for (int i = common; i < pathParts.length; i++) {
sb.append(pathParts[i]);
sb.append(Path.SEPARATOR);
}
// Don't end with a '/'.
String s = sb.toString();
if (s.endsWith(Path.SEPARATOR)) {
s = s.substring(0, s.length() - 1);
}
LOG.info("relative to working: " + pathname + " -> " + s);
return s;
}
@Test
/** Test that the relativeToWorking() method above does what we expect. */
public void testRelativeToWorking() {
assertEquals(".", relativeToWorking(System.getProperty("user.dir", ".")));
String cwd = System.getProperty("user.dir", ".");
Path cwdPath = new Path(cwd);
Path subdir = new Path(cwdPath, "foo");
assertEquals("foo", relativeToWorking(subdir.toUri().getPath()));
Path subsubdir = new Path(subdir, "bar");
assertEquals("foo/bar", relativeToWorking(subsubdir.toUri().getPath()));
Path parent = new Path(cwdPath, "..");
assertEquals("..", relativeToWorking(parent.toUri().getPath()));
Path sideways = new Path(parent, "baz");
assertEquals("../baz", relativeToWorking(sideways.toUri().getPath()));
}
@Test
/** Test that volumes specified as relative paths are handled properly
* by MRAsyncDiskService (MAPREDUCE-1887).
*/
public void testVolumeNormalization() throws Throwable {
LOG.info("TEST_ROOT_DIR is " + TEST_ROOT_DIR);
String relativeTestRoot = relativeToWorking(TEST_ROOT_DIR);
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String [] vols = new String[] { relativeTestRoot + "/0",
relativeTestRoot + "/1" };
// Put a file in one of the volumes to be cleared on startup.
Path delDir = new Path(vols[0], MRAsyncDiskService.TOBEDELETED);
localFileSystem.mkdirs(delDir);
localFileSystem.create(new Path(delDir, "foo")).close();
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
makeSureCleanedUp(vols, service);
}
/**
* This test creates some directories and then removes them through
* MRAsyncDiskService.
*/
@Test
public void testMRAsyncDiskService() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
String a = "a";
String b = "b";
String c = "b/c";
String d = "d";
File fa = new File(vols[0], a);
File fb = new File(vols[1], b);
File fc = new File(vols[1], c);
File fd = new File(vols[1], d);
// Create the directories
fa.mkdirs();
fb.mkdirs();
fc.mkdirs();
fd.mkdirs();
assertTrue(fa.exists());
assertTrue(fb.exists());
assertTrue(fc.exists());
assertTrue(fd.exists());
// Move and delete them
service.moveAndDeleteRelativePath(vols[0], a);
assertFalse(fa.exists());
service.moveAndDeleteRelativePath(vols[1], b);
assertFalse(fb.exists());
assertFalse(fc.exists());
assertFalse(service.moveAndDeleteRelativePath(vols[1], "not_exists"));
// asyncDiskService is NOT able to delete files outside all volumes.
IOException ee = null;
try {
service.moveAndDeleteAbsolutePath(TEST_ROOT_DIR + "/2");
} catch (IOException e) {
ee = e;
}
assertNotNull("asyncDiskService should not be able to delete files "
+ "outside all volumes", ee);
// asyncDiskService is able to automatically find the file in one
// of the volumes.
assertTrue(service.moveAndDeleteAbsolutePath(vols[1] + Path.SEPARATOR_CHAR + d));
// Make sure everything is cleaned up
makeSureCleanedUp(vols, service);
}
/**
* This test creates some directories inside the volume roots, and then
* call asyncDiskService.MoveAndDeleteAllVolumes.
* We should be able to delete all files/dirs inside the volumes except
* the toBeDeleted directory.
*/
@Test
public void testMRAsyncDiskServiceMoveAndDeleteAllVolumes() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
String a = "a";
String b = "b";
String c = "b/c";
String d = "d";
File fa = new File(vols[0], a);
File fb = new File(vols[1], b);
File fc = new File(vols[1], c);
File fd = new File(vols[1], d);
// Create the directories
fa.mkdirs();
fb.mkdirs();
fc.mkdirs();
fd.mkdirs();
assertTrue(fa.exists());
assertTrue(fb.exists());
assertTrue(fc.exists());
assertTrue(fd.exists());
// Delete all of them
service.cleanupAllVolumes();
assertFalse(fa.exists());
assertFalse(fb.exists());
assertFalse(fc.exists());
assertFalse(fd.exists());
// Make sure everything is cleaned up
makeSureCleanedUp(vols, service);
}
/**
* This test creates some directories inside the toBeDeleted directory and
* then start the asyncDiskService.
* AsyncDiskService will create tasks to delete the content inside the
* toBeDeleted directories.
*/
@Test
public void testMRAsyncDiskServiceStartupCleaning() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
String a = "a";
String b = "b";
String c = "b/c";
String d = "d";
// Create directories inside SUBDIR
String suffix = Path.SEPARATOR_CHAR + MRAsyncDiskService.TOBEDELETED;
File fa = new File(vols[0] + suffix, a);
File fb = new File(vols[1] + suffix, b);
File fc = new File(vols[1] + suffix, c);
File fd = new File(vols[1] + suffix, d);
// Create the directories
fa.mkdirs();
fb.mkdirs();
fc.mkdirs();
fd.mkdirs();
assertTrue(fa.exists());
assertTrue(fb.exists());
assertTrue(fc.exists());
assertTrue(fd.exists());
// Create the asyncDiskService which will delete all contents inside SUBDIR
MRAsyncDiskService service = new MRAsyncDiskService(
localFileSystem, vols);
// Make sure everything is cleaned up
makeSureCleanedUp(vols, service);
}
private void makeSureCleanedUp(String[] vols, MRAsyncDiskService service)
throws Throwable {
// Sleep at most 5 seconds to make sure the deleted items are all gone.
service.shutdown();
if (!service.awaitTermination(5000)) {
fail("MRAsyncDiskService is still not shutdown in 5 seconds!");
}
// All contents should be gone by now.
for (int i = 0; i < vols.length; i++) {
File subDir = new File(vols[i]);
String[] subDirContent = subDir.list();
assertEquals("Volume should contain a single child: "
+ MRAsyncDiskService.TOBEDELETED, 1, subDirContent.length);
File toBeDeletedDir = new File(vols[i], MRAsyncDiskService.TOBEDELETED);
String[] content = toBeDeletedDir.list();
assertNotNull("Cannot find " + toBeDeletedDir, content);
assertEquals("" + toBeDeletedDir + " should be empty now.", 0,
content.length);
}
}
@Test
public void testToleratesSomeUnwritableVolumes() throws Throwable {
FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
String[] vols = new String[]{TEST_ROOT_DIR + "/0",
TEST_ROOT_DIR + "/1"};
assertTrue(new File(vols[0]).mkdirs());
assertEquals(0, FileUtil.chmod(vols[0], "400")); // read only
try {
new MRAsyncDiskService(localFileSystem, vols);
} finally {
FileUtil.chmod(vols[0], "755"); // make writable again
}
}
}
| 11,460 | 31.745714 | 85 |
java
|
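TestMRAsyncDiskService above exercises MRAsyncDiskService, whose implementation follows in the next row: paths are renamed into a per-volume toBeDeleted directory and removed by background threads. Its basic lifecycle, distilled from the tests into a hedged standalone sketch (the volume paths are illustrative):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
public class AsyncDeleteExample {
  public static void main(String[] args) throws Exception {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    // Any writable local directories can serve as volume roots.
    String[] vols = { "/tmp/mrasync/0", "/tmp/mrasync/1" };
    MRAsyncDiskService service = new MRAsyncDiskService(localFs, vols);
    // Create something to delete, then hand it to the service: the path is renamed
    // into <volume>/toBeDeleted right away and deleted in the background.
    new File(vols[0], "scratch").mkdirs();
    service.moveAndDeleteRelativePath(vols[0], "scratch");
    // Shut down and wait (bounded) for the pending deletions to finish.
    service.shutdown();
    if (!service.awaitTermination(5000)) {
      System.err.println("Deletions still pending after 5 seconds");
    }
  }
}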
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.AsyncDiskService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class is a container of multiple thread pools, each for a volume,
* so that we can schedule async disk operations easily.
*
* Examples of async disk operations are deletion of files.
* We can move the files to a "toBeDeleted" folder before asynchronously
* deleting them, so that the caller returns quickly.
*
* Users should not write files into the "toBeDeleted" folder, otherwise
* the files can be gone any time we restart the MRAsyncDiskService.
*
* This class also contains all operations that will be performed by the
* thread pools.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MRAsyncDiskService {
public static final Log LOG = LogFactory.getLog(MRAsyncDiskService.class);
AsyncDiskService asyncDiskService;
public static final String TOBEDELETED = "toBeDeleted";
/**
* Create an MRAsyncDiskService with a set of volumes (specified by their
* root directories).
*
* The MRAsyncDiskService uses one ThreadPool per volume to do the async disk
* operations.
*
* @param localFileSystem The localFileSystem used for deletions.
* @param nonCanonicalVols The roots of the file system volumes, which may
* be absolute paths, or paths relative to the ${user.dir} system property
* ("cwd").
*/
public MRAsyncDiskService(FileSystem localFileSystem,
String... nonCanonicalVols) throws IOException {
this.localFileSystem = localFileSystem;
this.volumes = new String[nonCanonicalVols.length];
for (int v = 0; v < nonCanonicalVols.length; v++) {
this.volumes[v] = normalizePath(nonCanonicalVols[v]);
LOG.debug("Normalized volume: " + nonCanonicalVols[v]
+ " -> " + this.volumes[v]);
}
asyncDiskService = new AsyncDiskService(this.volumes);
// Create one ThreadPool per volume
for (int v = 0 ; v < volumes.length; v++) {
// Create the root for file deletion
Path absoluteSubdir = new Path(volumes[v], TOBEDELETED);
if (!localFileSystem.mkdirs(absoluteSubdir)) {
// We should tolerate missing volumes.
LOG.warn("Cannot create " + TOBEDELETED + " in " + volumes[v] + ". Ignored.");
}
}
// Create tasks to delete the paths inside the volumes
for (int v = 0 ; v < volumes.length; v++) {
Path absoluteSubdir = new Path(volumes[v], TOBEDELETED);
FileStatus[] files = null;
try {
// List all files inside the volumes TOBEDELETED sub directory
files = localFileSystem.listStatus(absoluteSubdir);
} catch (Exception e) {
// Ignore exceptions in listStatus
// We tolerate missing sub directories.
}
if (files != null) {
for (int f = 0; f < files.length; f++) {
// Get the relative file name to the root of the volume
String absoluteFilename = files[f].getPath().toUri().getPath();
String relative = TOBEDELETED + Path.SEPARATOR_CHAR
+ files[f].getPath().getName();
DeleteTask task = new DeleteTask(volumes[v], absoluteFilename,
relative);
execute(volumes[v], task);
}
}
}
}
/**
* Initialize MRAsyncDiskService based on conf.
* @param conf local file system and local dirs will be read from conf
*/
public MRAsyncDiskService(JobConf conf) throws IOException {
this(FileSystem.getLocal(conf), conf.getLocalDirs());
}
/**
* Execute the task sometime in the future, using ThreadPools.
*/
synchronized void execute(String root, Runnable task) {
asyncDiskService.execute(root, task);
}
/**
* Gracefully start the shut down of all ThreadPools.
*/
public synchronized void shutdown() {
asyncDiskService.shutdown();
}
/**
* Shut down all ThreadPools immediately.
*/
public synchronized List<Runnable> shutdownNow() {
return asyncDiskService.shutdownNow();
}
/**
* Wait for the termination of the thread pools.
*
* @param milliseconds The number of milliseconds to wait
* @return true if all thread pools are terminated within time limit
* @throws InterruptedException
*/
public synchronized boolean awaitTermination(long milliseconds)
throws InterruptedException {
return asyncDiskService.awaitTermination(milliseconds);
}
private SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss.SSS");
private FileSystem localFileSystem;
private String[] volumes;
private static AtomicLong uniqueId = new AtomicLong(0);
/** A task for deleting a pathName from a volume.
*/
class DeleteTask implements Runnable {
/** The volume that the file is on*/
String volume;
/** The file name before the move */
String originalPath;
/** The file name after the move */
String pathToBeDeleted;
/**
* Delete a file/directory (recursively if needed).
* @param volume The volume that the file/dir is in.
* @param originalPath The original name, relative to volume root.
* @param pathToBeDeleted The name after the move, relative to volume root,
* containing TOBEDELETED.
*/
DeleteTask(String volume, String originalPath, String pathToBeDeleted) {
this.volume = volume;
this.originalPath = originalPath;
this.pathToBeDeleted = pathToBeDeleted;
}
@Override
public String toString() {
// Called in AsyncDiskService.execute for displaying error messages.
return "deletion of " + pathToBeDeleted + " on " + volume
+ " with original name " + originalPath;
}
@Override
public void run() {
boolean success = false;
Exception e = null;
try {
Path absolutePathToBeDeleted = new Path(volume, pathToBeDeleted);
success = localFileSystem.delete(absolutePathToBeDeleted, true);
} catch (Exception ex) {
e = ex;
}
if (!success) {
if (e != null) {
LOG.warn("Failure in " + this + " with exception "
+ StringUtils.stringifyException(e));
} else {
LOG.warn("Failure in " + this);
}
} else {
LOG.debug("Successfully did " + this.toString());
}
}
};
/**
   * Move the path name on one volume to a temporary location and then
   * delete it.
   *
   * This function returns when the move is done, but not necessarily when
   * the deletion is done. This is usually good enough because applications
   * won't see the path name under the old name anyway after the move.
*
* @param volume The disk volume
* @param pathName The path name relative to volume root.
* @throws IOException If the move failed
* @return false if the file is not found
*/
public boolean moveAndDeleteRelativePath(String volume, String pathName)
throws IOException {
volume = normalizePath(volume);
// Move the file right now, so that it can be deleted later
String newPathName =
format.format(new Date()) + "_" + uniqueId.getAndIncrement();
newPathName = TOBEDELETED + Path.SEPARATOR_CHAR + newPathName;
Path source = new Path(volume, pathName);
Path target = new Path(volume, newPathName);
try {
if (!localFileSystem.rename(source, target)) {
        // If the source does not exist, return false.
        // This is necessary because rename can return false if the source
        // does not exist.
if (!localFileSystem.exists(source)) {
return false;
}
// Try to recreate the parent directory just in case it gets deleted.
if (!localFileSystem.mkdirs(new Path(volume, TOBEDELETED))) {
throw new IOException("Cannot create " + TOBEDELETED + " under "
+ volume);
}
// Try rename again. If it fails, return false.
if (!localFileSystem.rename(source, target)) {
throw new IOException("Cannot rename " + source + " to "
+ target);
}
}
} catch (FileNotFoundException e) {
// Return false in case that the file is not found.
return false;
}
DeleteTask task = new DeleteTask(volume, pathName, newPathName);
execute(volume, task);
return true;
}
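  /*
   * A short usage sketch, assuming a hypothetical volume root, a task
   * directory relative to it, and an already constructed MRAsyncDiskService
   * named "service"; the call returns false when the path is already gone,
   * and otherwise schedules the deletion on that volume's thread pool:
   *
   *   String volumeRoot = "/data/1/mapred/local";        // hypothetical volume
   *   String taskDir = "taskTracker/jobcache/job_1_1";   // relative to the volume
   *   if (!service.moveAndDeleteRelativePath(volumeRoot, taskDir)) {
   *     // nothing to delete; the path did not exist
   *   }
   */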
/**
   * Move the path name on each volume to a temporary location and then
   * delete them.
   *
   * This function returns when the moves are done, but not necessarily when
   * all deletions are done. This is usually good enough because applications
   * won't see the path name under the old name anyway after the move.
*
* @param pathName The path name relative to each volume root
   * @throws IOException If any of the moves failed
   * @return false if the target pathName did not exist on at least one
   *         volume; note that the operation is still performed on all volumes.
*/
public boolean moveAndDeleteFromEachVolume(String pathName) throws IOException {
boolean result = true;
for (int i = 0; i < volumes.length; i++) {
result = result && moveAndDeleteRelativePath(volumes[i], pathName);
}
return result;
}
/**
* Move all files/directories inside volume into TOBEDELETED, and then
* delete them. The TOBEDELETED directory itself is ignored.
*/
public void cleanupAllVolumes() throws IOException {
for (int v = 0; v < volumes.length; v++) {
// List all files inside the volumes
FileStatus[] files = null;
try {
files = localFileSystem.listStatus(new Path(volumes[v]));
} catch (Exception e) {
// Ignore exceptions in listStatus
// We tolerate missing volumes.
}
if (files != null) {
for (int f = 0; f < files.length; f++) {
// Get the file name - the last component of the Path
String entryName = files[f].getPath().getName();
// Do not delete the current TOBEDELETED
if (!TOBEDELETED.equals(entryName)) {
moveAndDeleteRelativePath(volumes[v], entryName);
}
}
}
}
}
/**
* Returns the normalized path of a path.
*/
private String normalizePath(String path) {
return (new Path(path)).makeQualified(this.localFileSystem)
.toUri().getPath();
}
/**
* Get the relative path name with respect to the root of the volume.
* @param absolutePathName The absolute path name
* @param volume Root of the volume.
* @return null if the absolute path name is outside of the volume.
*/
private String getRelativePathName(String absolutePathName,
String volume) {
absolutePathName = normalizePath(absolutePathName);
// Get the file names
if (!absolutePathName.startsWith(volume)) {
return null;
}
// Get rid of the volume prefix
String fileName = absolutePathName.substring(volume.length());
if (fileName.charAt(0) == Path.SEPARATOR_CHAR) {
fileName = fileName.substring(1);
}
return fileName;
}
/**
* Move the path name to a temporary location and then delete it.
*
   * Note that if no volume contains this path, the path is left unchanged
   * and an IOException is thrown.
   *
   * This function returns when the move is done, but not necessarily when
   * the deletion is done. This is usually good enough because applications
   * won't see the path name under the old name anyway after the move.
   *
   * @param absolutePathName The path name from root "/"
   * @throws IOException If the move failed, or if the path is outside of
   *         all volumes
   * @return false if the path name is not found
*/
public boolean moveAndDeleteAbsolutePath(String absolutePathName)
throws IOException {
for (int v = 0; v < volumes.length; v++) {
String relative = getRelativePathName(absolutePathName, volumes[v]);
if (relative != null) {
return moveAndDeleteRelativePath(volumes[v], relative);
}
}
throw new IOException("Cannot delete " + absolutePathName
+ " because it's outside of all volumes.");
}
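  /*
   * Worked example of the volume resolution above, assuming hypothetical
   * volumes /data/1/mapred/local and /data/2/mapred/local and an instance
   * named "service":
   *
   *   service.moveAndDeleteAbsolutePath("/data/2/mapred/local/taskTracker/tmp");
   *   // matches the second volume; getRelativePathName yields "taskTracker/tmp",
   *   // so the delete is scheduled on that volume's thread pool.
   *
   *   service.moveAndDeleteAbsolutePath("/home/user/tmp");
   *   // matches no volume, so an IOException is thrown.
   */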
}
| 13,649 | 34.180412 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestRMNMInfo {
private static final Log LOG = LogFactory.getLog(TestRMNMInfo.class);
private static final int NUMNODEMANAGERS = 4;
protected static MiniMRYarnCluster mrCluster;
private static Configuration initialConf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(initialConf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR =
new Path("target",TestRMNMInfo.class.getName() + "-tmpDir")
.makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
@BeforeClass
public static void setup() throws IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestRMNMInfo.class.getName(),
NUMNODEMANAGERS);
Configuration conf = new Configuration();
mrCluster.init(conf);
mrCluster.start();
}
    // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absent public distcache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
}
@Test
public void testRMNMInfo() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
RMContext rmc = mrCluster.getResourceManager().getRMContext();
ResourceScheduler rms = mrCluster.getResourceManager()
.getResourceScheduler();
RMNMInfo rmInfo = new RMNMInfo(rmc,rms);
String liveNMs = rmInfo.getLiveNodeManagers();
ObjectMapper mapper = new ObjectMapper();
JsonNode jn = mapper.readTree(liveNMs);
Assert.assertEquals("Unexpected number of live nodes:",
NUMNODEMANAGERS, jn.size());
Iterator<JsonNode> it = jn.iterator();
while (it.hasNext()) {
JsonNode n = it.next();
Assert.assertNotNull(n.get("HostName"));
Assert.assertNotNull(n.get("Rack"));
Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
n.get("State").asText().contains("RUNNING"));
Assert.assertNotNull(n.get("NodeHTTPAddress"));
Assert.assertNotNull(n.get("LastHealthUpdate"));
Assert.assertNotNull(n.get("HealthReport"));
Assert.assertNotNull(n.get("NodeManagerVersion"));
Assert.assertNotNull(n.get("NumContainers"));
Assert.assertEquals(
n.get("NodeId") + ": Unexpected number of used containers",
0, n.get("NumContainers").asInt());
Assert.assertEquals(
n.get("NodeId") + ": Unexpected amount of used memory",
0, n.get("UsedMemoryMB").asInt());
Assert.assertNotNull(n.get("AvailableMemoryMB"));
}
}
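  /*
   * A small sketch of reading individual fields out of the live-node report,
   * using the same Jackson calls as the assertions above (the "liveNMs"
   * string is assumed to come from RMNMInfo.getLiveNodeManagers()):
   *
   *   ObjectMapper mapper = new ObjectMapper();
   *   for (JsonNode n : mapper.readTree(liveNMs)) {
   *     System.out.println(n.get("HostName").asText()
   *         + " containers=" + n.get("NumContainers").asInt());
   *   }
   */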
@Test
  public void testRMNMInfoMismatch() throws Exception {
RMContext rmc = mock(RMContext.class);
ResourceScheduler rms = mock(ResourceScheduler.class);
ConcurrentMap<NodeId, RMNode> map = new ConcurrentHashMap<NodeId, RMNode>();
RMNode node = MockNodes.newNodeInfo(1, MockNodes.newResource(4 * 1024));
map.put(node.getNodeID(), node);
when(rmc.getRMNodes()).thenReturn(map);
RMNMInfo rmInfo = new RMNMInfo(rmc,rms);
String liveNMs = rmInfo.getLiveNodeManagers();
ObjectMapper mapper = new ObjectMapper();
JsonNode jn = mapper.readTree(liveNMs);
Assert.assertEquals("Unexpected number of live nodes:",
1, jn.size());
Iterator<JsonNode> it = jn.iterator();
while (it.hasNext()) {
JsonNode n = it.next();
Assert.assertNotNull(n.get("HostName"));
Assert.assertNotNull(n.get("Rack"));
Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
n.get("State").asText().contains("RUNNING"));
Assert.assertNotNull(n.get("NodeHTTPAddress"));
Assert.assertNotNull(n.get("LastHealthUpdate"));
Assert.assertNotNull(n.get("HealthReport"));
Assert.assertNotNull(n.get("NodeManagerVersion"));
Assert.assertNull(n.get("NumContainers"));
Assert.assertNull(n.get("UsedMemoryMB"));
Assert.assertNull(n.get("AvailableMemoryMB"));
}
}
}
| 6,592 | 38.479042 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRAppWithCombiner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.CustomOutputCommitter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@SuppressWarnings("deprecation")
public class TestMRAppWithCombiner {
protected static MiniMRYarnCluster mrCluster;
private static Configuration conf = new Configuration();
private static FileSystem localFs;
private static final Log LOG = LogFactory.getLog(TestMRAppWithCombiner.class);
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
@BeforeClass
public static void setup() throws IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestMRJobs.class.getName(), 3);
Configuration conf = new Configuration();
mrCluster.init(conf);
mrCluster.start();
}
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absent public distcache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR),
TestMRJobs.APP_JAR);
localFs.setPermission(TestMRJobs.APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
}
@Test
public void testCombinerShouldUpdateTheReporter() throws Exception {
JobConf conf = new JobConf(mrCluster.getConfig());
int numMaps = 5;
int numReds = 2;
Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
"testCombinerShouldUpdateTheReporter-in");
Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
"testCombinerShouldUpdateTheReporter-out");
createInputOutPutFolder(in, out, numMaps);
conf.setJobName("test-job-with-combiner");
conf.setMapperClass(IdentityMapper.class);
conf.setCombinerClass(MyCombinerToCheckReporter.class);
//conf.setJarByClass(MyCombinerToCheckReporter.class);
conf.setReducerClass(IdentityReducer.class);
DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf);
conf.setOutputCommitter(CustomOutputCommitter.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(conf, in);
FileOutputFormat.setOutputPath(conf, out);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReds);
runJob(conf);
}
static void createInputOutPutFolder(Path inDir, Path outDir, int numMaps)
throws Exception {
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
if (!fs.exists(inDir)) {
fs.mkdirs(inDir);
}
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
for (int i = 0; i < numMaps; ++i) {
DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
file.writeBytes(input);
file.close();
}
}
static boolean runJob(JobConf conf) throws Exception {
JobClient jobClient = new JobClient(conf);
RunningJob job = jobClient.submitJob(conf);
return jobClient.monitorAndPrintJob(conf, job);
}
class MyCombinerToCheckReporter<K, V> extends IdentityReducer<K, V> {
public void reduce(K key, Iterator<V> values, OutputCollector<K, V> output,
Reporter reporter) throws IOException {
if (Reporter.NULL == reporter) {
        Assert.fail("A valid Reporter should have been used, but Reporter.NULL was used");
}
}
}
}
| 5,605 | 33.819876 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMiniMRProxyUser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import java.net.InetAddress;
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.security.PrivilegedExceptionAction;
public class TestMiniMRProxyUser extends TestCase {
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
protected void setUp() throws Exception {
super.setUp();
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", "/tmp");
}
int taskTrackers = 2;
int dataNodes = 2;
String proxyUser = System.getProperty("user.name");
String proxyGroup = "g";
StringBuilder sb = new StringBuilder();
sb.append("127.0.0.1,localhost");
for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
sb.append(",").append(i.getCanonicalHostName());
}
JobConf conf = new JobConf();
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
conf.set("hadoop.proxyuser." + proxyUser + ".hosts", sb.toString());
conf.set("hadoop.proxyuser." + proxyUser + ".groups", proxyGroup);
String[] userGroups = new String[]{proxyGroup};
UserGroupInformation.createUserForTesting(proxyUser, userGroups);
UserGroupInformation.createUserForTesting("u1", userGroups);
UserGroupInformation.createUserForTesting("u2", new String[]{"gg"});
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
.build();
FileSystem fileSystem = dfsCluster.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
String nnURI = fileSystem.getUri().toString();
int numDirs = 1;
String[] racks = null;
String[] hosts = null;
mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
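  /*
   * The proxy-user wiring above reduces to two configuration keys plus a
   * refresh. A minimal sketch, assuming a hypothetical superuser "oozie"
   * that may impersonate members of group "users" from any host:
   *
   *   JobConf conf = new JobConf();
   *   conf.set("hadoop.proxyuser.oozie.hosts", "*");
   *   conf.set("hadoop.proxyuser.oozie.groups", "users");
   *   ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
   */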
protected JobConf getJobConf() {
return mrCluster.createJobConf();
}
@Override
protected void tearDown() throws Exception {
if (mrCluster != null) {
mrCluster.shutdown();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
super.tearDown();
}
private void mrRun() throws Exception {
FileSystem fs = FileSystem.get(getJobConf());
Path inputDir = new Path("input");
fs.mkdirs(inputDir);
Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
writer.write("hello");
writer.close();
Path outputDir = new Path("output", "output");
JobConf jobConf = new JobConf(getJobConf());
jobConf.setInt("mapred.map.tasks", 1);
jobConf.setInt("mapred.map.max.attempts", 1);
jobConf.setInt("mapred.reduce.max.attempts", 1);
jobConf.set("mapred.input.dir", inputDir.toString());
jobConf.set("mapred.output.dir", outputDir.toString());
JobClient jobClient = new JobClient(jobConf);
RunningJob runJob = jobClient.submitJob(jobConf);
runJob.waitForCompletion();
assertTrue(runJob.isComplete());
assertTrue(runJob.isSuccessful());
}
public void __testCurrentUser() throws Exception {
mrRun();
}
public void testValidProxyUser() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createProxyUser("u1", UserGroupInformation.getLoginUser());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
mrRun();
return null;
}
});
}
public void ___testInvalidProxyUser() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createProxyUser("u2", UserGroupInformation.getLoginUser());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
try {
mrRun();
fail();
}
catch (RemoteException ex) {
//nop
}
catch (Exception ex) {
fail();
}
return null;
}
});
}
}
| 5,838 | 34.387879 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestNonExistentJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.security.authorize.ProxyUsers;
import java.io.IOException;
import java.net.InetAddress;
public class TestNonExistentJob extends TestCase {
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
protected void setUp() throws Exception {
super.setUp();
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", "/tmp");
}
int taskTrackers = 2;
int dataNodes = 2;
String proxyUser = System.getProperty("user.name");
String proxyGroup = "g";
StringBuilder sb = new StringBuilder();
sb.append("127.0.0.1,localhost");
for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
sb.append(",").append(i.getCanonicalHostName());
}
JobConf conf = new JobConf();
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
.build();
FileSystem fileSystem = dfsCluster.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
String nnURI = fileSystem.getUri().toString();
int numDirs = 1;
String[] racks = null;
String[] hosts = null;
mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
protected JobConf getJobConf() {
return mrCluster.createJobConf();
}
@Override
protected void tearDown() throws Exception {
if (mrCluster != null) {
mrCluster.shutdown();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
super.tearDown();
}
public void testGetInvalidJob() throws Exception {
RunningJob runJob = new JobClient(getJobConf()).getJob(JobID.forName("job_0_0"));
assertNull(runJob);
}
}
| 3,571 | 35.080808 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import org.junit.Assert;
import org.apache.avro.AvroRemoteException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.util.Records;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestMRJobsWithHistoryService {
private static final Log LOG =
LogFactory.getLog(TestMRJobsWithHistoryService.class);
private static final EnumSet<RMAppState> TERMINAL_RM_APP_STATES =
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
private static MiniMRYarnCluster mrCluster;
private static Configuration conf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR = new Path("target",
TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs);
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
@Before
public void setup() throws InterruptedException, IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(getClass().getName());
mrCluster.init(new Configuration());
mrCluster.start();
}
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absent public distcache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@After
public void tearDown() {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster != null) {
mrCluster.stop();
}
}
@Test (timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
AvroRemoteException, ClassNotFoundException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
SleepJob sleepJob = new SleepJob();
sleepJob.setConf(mrCluster.getConfig());
// Job with 3 maps and 2 reduces
Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.waitForCompletion(true);
Counters counterMR = job.getCounters();
JobId jobId = TypeConverter.toYarn(job.getJobID());
ApplicationId appID = jobId.getAppId();
int pollElapsed = 0;
while (true) {
Thread.sleep(1000);
pollElapsed += 1000;
if (TERMINAL_RM_APP_STATES.contains(
mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
.getState())) {
break;
}
if (pollElapsed >= 60000) {
LOG.warn("application did not reach terminal state within 60 seconds");
break;
}
}
Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
.getRMContext().getRMApps().get(appID).getState());
Counters counterHS = job.getCounters();
    // TODO: the assert below worked; need to check whether we should compare
    // each field or convert to V2 counters and then compare.
LOG.info("CounterHS " + counterHS);
LOG.info("CounterMR " + counterMR);
Assert.assertEquals(counterHS, counterMR);
HSClientProtocol historyClient = instantiateHistoryProxy();
GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
gjReq.setJobId(jobId);
JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
verifyJobReport(jobReport, jobId);
}
private void verifyJobReport(JobReport jobReport, JobId jobId) {
List<AMInfo> amInfos = jobReport.getAMInfos();
Assert.assertEquals(1, amInfos.size());
AMInfo amInfo = amInfos.get(0);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(jobId.getAppId(), 1);
ContainerId amContainerId = ContainerId.newContainerId(appAttemptId, 1);
Assert.assertEquals(appAttemptId, amInfo.getAppAttemptId());
Assert.assertEquals(amContainerId, amInfo.getContainerId());
Assert.assertTrue(jobReport.getSubmitTime() > 0);
Assert.assertTrue(jobReport.getStartTime() > 0
&& jobReport.getStartTime() >= jobReport.getSubmitTime());
Assert.assertTrue(jobReport.getFinishTime() > 0
&& jobReport.getFinishTime() >= jobReport.getStartTime());
}
private HSClientProtocol instantiateHistoryProxy() {
final String serviceAddr =
mrCluster.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS);
final YarnRPC rpc = YarnRPC.create(conf);
HSClientProtocol historyClient =
(HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
NetUtils.createSocketAddr(serviceAddr), mrCluster.getConfig());
return historyClient;
}
}
| 7,164 | 36.317708 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestUberAM extends TestMRJobs {
private static final Log LOG = LogFactory.getLog(TestUberAM.class);
@BeforeClass
public static void setup() throws IOException {
TestMRJobs.setup();
if (mrCluster != null) {
mrCluster.getConfig().setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
mrCluster.getConfig().setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 3);
}
}
@Override
@Test
public void testSleepJob()
throws Exception {
numSleepReducers = 1;
super.testSleepJob();
}
@Test
public void testSleepJobWithMultipleReducers()
throws Exception {
numSleepReducers = 3;
super.testSleepJob();
}
@Override
protected void verifySleepJobCounters(Job job) throws InterruptedException,
IOException {
Counters counters = job.getCounters();
super.verifySleepJobCounters(job);
Assert.assertEquals(3,
counters.findCounter(JobCounter.NUM_UBER_SUBMAPS).getValue());
Assert.assertEquals(numSleepReducers,
counters.findCounter(JobCounter.NUM_UBER_SUBREDUCES).getValue());
Assert.assertEquals(3 + numSleepReducers,
counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
@Override
@Test
public void testRandomWriter()
throws IOException, InterruptedException, ClassNotFoundException {
super.testRandomWriter();
}
@Override
protected void verifyRandomWriterCounters(Job job)
throws InterruptedException, IOException {
super.verifyRandomWriterCounters(job);
Counters counters = job.getCounters();
Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
.getValue());
Assert.assertEquals(3,
counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
@Override
@Test
public void testFailingMapper()
throws IOException, InterruptedException, ClassNotFoundException {
LOG.info("\n\n\nStarting uberized testFailingMapper().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
Job job = runFailingMapperJob();
// should be able to get diags for single task attempt...
TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
TaskAttemptID aId = new TaskAttemptID(taskID, 0);
System.out.println("Diagnostics for " + aId + " :");
for (String diag : job.getTaskDiagnostics(aId)) {
System.out.println(diag);
}
// ...but not for second (shouldn't exist: uber-AM overrode max attempts)
boolean secondTaskAttemptExists = true;
try {
aId = new TaskAttemptID(taskID, 1);
System.out.println("Diagnostics for " + aId + " :");
for (String diag : job.getTaskDiagnostics(aId)) {
System.out.println(diag);
}
} catch (Exception e) {
secondTaskAttemptExists = false;
}
Assert.assertEquals(false, secondTaskAttemptExists);
TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
Assert.assertEquals(1, events.length);
// TIPFAILED if it comes from the AM, FAILED if it comes from the JHS
TaskCompletionEvent.Status status = events[0].getStatus();
Assert.assertTrue(status == TaskCompletionEvent.Status.FAILED ||
status == TaskCompletionEvent.Status.TIPFAILED);
Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
//Disabling till UberAM honors MRJobConfig.MAP_MAX_ATTEMPTS
//verifyFailingMapperCounters(job);
// TODO later: add explicit "isUber()" checks of some sort
}
@Override
protected void verifyFailingMapperCounters(Job job)
throws InterruptedException, IOException {
Counters counters = job.getCounters();
super.verifyFailingMapperCounters(job);
Assert.assertEquals(2,
counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
.getValue());
Assert.assertEquals(2, counters
.findCounter(JobCounter.NUM_FAILED_UBERTASKS).getValue());
}
//@Test //FIXME: if/when the corresponding TestMRJobs test gets enabled, do so here as well (potentially with mods for ubermode)
public void testSleepJobWithSecurityOn()
throws IOException, InterruptedException, ClassNotFoundException {
super.testSleepJobWithSecurityOn();
}
}
| 5,865 | 34.551515 | 130 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRAMWithNonNormalizedCapabilities.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestMRAMWithNonNormalizedCapabilities {
private static final Log LOG = LogFactory.getLog(TestMRAMWithNonNormalizedCapabilities.class);
private static FileSystem localFs;
protected static MiniMRYarnCluster mrCluster = null;
private static Configuration conf = new Configuration();
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR = new Path("target",
TestMRAMWithNonNormalizedCapabilities.class.getName() + "-tmpDir")
.makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
@Before
public void setup() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(getClass().getSimpleName());
mrCluster.init(new Configuration());
mrCluster.start();
}
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absent public distcache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
/**
   * Ensures that nothing is broken after normalization was removed
   * from the MR AM side.
* @throws Exception
*/
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
JobConf jobConf = new JobConf(mrCluster.getConfig());
jobConf.setInt("mapreduce.map.memory.mb", 700);
jobConf.setInt("mapred.reduce.memory.mb", 1500);
SleepJob sleepJob = new SleepJob();
sleepJob.setConf(jobConf);
Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.submit();
boolean completed = job.waitForCompletion(true);
Assert.assertTrue("Job should be completed", completed);
Assert.assertEquals("Job should be finished successfully",
JobStatus.State.SUCCEEDED, job.getJobState());
}
@After
public void tearDown() {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster != null) {
mrCluster.stop();
}
}
}
| 4,210 | 33.516393 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapred.LocalContainerLauncher;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
/**
* Configures and starts the MR-specific components in the YARN cluster.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MiniMRYarnCluster extends MiniYARNCluster {
public static final String APPJAR = JarFinder.getJar(LocalContainerLauncher.class);
private static final Log LOG = LogFactory.getLog(MiniMRYarnCluster.class);
private JobHistoryServer historyServer;
private JobHistoryServerWrapper historyServerWrapper;
public MiniMRYarnCluster(String testName) {
this(testName, 1);
}
public MiniMRYarnCluster(String testName, int noOfNMs) {
this(testName, noOfNMs, false);
}
public MiniMRYarnCluster(String testName, int noOfNMs, boolean enableAHS) {
super(testName, 1, noOfNMs, 4, 4, enableAHS);
historyServerWrapper = new JobHistoryServerWrapper();
addService(historyServerWrapper);
}
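  /*
   * Typical usage, as exercised by the tests in this module (the test name
   * and node-manager count below are placeholders):
   *
   *   MiniMRYarnCluster mrCluster = new MiniMRYarnCluster("MyTest", 2);
   *   mrCluster.init(new Configuration());
   *   mrCluster.start();
   *   // ... run MR jobs against mrCluster.getConfig() ...
   *   mrCluster.stop();
   */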
public static String getResolvedMRHistoryWebAppURLWithoutScheme(
Configuration conf, boolean isSSLEnabled) {
InetSocketAddress address = null;
if (isSSLEnabled) {
address =
conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT);
} else {
address =
conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
              JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
    }
address = NetUtils.getConnectAddress(address);
StringBuffer sb = new StringBuffer();
InetAddress resolved = address.getAddress();
if (resolved == null || resolved.isAnyLocalAddress() ||
resolved.isLoopbackAddress()) {
String lh = address.getHostName();
try {
lh = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
//Ignore and fallback.
}
sb.append(lh);
} else {
sb.append(address.getHostName());
}
sb.append(":").append(address.getPort());
return sb.toString();
}
@Override
public void serviceInit(Configuration conf) throws Exception {
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(),
"apps_staging_dir/").getAbsolutePath());
}
// By default, VMEM monitoring disabled, PMEM monitoring enabled.
if (!conf.getBoolean(
MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
MRConfig.DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
}
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
try {
Path stagingPath = FileContext.getFileContext(conf).makeQualified(
new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
/*
* Re-configure the staging path on Windows if the file system is localFs.
       * We need to use an absolute path that contains the drive letter. The unit
* test could run on a different drive than the AM. We can run into the
* issue that job files are localized to the drive where the test runs on,
* while the AM starts on a different drive and fails to find the job
* metafiles. Using absolute path can avoid this ambiguity.
*/
if (Path.WINDOWS) {
if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
conf.set(MRJobConfig.MR_AM_STAGING_DIR,
new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR))
.getAbsolutePath());
}
}
FileContext fc=FileContext.getFileContext(stagingPath.toUri(), conf);
if (fc.util().exists(stagingPath)) {
LOG.info(stagingPath + " exists! deleting...");
fc.delete(stagingPath, true);
}
LOG.info("mkdir: " + stagingPath);
      // mkdir the staging directory so that the right permissions are set while running as a proxy user
fc.mkdir(stagingPath, null, true);
//mkdir done directory as well
String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
Path doneDirPath = fc.makeQualified(new Path(doneDir));
fc.mkdir(doneDirPath, null, true);
} catch (IOException e) {
throw new YarnRuntimeException("Could not create staging directory. ", e);
}
    // The default is "local", in which case shuffle doesn't happen.
    conf.set(MRConfig.MASTER_ADDRESS, "test");
//configure the shuffle service in NM
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,
ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class,
Service.class);
// Non-standard shuffle port
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR,
DefaultContainerExecutor.class, ContainerExecutor.class);
// TestMRJobs is for testing non-uberized operation only; see TestUberAM
// for corresponding uberized tests.
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
super.serviceInit(conf);
}
private class JobHistoryServerWrapper extends AbstractService {
public JobHistoryServerWrapper() {
super(JobHistoryServerWrapper.class.getName());
}
@Override
public synchronized void serviceStart() throws Exception {
try {
if (!getConfig().getBoolean(
JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS,
JHAdminConfig.DEFAULT_MR_HISTORY_MINICLUSTER_FIXED_PORTS)) {
String hostname = MiniYARNCluster.getHostname();
// pick free random ports.
getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
hostname + ":0");
MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(), hostname
+ ":0");
getConfig().set(JHAdminConfig.JHS_ADMIN_ADDRESS,
hostname + ":0");
}
historyServer = new JobHistoryServer();
historyServer.init(getConfig());
new Thread() {
public void run() {
historyServer.start();
};
}.start();
while (historyServer.getServiceState() == STATE.INITED) {
LOG.info("Waiting for HistoryServer to start...");
Thread.sleep(1500);
}
//TODO Add a timeout. State.STOPPED check ?
if (historyServer.getServiceState() != STATE.STARTED) {
throw new IOException("HistoryServer failed to start");
}
super.serviceStart();
} catch (Throwable t) {
throw new YarnRuntimeException(t);
}
//need to do this because historyServer.init creates a new Configuration
getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
LOG.info("MiniMRYARN ResourceManager address: " +
getConfig().get(YarnConfiguration.RM_ADDRESS));
LOG.info("MiniMRYARN ResourceManager web address: " +
WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
LOG.info("MiniMRYARN HistoryServer address: " +
getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
LOG.info("MiniMRYARN HistoryServer web address: "
+ getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
}
@Override
public synchronized void serviceStop() throws Exception {
if (historyServer != null) {
historyServer.stop();
}
super.serviceStop();
}
}
public JobHistoryServer getHistoryServer() {
return this.historyServer;
}
}
| 10,617 | 40.155039 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecutionWithMRApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Test;
import com.google.common.base.Supplier;
@SuppressWarnings({ "unchecked", "rawtypes" })
public class TestSpeculativeExecutionWithMRApp {
private static final int NUM_MAPPERS = 5;
private static final int NUM_REDUCERS = 0;
@Test
public void testSpeculateSuccessfulWithoutUpdateEvents() throws Exception {
Clock actualClock = new SystemClock();
final ControlledClock clock = new ControlledClock(actualClock);
clock.setTime(System.currentTimeMillis());
MRApp app =
new MRApp(NUM_MAPPERS, NUM_REDUCERS, false, "test", true, clock);
Job job = app.submit(new Configuration(), true, true);
app.waitForState(job, JobState.RUNNING);
Map<TaskId, Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", NUM_MAPPERS + NUM_REDUCERS,
tasks.size());
Iterator<Task> taskIter = tasks.values().iterator();
while (taskIter.hasNext()) {
app.waitForState(taskIter.next(), TaskState.RUNNING);
}
// Process the update events
clock.setTime(System.currentTimeMillis() + 2000);
EventHandler appEventHandler = app.getContext().getEventHandler();
for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask
.getValue().getAttempts().entrySet()) {
TaskAttemptStatus status =
createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.8,
TaskAttemptState.RUNNING);
TaskAttemptStatusUpdateEvent event =
new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
appEventHandler.handle(event);
}
}
Random generator = new Random();
Object[] taskValues = tasks.values().toArray();
final Task taskToBeSpeculated =
(Task) taskValues[generator.nextInt(taskValues.length)];
// Other than one random task, finish every other task.
for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask
.getValue().getAttempts().entrySet()) {
        if (!mapTask.getKey().equals(taskToBeSpeculated.getID())) {
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
}
}
}
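    // Keep nudging the clock forward until the speculator launches a second
    // attempt for the one task that was left running.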
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
if (taskToBeSpeculated.getAttempts().size() != 2) {
clock.setTime(System.currentTimeMillis() + 1000);
return false;
} else {
return true;
}
}
}, 1000, 60000);
// finish 1st TA, 2nd will be killed
TaskAttempt[] ta = makeFirstAttemptWin(appEventHandler, taskToBeSpeculated);
verifySpeculationMessage(app, ta);
app.waitForState(Service.STATE.STOPPED);
}
@Test
  public void testSpeculateSuccessfulWithUpdateEvents() throws Exception {
Clock actualClock = new SystemClock();
final ControlledClock clock = new ControlledClock(actualClock);
clock.setTime(System.currentTimeMillis());
MRApp app =
new MRApp(NUM_MAPPERS, NUM_REDUCERS, false, "test", true, clock);
Job job = app.submit(new Configuration(), true, true);
app.waitForState(job, JobState.RUNNING);
Map<TaskId, Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", NUM_MAPPERS + NUM_REDUCERS,
tasks.size());
Iterator<Task> taskIter = tasks.values().iterator();
while (taskIter.hasNext()) {
app.waitForState(taskIter.next(), TaskState.RUNNING);
}
    // Advance the clock and send a progress update for every running attempt
clock.setTime(System.currentTimeMillis() + 1000);
EventHandler appEventHandler = app.getContext().getEventHandler();
for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask
.getValue().getAttempts().entrySet()) {
TaskAttemptStatus status =
createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.5,
TaskAttemptState.RUNNING);
TaskAttemptStatusUpdateEvent event =
new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
appEventHandler.handle(event);
}
}
Task speculatedTask = null;
int numTasksToFinish = NUM_MAPPERS + NUM_REDUCERS - 1;
clock.setTime(System.currentTimeMillis() + 1000);
for (Map.Entry<TaskId, Task> task : tasks.entrySet()) {
for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : task.getValue()
.getAttempts().entrySet()) {
if (numTasksToFinish > 0) {
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
numTasksToFinish--;
app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
} else {
// The last task is chosen for speculation
TaskAttemptStatus status =
createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.75,
TaskAttemptState.RUNNING);
speculatedTask = task.getValue();
TaskAttemptStatusUpdateEvent event =
new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
appEventHandler.handle(event);
}
}
}
clock.setTime(System.currentTimeMillis() + 15000);
for (Map.Entry<TaskId, Task> task : tasks.entrySet()) {
for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : task.getValue()
.getAttempts().entrySet()) {
if (taskAttempt.getValue().getState() != TaskAttemptState.SUCCEEDED) {
TaskAttemptStatus status =
createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.75,
TaskAttemptState.RUNNING);
TaskAttemptStatusUpdateEvent event =
new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
appEventHandler.handle(event);
}
}
}
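    // Wait for the speculator to add a second attempt for the slow task,
    // advancing the clock between checks.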
final Task speculatedTaskConst = speculatedTask;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
if (speculatedTaskConst.getAttempts().size() != 2) {
clock.setTime(System.currentTimeMillis() + 1000);
return false;
} else {
return true;
}
}
}, 1000, 60000);
TaskAttempt[] ta = makeFirstAttemptWin(appEventHandler, speculatedTask);
verifySpeculationMessage(app, ta);
app.waitForState(Service.STATE.STOPPED);
}
private static TaskAttempt[] makeFirstAttemptWin(
EventHandler appEventHandler, Task speculatedTask) {
// finish 1st TA, 2nd will be killed
Collection<TaskAttempt> attempts = speculatedTask.getAttempts().values();
TaskAttempt[] ta = new TaskAttempt[attempts.size()];
attempts.toArray(ta);
appEventHandler.handle(
new TaskAttemptEvent(ta[0].getID(), TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(ta[0].getID(),
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
return ta;
}
private static void verifySpeculationMessage(MRApp app, TaskAttempt[] ta)
throws Exception {
app.waitForState(ta[0], TaskAttemptState.SUCCEEDED);
    // The speculative attempt may not be killed before the MR job succeeds.
}
private TaskAttemptStatus createTaskAttemptStatus(TaskAttemptId id,
float progress, TaskAttemptState state) {
TaskAttemptStatus status = new TaskAttemptStatus();
status.id = id;
status.progress = progress;
status.taskState = state;
return status;
}
}
| 10,062 | 39.252 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMROldApiJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.CustomOutputCommitter;
import org.apache.hadoop.FailMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestMROldApiJobs {
private static final Log LOG = LogFactory.getLog(TestMROldApiJobs.class);
protected static MiniMRYarnCluster mrCluster;
private static Configuration conf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
@BeforeClass
public static void setup() throws IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestMROldApiJobs.class.getName());
mrCluster.init(new Configuration());
mrCluster.start();
}
    // These tests cover non-uberized operation only; see TestUberAM
// for corresponding uberized tests.
mrCluster.getConfig().setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absence of a public distributed cache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), TestMRJobs.APP_JAR);
localFs.setPermission(TestMRJobs.APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
}
@Test
public void testJobSucceed() throws IOException, InterruptedException,
ClassNotFoundException {
LOG.info("\n\n\nStarting testJobSucceed().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
JobConf conf = new JobConf(mrCluster.getConfig());
Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
"in");
Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
"out");
runJobSucceed(conf, in, out);
FileSystem fs = FileSystem.get(conf);
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
}
@Test
public void testJobFail() throws IOException, InterruptedException,
ClassNotFoundException {
LOG.info("\n\n\nStarting testJobFail().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
JobConf conf = new JobConf(mrCluster.getConfig());
Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
"fail-in");
Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
"fail-out");
runJobFail(conf, in, out);
FileSystem fs = FileSystem.get(conf);
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_SETUP_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.JOB_ABORT_FILE_NAME)));
Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.JOB_COMMIT_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_SETUP_FILE_NAME)));
Assert.assertTrue(fs.exists(new Path(out, CustomOutputCommitter.TASK_ABORT_FILE_NAME)));
Assert.assertFalse(fs.exists(new Path(out, CustomOutputCommitter.TASK_COMMIT_FILE_NAME)));
}
  // Run a job that is expected to fail and wait until it completes
public static void runJobFail(JobConf conf, Path inDir, Path outDir)
throws IOException, InterruptedException {
conf.setJobName("test-job-fail");
conf.setMapperClass(FailMapper.class);
conf.setJarByClass(FailMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setMaxMapAttempts(1);
boolean success = runJob(conf, inDir, outDir, 1, 0);
Assert.assertFalse("Job expected to fail succeeded", success);
}
  // Run a job that is expected to succeed and wait until it completes
public static void runJobSucceed(JobConf conf, Path inDir, Path outDir)
throws IOException, InterruptedException {
conf.setJobName("test-job-succeed");
conf.setMapperClass(IdentityMapper.class);
//conf.setJar(new File(MiniMRYarnCluster.APPJAR).getAbsolutePath());
conf.setReducerClass(IdentityReducer.class);
    boolean success = runJob(conf, inDir, outDir, 1, 1);
Assert.assertTrue("Job expected to succeed failed", success);
}
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
int numReds) throws IOException, InterruptedException {
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
if (!fs.exists(inDir)) {
fs.mkdirs(inDir);
}
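    // Write one small text file per map task so every mapper has input.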
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
for (int i = 0; i < numMaps; ++i) {
DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
file.writeBytes(input);
file.close();
}
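    // Ship the MRAppJar on the task classpath via the distributed cache and
    // use the custom committer whose marker files the callers assert on.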
DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
conf.setOutputCommitter(CustomOutputCommitter.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReds);
JobClient jobClient = new JobClient(conf);
RunningJob job = jobClient.submitJob(conf);
return jobClient.monitorAndPrintJob(conf, job);
}
}
| 8,093 | 36.472222 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.junit.AfterClass;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestMRJobsWithProfiler {
private static final Log LOG =
LogFactory.getLog(TestMRJobsWithProfiler.class);
private static final EnumSet<RMAppState> TERMINAL_RM_APP_STATES =
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
private static final int PROFILED_TASK_ID = 1;
private static MiniMRYarnCluster mrCluster;
private static final Configuration CONF = new Configuration();
private static final FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(CONF);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static final Path TEST_ROOT_DIR =
new Path("target", TestMRJobs.class.getName() + "-tmpDir").
makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
private static final Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
@BeforeClass
public static void setup() throws InterruptedException, IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestMRJobsWithProfiler.class.getName());
mrCluster.init(CONF);
mrCluster.start();
}
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absence of a public distributed cache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster != null) {
mrCluster.stop();
}
}
@Test (timeout = 150000)
public void testDefaultProfiler() throws Exception {
LOG.info("Starting testDefaultProfiler");
testProfilerInternal(true);
}
@Test (timeout = 150000)
public void testDifferentProfilers() throws Exception {
LOG.info("Starting testDefaultProfiler");
testProfilerInternal(false);
}
private void testProfilerInternal(boolean useDefault) throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
final SleepJob sleepJob = new SleepJob();
final JobConf sleepConf = new JobConf(mrCluster.getConfig());
sleepConf.setProfileEnabled(true);
sleepConf.setProfileTaskRange(true, String.valueOf(PROFILED_TASK_ID));
sleepConf.setProfileTaskRange(false, String.valueOf(PROFILED_TASK_ID));
if (!useDefault) {
// use hprof for map to profile.out
sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
"-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
+ "file=%s");
// use Xprof for reduce to stdout
sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
}
sleepJob.setConf(sleepConf);
// 2-map-2-reduce SleepJob
final Job job = sleepJob.createJob(2, 2, 500, 1, 500, 1);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.waitForCompletion(true);
final JobId jobId = TypeConverter.toYarn(job.getJobID());
final ApplicationId appID = jobId.getAppId();
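    // Poll the RM (bounded at 60 seconds) until the application reaches a
    // terminal state before inspecting the per-container logs.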
int pollElapsed = 0;
while (true) {
Thread.sleep(1000);
pollElapsed += 1000;
if (TERMINAL_RM_APP_STATES.contains(
mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
.getState())) {
break;
}
if (pollElapsed >= 60000) {
LOG.warn("application did not reach terminal state within 60 seconds");
break;
}
}
Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
.getRMContext().getRMApps().get(appID).getState());
// Job finished, verify logs
//
final Configuration nmConf = mrCluster.getNodeManager(0).getConfig();
final String appIdStr = appID.toString();
final String appIdSuffix = appIdStr.substring(
"application_".length(), appIdStr.length());
final String containerGlob = "container_" + appIdSuffix + "_*_*";
final Map<TaskAttemptID,Path> taLogDirs = new HashMap<TaskAttemptID,Path>();
final Pattern taskPattern = Pattern.compile(
".*Task:(attempt_"
+ appIdSuffix + "_[rm]_" + "[0-9]+_[0-9]+).*");
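    // Scan every NM log dir: a container syslog that contains a
    // "Task:attempt_..." completion line maps that attempt to its log dir.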
for (String logDir :
nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS))
{
// filter out MRAppMaster and create attemptId->logDir map
//
for (FileStatus fileStatus :
localFs.globStatus(new Path(logDir
+ Path.SEPARATOR + appIdStr
+ Path.SEPARATOR + containerGlob
+ Path.SEPARATOR + TaskLog.LogName.SYSLOG)))
{
final BufferedReader br = new BufferedReader(
new InputStreamReader(localFs.open(fileStatus.getPath())));
String line;
while ((line = br.readLine()) != null) {
final Matcher m = taskPattern.matcher(line);
if (m.matches()) {
// found Task done message
taLogDirs.put(TaskAttemptID.forName(m.group(1)),
fileStatus.getPath().getParent());
break;
}
}
br.close();
}
}
Assert.assertEquals(4, taLogDirs.size()); // all 4 attempts found
for (Map.Entry<TaskAttemptID,Path> dirEntry : taLogDirs.entrySet()) {
final TaskAttemptID tid = dirEntry.getKey();
final Path profilePath = new Path(dirEntry.getValue(),
TaskLog.LogName.PROFILE.toString());
final Path stdoutPath = new Path(dirEntry.getValue(),
TaskLog.LogName.STDOUT.toString());
if (useDefault || tid.getTaskType() == TaskType.MAP) {
if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// verify profile.out
final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(profilePath)));
final String line = br.readLine();
Assert.assertTrue("No hprof content found!",
            line != null && line.startsWith("JAVA PROFILE"));
br.close();
Assert.assertEquals(0L, localFs.getFileStatus(stdoutPath).getLen());
} else {
Assert.assertFalse("hprof file should not exist",
localFs.exists(profilePath));
}
} else {
Assert.assertFalse("hprof file should not exist",
localFs.exists(profilePath));
if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// reducer is profiled with Xprof
final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(stdoutPath)));
boolean flatProfFound = false;
String line;
while ((line = br.readLine()) != null) {
if (line.startsWith("Flat profile")) {
flatProfFound = true;
break;
}
}
br.close();
Assert.assertTrue("Xprof flat profile not found!", flatProfFound);
} else {
Assert.assertEquals(0L, localFs.getFileStatus(stdoutPath).getLen());
}
}
}
}
}
| 9,053 | 34.093023 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.StringReader;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.FailingMapper;
import org.apache.hadoop.RandomTextWriterJob;
import org.apache.hadoop.RandomTextWriterJob.RandomInputFormat;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.mapreduce.SleepJob.SleepMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster;
import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ApplicationClassLoader;
import org.apache.hadoop.util.ClassUtil;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestMRJobs {
private static final Log LOG = LogFactory.getLog(TestMRJobs.class);
private static final EnumSet<RMAppState> TERMINAL_RM_APP_STATES =
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
private static final int NUM_NODE_MGRS = 3;
private static final String TEST_IO_SORT_MB = "11";
private static final String TEST_GROUP_MAX = "200";
private static final int DEFAULT_REDUCES = 2;
protected int numSleepReducers = DEFAULT_REDUCES;
protected static MiniMRYarnCluster mrCluster;
protected static MiniDFSCluster dfsCluster;
private static Configuration conf = new Configuration();
private static FileSystem localFs;
private static FileSystem remoteFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR = new Path("target",
TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs);
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
private static final String OUTPUT_ROOT_DIR = "/tmp/" +
TestMRJobs.class.getSimpleName();
@BeforeClass
public static void setup() throws IOException {
try {
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(true).racks(null).build();
remoteFs = dfsCluster.getFileSystem();
} catch (IOException io) {
throw new RuntimeException("problem starting mini dfs cluster", io);
}
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestMRJobs.class.getName(),
NUM_NODE_MGRS);
Configuration conf = new Configuration();
conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir");
mrCluster.init(conf);
mrCluster.start();
}
// Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
    // work around the absence of a public distributed cache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
if (dfsCluster != null) {
dfsCluster.shutdown();
dfsCluster = null;
}
}
@After
public void resetInit() {
numSleepReducers = DEFAULT_REDUCES;
}
@Test (timeout = 300000)
public void testSleepJob() throws Exception {
testSleepJobInternal(false);
}
@Test (timeout = 300000)
public void testSleepJobWithRemoteJar() throws Exception {
testSleepJobInternal(true);
}
private void testSleepJobInternal(boolean useRemoteJar) throws Exception {
LOG.info("\n\n\nStarting testSleepJob: useRemoteJar=" + useRemoteJar);
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
Configuration sleepConf = new Configuration(mrCluster.getConfig());
    // set master address to local to test that local mode is applied iff framework == local
sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
SleepJob sleepJob = new SleepJob();
sleepJob.setConf(sleepConf);
// job with 3 maps (10s) and numReduces reduces (5s), 1 "record" each:
Job job = sleepJob.createJob(3, numSleepReducers, 10000, 1, 5000, 1);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
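    // For the remote-jar variant, mount the directory holding the SleepJob jar
    // under viewfs:///jobjars and point the job jar at that remote path.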
if (useRemoteJar) {
final Path localJar = new Path(
ClassUtil.findContainingJar(SleepJob.class));
ConfigUtil.addLink(job.getConfiguration(), "/jobjars",
localFs.makeQualified(localJar.getParent()).toUri());
job.setJar("viewfs:///jobjars/" + localJar.getName());
} else {
job.setJarByClass(SleepJob.class);
}
job.setMaxMapAttempts(1); // speed up failures
job.submit();
String trackingUrl = job.getTrackingURL();
String jobId = job.getJobID().toString();
boolean succeeded = job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
Assert.assertTrue("Tracking URL was " + trackingUrl +
" but didn't Match Job ID " + jobId ,
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
verifySleepJobCounters(job);
verifyTaskProgress(job);
// TODO later: add explicit "isUber()" checks of some sort (extend
// JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value
}
@Test(timeout = 300000)
public void testConfVerificationWithClassloader() throws Exception {
testConfVerification(true, false, false, false);
}
@Test(timeout = 300000)
public void testConfVerificationWithClassloaderCustomClasses()
throws Exception {
testConfVerification(true, true, false, false);
}
@Test(timeout = 300000)
public void testConfVerificationWithOutClassloader() throws Exception {
testConfVerification(false, false, false, false);
}
@Test(timeout = 300000)
public void testConfVerificationWithJobClient() throws Exception {
testConfVerification(false, false, true, false);
}
@Test(timeout = 300000)
public void testConfVerificationWithJobClientLocal() throws Exception {
testConfVerification(false, false, true, true);
}
private void testConfVerification(boolean useJobClassLoader,
boolean useCustomClasses, boolean useJobClientForMonitring,
boolean useLocal) throws Exception {
LOG.info("\n\n\nStarting testConfVerification()"
+ " jobClassloader=" + useJobClassLoader
+ " customClasses=" + useCustomClasses
+ " jobClient=" + useJobClientForMonitring
+ " localMode=" + useLocal);
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
final Configuration clusterConfig;
if (useLocal) {
clusterConfig = new Configuration();
      clusterConfig.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
} else {
clusterConfig = mrCluster.getConfig();
}
final JobClient jc = new JobClient(clusterConfig);
final Configuration sleepConf = new Configuration(clusterConfig);
    // set master address to local to test that local mode is applied iff framework == local
sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER,
useJobClassLoader);
if (useCustomClasses) {
// to test AM loading user classes such as output format class, we want
// to blacklist them from the system classes (they need to be prepended
// as the first match wins)
String systemClasses = ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
// exclude the custom classes from system classes
systemClasses = "-" + CustomOutputFormat.class.getName() + ",-" +
CustomSpeculator.class.getName() + "," +
systemClasses;
sleepConf.set(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES,
systemClasses);
}
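    // Seed conf values such as io.sort.mb and the counter-group limit that
    // ConfVerificationMapper and the JobClient-based check below verify.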
sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
sleepConf.set(MRJobConfig.COUNTER_GROUPS_MAX_KEY, TEST_GROUP_MAX);
final SleepJob sleepJob = new SleepJob();
sleepJob.setConf(sleepConf);
final Job job = sleepJob.createJob(1, 1, 10, 1, 10, 1);
job.setMapperClass(ConfVerificationMapper.class);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.setJarByClass(SleepJob.class);
job.setMaxMapAttempts(1); // speed up failures
if (useCustomClasses) {
// set custom output format class and speculator class
job.setOutputFormatClass(CustomOutputFormat.class);
final Configuration jobConf = job.getConfiguration();
jobConf.setClass(MRJobConfig.MR_AM_JOB_SPECULATOR, CustomSpeculator.class,
Speculator.class);
// speculation needs to be enabled for the speculator to be loaded
jobConf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
}
job.submit();
final boolean succeeded;
if (useJobClientForMonitring && !useLocal) {
// We can't use getJobID in useLocal case because JobClient and Job
// point to different instances of LocalJobRunner
//
final JobID mapredJobID = JobID.downgrade(job.getJobID());
RunningJob runningJob = null;
do {
Thread.sleep(10);
runningJob = jc.getJob(mapredJobID);
} while (runningJob == null);
Assert.assertEquals("Unexpected RunningJob's "
+ MRJobConfig.COUNTER_GROUPS_MAX_KEY,
TEST_GROUP_MAX, runningJob.getConfiguration()
.get(MRJobConfig.COUNTER_GROUPS_MAX_KEY));
runningJob.waitForCompletion();
succeeded = runningJob.isSuccessful();
} else {
succeeded = job.waitForCompletion(true);
}
Assert.assertTrue("Job status: " + job.getStatus().getFailureInfo(),
succeeded);
}
public static class CustomOutputFormat<K,V> extends NullOutputFormat<K,V> {
public CustomOutputFormat() {
verifyClassLoader(getClass());
}
/**
* Verifies that the class was loaded by the job classloader if it is in the
* context of the MRAppMaster, and if not throws an exception to fail the
* job.
*/
private void verifyClassLoader(Class<?> cls) {
// to detect that it is instantiated in the context of the MRAppMaster, we
    // inspect the stack trace and check whether a caller is MRAppMaster
for (StackTraceElement e: new Throwable().getStackTrace()) {
if (e.getClassName().equals(MRAppMaster.class.getName()) &&
!(cls.getClassLoader() instanceof ApplicationClassLoader)) {
throw new ExceptionInInitializerError("incorrect classloader used");
}
}
}
}
public static class CustomSpeculator extends DefaultSpeculator {
public CustomSpeculator(Configuration conf, AppContext context) {
super(conf, context);
verifyClassLoader(getClass());
}
/**
* Verifies that the class was loaded by the job classloader if it is in the
* context of the MRAppMaster, and if not throws an exception to fail the
* job.
*/
private void verifyClassLoader(Class<?> cls) {
// to detect that it is instantiated in the context of the MRAppMaster, we
    // inspect the stack trace and check whether a caller is MRAppMaster
for (StackTraceElement e: new Throwable().getStackTrace()) {
if (e.getClassName().equals(MRAppMaster.class.getName()) &&
!(cls.getClassLoader() instanceof ApplicationClassLoader)) {
throw new ExceptionInInitializerError("incorrect classloader used");
}
}
}
}
protected void verifySleepJobCounters(Job job) throws InterruptedException,
IOException {
Counters counters = job.getCounters();
Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
.getValue());
Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert.assertEquals(numSleepReducers,
counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
Assert
.assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
&& counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
Assert
.assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
&& counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
protected void verifyTaskProgress(Job job) throws InterruptedException,
IOException {
for (TaskReport taskReport : job.getTaskReports(TaskType.MAP)) {
Assert.assertTrue(0.9999f < taskReport.getProgress()
&& 1.0001f > taskReport.getProgress());
}
for (TaskReport taskReport : job.getTaskReports(TaskType.REDUCE)) {
Assert.assertTrue(0.9999f < taskReport.getProgress()
&& 1.0001f > taskReport.getProgress());
}
}
@Test (timeout = 60000)
public void testRandomWriter() throws IOException, InterruptedException,
ClassNotFoundException {
LOG.info("\n\n\nStarting testRandomWriter().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
RandomTextWriterJob randomWriterJob = new RandomTextWriterJob();
mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
Job job = randomWriterJob.createJob(mrCluster.getConfig());
Path outputDir = new Path(OUTPUT_ROOT_DIR, "random-output");
FileOutputFormat.setOutputPath(job, outputDir);
job.setSpeculativeExecution(false);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.setJarByClass(RandomTextWriterJob.class);
job.setMaxMapAttempts(1); // speed up failures
job.submit();
String trackingUrl = job.getTrackingURL();
String jobId = job.getJobID().toString();
boolean succeeded = job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
Assert.assertTrue("Tracking URL was " + trackingUrl +
" but didn't Match Job ID " + jobId ,
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
// Make sure there are three files in the output-dir
RemoteIterator<FileStatus> iterator =
FileContext.getFileContext(mrCluster.getConfig()).listStatus(
outputDir);
int count = 0;
while (iterator.hasNext()) {
FileStatus file = iterator.next();
if (!file.getPath().getName()
.equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
count++;
}
}
Assert.assertEquals("Number of part files is wrong!", 3, count);
verifyRandomWriterCounters(job);
// TODO later: add explicit "isUber()" checks of some sort
}
protected void verifyRandomWriterCounters(Job job)
throws InterruptedException, IOException {
Counters counters = job.getCounters();
Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
.getValue());
Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert
.assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
&& counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
@Test (timeout = 60000)
public void testFailingMapper() throws IOException, InterruptedException,
ClassNotFoundException {
LOG.info("\n\n\nStarting testFailingMapper().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
Job job = runFailingMapperJob();
TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
TaskAttemptID aId = new TaskAttemptID(taskID, 0);
System.out.println("Diagnostics for " + aId + " :");
for (String diag : job.getTaskDiagnostics(aId)) {
System.out.println(diag);
}
aId = new TaskAttemptID(taskID, 1);
System.out.println("Diagnostics for " + aId + " :");
for (String diag : job.getTaskDiagnostics(aId)) {
System.out.println(diag);
}
TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
Assert.assertEquals(TaskCompletionEvent.Status.FAILED,
events[0].getStatus());
Assert.assertEquals(TaskCompletionEvent.Status.TIPFAILED,
events[1].getStatus());
Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
verifyFailingMapperCounters(job);
// TODO later: add explicit "isUber()" checks of some sort
}
protected void verifyFailingMapperCounters(Job job)
throws InterruptedException, IOException {
Counters counters = job.getCounters();
Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
.getValue());
Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
.getValue());
Assert
.assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
&& counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
protected Job runFailingMapperJob()
throws IOException, InterruptedException, ClassNotFoundException {
Configuration myConf = new Configuration(mrCluster.getConfig());
myConf.setInt(MRJobConfig.NUM_MAPS, 1);
myConf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2); //reduce the number of attempts
Job job = Job.getInstance(myConf);
job.setJarByClass(FailingMapper.class);
job.setJobName("failmapper");
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setInputFormatClass(RandomInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapperClass(FailingMapper.class);
job.setNumReduceTasks(0);
FileOutputFormat.setOutputPath(job, new Path(OUTPUT_ROOT_DIR,
"failmapper-output"));
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.submit();
String trackingUrl = job.getTrackingURL();
String jobId = job.getJobID().toString();
boolean succeeded = job.waitForCompletion(true);
Assert.assertFalse(succeeded);
Assert.assertTrue("Tracking URL was " + trackingUrl +
" but didn't Match Job ID " + jobId ,
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
return job;
}
//@Test (timeout = 60000)
public void testSleepJobWithSecurityOn() throws IOException,
InterruptedException, ClassNotFoundException {
LOG.info("\n\n\nStarting testSleepJobWithSecurityOn().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
return;
}
mrCluster.getConfig().set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
mrCluster.getConfig().set(YarnConfiguration.RM_KEYTAB, "/etc/krb5.keytab");
mrCluster.getConfig().set(YarnConfiguration.NM_KEYTAB, "/etc/krb5.keytab");
mrCluster.getConfig().set(YarnConfiguration.RM_PRINCIPAL,
"rm/sightbusy-lx@LOCALHOST");
mrCluster.getConfig().set(YarnConfiguration.NM_PRINCIPAL,
"nm/sightbusy-lx@LOCALHOST");
UserGroupInformation.setConfiguration(mrCluster.getConfig());
// Keep it in here instead of after RM/NM as multiple user logins happen in
// the same JVM.
UserGroupInformation user = UserGroupInformation.getCurrentUser();
LOG.info("User name is " + user.getUserName());
for (Token<? extends TokenIdentifier> str : user.getTokens()) {
LOG.info("Token is " + str.encodeToUrlString());
}
user.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
SleepJob sleepJob = new SleepJob();
sleepJob.setConf(mrCluster.getConfig());
Job job = sleepJob.createJob(3, 0, 10000, 1, 0, 0);
// //Job with reduces
// Job job = sleepJob.createJob(3, 2, 10000, 1, 10000, 1);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.submit();
String trackingUrl = job.getTrackingURL();
String jobId = job.getJobID().toString();
job.waitForCompletion(true);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
Assert.assertTrue("Tracking URL was " + trackingUrl +
" but didn't Match Job ID " + jobId ,
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
return null;
}
});
// TODO later: add explicit "isUber()" checks of some sort
}
@Test(timeout = 120000)
public void testContainerRollingLog() throws IOException,
InterruptedException, ClassNotFoundException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
final SleepJob sleepJob = new SleepJob();
final JobConf sleepConf = new JobConf(mrCluster.getConfig());
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
final long userLogKb = 4;
sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
final long amLogKb = 7;
sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
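    // The small log limits plus the configured backups force both the AM and
    // the single map task to roll syslog into syslog, syslog.1, ..., which the
    // assertions below count and size-check.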
sleepJob.setConf(sleepConf);
final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.waitForCompletion(true);
final JobId jobId = TypeConverter.toYarn(job.getJobID());
final ApplicationId appID = jobId.getAppId();
int pollElapsed = 0;
while (true) {
Thread.sleep(1000);
pollElapsed += 1000;
if (TERMINAL_RM_APP_STATES.contains(
mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
.getState())) {
break;
}
if (pollElapsed >= 60000) {
LOG.warn("application did not reach terminal state within 60 seconds");
break;
}
}
Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
.getRMContext().getRMApps().get(appID).getState());
// Job finished, verify logs
//
final String appIdStr = appID.toString();
final String appIdSuffix = appIdStr.substring("application_".length(),
appIdStr.length());
final String containerGlob = "container_" + appIdSuffix + "_*_*";
final String syslogGlob = appIdStr
+ Path.SEPARATOR + containerGlob
+ Path.SEPARATOR + TaskLog.LogName.SYSLOG;
int numAppMasters = 0;
int numMapTasks = 0;
for (int i = 0; i < NUM_NODE_MGRS; i++) {
final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
for (String logDir :
nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
final Path absSyslogGlob =
new Path(logDir + Path.SEPARATOR + syslogGlob);
LOG.info("Checking for glob: " + absSyslogGlob);
final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
for (FileStatus slog : syslogs) {
boolean foundAppMaster = job.isUber();
final Path containerPathComponent = slog.getPath().getParent();
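          // Unless the job ran uberized, the AM is the container whose id
          // within the application is 1.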
if (!foundAppMaster) {
final ContainerId cid = ConverterUtils.toContainerId(
containerPathComponent.getName());
foundAppMaster =
              ((cid.getContainerId() & ContainerId.CONTAINER_ID_BITMASK) == 1);
}
final FileStatus[] sysSiblings = localFs.globStatus(new Path(
containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
// sort to ensure for i > 0 sysSiblings[i] == "syslog.i"
Arrays.sort(sysSiblings);
if (foundAppMaster) {
numAppMasters++;
} else {
numMapTasks++;
}
if (foundAppMaster) {
Assert.assertSame("Unexpected number of AM sylog* files",
sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1,
sysSiblings.length);
Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,
sysSiblings[1].getLen() >= amLogKb * 1024);
} else {
Assert.assertSame("Unexpected number of MR task sylog* files",
sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1,
sysSiblings.length);
Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,
sysSiblings[1].getLen() >= userLogKb * 1024);
}
}
}
}
// Make sure we checked non-empty set
//
Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
} else {
Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
}
}
public static class DistributedCacheChecker extends
Mapper<LongWritable, Text, NullWritable, NullWritable> {
@Override
public void setup(Context context) throws IOException {
Configuration conf = context.getConfiguration();
Path[] localFiles = context.getLocalCacheFiles();
URI[] files = context.getCacheFiles();
Path[] localArchives = context.getLocalCacheArchives();
URI[] archives = context.getCacheArchives();
      // Check that 4 (2 + appjar + DistributedCacheChecker jar) files
// and 2 archives are present
Assert.assertEquals(4, localFiles.length);
Assert.assertEquals(4, files.length);
Assert.assertEquals(2, localArchives.length);
Assert.assertEquals(2, archives.length);
// Check lengths of the files
Map<String, Path> filesMap = pathsToMap(localFiles);
Assert.assertTrue(filesMap.containsKey("distributed.first.symlink"));
Assert.assertEquals(1, localFs.getFileStatus(
filesMap.get("distributed.first.symlink")).getLen());
Assert.assertTrue(filesMap.containsKey("distributed.second.jar"));
Assert.assertTrue(localFs.getFileStatus(
filesMap.get("distributed.second.jar")).getLen() > 1);
// Check extraction of the archive
Map<String, Path> archivesMap = pathsToMap(localArchives);
Assert.assertTrue(archivesMap.containsKey("distributed.third.jar"));
Assert.assertTrue(localFs.exists(new Path(
archivesMap.get("distributed.third.jar"), "distributed.jar.inside3")));
Assert.assertTrue(archivesMap.containsKey("distributed.fourth.jar"));
Assert.assertTrue(localFs.exists(new Path(
archivesMap.get("distributed.fourth.jar"), "distributed.jar.inside4")));
// Check the class loaders
LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
ClassLoader cl = Thread.currentThread().getContextClassLoader();
// Both the file and the archive should have been added to classpath, so
// both should be reachable via the class loader.
Assert.assertNotNull(cl.getResource("distributed.jar.inside2"));
Assert.assertNotNull(cl.getResource("distributed.jar.inside3"));
Assert.assertNotNull(cl.getResource("distributed.jar.inside4"));
// The Job Jar should have been extracted to a folder named "job.jar" and
// added to the classpath; the two jar files in the lib folder in the Job
// Jar should have also been added to the classpath
Assert.assertNotNull(cl.getResource("job.jar/"));
Assert.assertNotNull(cl.getResource("job.jar/lib/lib1.jar"));
Assert.assertNotNull(cl.getResource("job.jar/lib/lib2.jar"));
// Check that the symlink for the renaming was created in the cwd;
File symlinkFile = new File("distributed.first.symlink");
Assert.assertTrue(symlinkFile.exists());
Assert.assertEquals(1, symlinkFile.length());
// Check that the symlink for the Job Jar was created in the cwd and
// points to the extracted directory
File jobJarDir = new File("job.jar");
if (Shell.WINDOWS) {
Assert.assertTrue(isWindowsSymlinkedDirectory(jobJarDir));
} else {
Assert.assertTrue(FileUtils.isSymlink(jobJarDir));
Assert.assertTrue(jobJarDir.isDirectory());
}
}
/**
* Used on Windows to determine if the specified file is a symlink that
* targets a directory. On most platforms, these checks can be done using
* commons-io. On Windows, the commons-io implementation is unreliable and
* always returns false. Instead, this method checks the output of the dir
* command. After migrating to Java 7, this method can be removed in favor
* of the new method java.nio.file.Files.isSymbolicLink, which is expected to
* work cross-platform.
*
* @param file File to check
* @return boolean true if the file is a symlink that targets a directory
* @throws IOException thrown for any I/O error
*/
private static boolean isWindowsSymlinkedDirectory(File file)
throws IOException {
String dirOut = Shell.execCommand("cmd", "/c", "dir",
file.getAbsoluteFile().getParent());
StringReader sr = new StringReader(dirOut);
BufferedReader br = new BufferedReader(sr);
try {
        String line = br.readLine();
        while (line != null) {
          if (line.contains(file.getName()) && line.contains("<SYMLINKD>")) {
            return true;
          }
          line = br.readLine();
        }
return false;
} finally {
IOUtils.closeStream(br);
IOUtils.closeStream(sr);
}
}
/**
* Returns a mapping of the final component of each path to the corresponding
* Path instance. This assumes that every given Path has a unique string in
* the final path component, which is true for these tests.
*
* @param paths Path[] to map
* @return Map<String, Path> mapping the final component of each path to the
* corresponding Path instance
*/
private static Map<String, Path> pathsToMap(Path[] paths) {
Map<String, Path> map = new HashMap<String, Path>();
for (Path path: paths) {
map.put(path.getName(), path);
}
return map;
}
}
public void _testDistributedCache(String jobJarPath) throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
// Create a temporary file of length 1.
Path first = createTempFile("distributed.first", "x");
// Create two jars with a single file inside them.
Path second =
makeJar(new Path(TEST_ROOT_DIR, "distributed.second.jar"), 2);
Path third =
makeJar(new Path(TEST_ROOT_DIR, "distributed.third.jar"), 3);
Path fourth =
makeJar(new Path(TEST_ROOT_DIR, "distributed.fourth.jar"), 4);
Job job = Job.getInstance(mrCluster.getConfig());
    // Set the job jar to a new "dummy" jar so we can check that it's extracted
// properly
job.setJar(jobJarPath);
// Because the job jar is a "dummy" jar, we need to include the jar with
// DistributedCacheChecker or it won't be able to find it
Path distributedCacheCheckerJar = new Path(
JarFinder.getJar(DistributedCacheChecker.class));
job.addFileToClassPath(distributedCacheCheckerJar.makeQualified(
localFs.getUri(), distributedCacheCheckerJar.getParent()));
job.setMapperClass(DistributedCacheChecker.class);
job.setOutputFormatClass(NullOutputFormat.class);
FileInputFormat.setInputPaths(job, first);
// Creates the Job Configuration
job.addCacheFile(
new URI(first.toUri().toString() + "#distributed.first.symlink"));
job.addFileToClassPath(second);
// The AppMaster jar itself
job.addFileToClassPath(
APP_JAR.makeQualified(localFs.getUri(), APP_JAR.getParent()));
job.addArchiveToClassPath(third);
job.addCacheArchive(fourth.toUri());
job.setMaxMapAttempts(1); // speed up failures
job.submit();
String trackingUrl = job.getTrackingURL();
String jobId = job.getJobID().toString();
Assert.assertTrue(job.waitForCompletion(false));
Assert.assertTrue("Tracking URL was " + trackingUrl +
" but didn't Match Job ID " + jobId ,
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
}
@Test (timeout = 600000)
public void testDistributedCache() throws Exception {
// Test with a local (file:///) Job Jar
Path localJobJarPath = makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString());
_testDistributedCache(localJobJarPath.toUri().toString());
// Test with a remote (hdfs://) Job Jar
Path remoteJobJarPath = new Path(remoteFs.getUri().toString() + "/",
localJobJarPath.getName());
remoteFs.moveFromLocalFile(localJobJarPath, remoteJobJarPath);
File localJobJarFile = new File(localJobJarPath.toUri().toString());
if (localJobJarFile.exists()) { // just to make sure
localJobJarFile.delete();
}
_testDistributedCache(remoteJobJarPath.toUri().toString());
}
private Path createTempFile(String filename, String contents)
throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
FSDataOutputStream os = localFs.create(path);
os.writeBytes(contents);
os.close();
localFs.setPermission(path, new FsPermission("700"));
return path;
}
private Path makeJar(Path p, int index) throws FileNotFoundException,
IOException {
FileOutputStream fos =
new FileOutputStream(new File(p.toUri().getPath()));
JarOutputStream jos = new JarOutputStream(fos);
ZipEntry ze = new ZipEntry("distributed.jar.inside" + index);
jos.putNextEntry(ze);
jos.write(("inside the jar!" + index).getBytes());
jos.closeEntry();
jos.close();
localFs.setPermission(p, new FsPermission("700"));
return p;
}
private Path makeJobJarWithLib(String testDir) throws FileNotFoundException,
IOException{
Path jobJarPath = new Path(testDir, "thejob.jar");
FileOutputStream fos =
new FileOutputStream(new File(jobJarPath.toUri().getPath()));
JarOutputStream jos = new JarOutputStream(fos);
// Have to put in real jar files or it will complain
createAndAddJarToJar(jos, new File(
new Path(testDir, "lib1.jar").toUri().getPath()));
createAndAddJarToJar(jos, new File(
new Path(testDir, "lib2.jar").toUri().getPath()));
jos.close();
localFs.setPermission(jobJarPath, new FsPermission("700"));
return jobJarPath;
}
private void createAndAddJarToJar(JarOutputStream jos, File jarFile)
throws FileNotFoundException, IOException {
FileOutputStream fos2 = new FileOutputStream(jarFile);
JarOutputStream jos2 = new JarOutputStream(fos2);
// Have to have at least one entry or it will complain
ZipEntry ze = new ZipEntry("lib1.inside");
jos2.putNextEntry(ze);
jos2.closeEntry();
jos2.close();
ze = new ZipEntry("lib/" + jarFile.getName());
jos.putNextEntry(ze);
FileInputStream in = new FileInputStream(jarFile);
byte buf[] = new byte[1024];
int numRead;
do {
numRead = in.read(buf);
if (numRead >= 0) {
jos.write(buf, 0, numRead);
}
} while (numRead != -1);
in.close();
jos.closeEntry();
jarFile.delete();
}
public static class ConfVerificationMapper extends SleepMapper {
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
super.setup(context);
final Configuration conf = context.getConfiguration();
// check if the job classloader is enabled and verify the TCCL
if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)) {
ClassLoader tccl = Thread.currentThread().getContextClassLoader();
if (!(tccl instanceof ApplicationClassLoader)) {
throw new IOException("TCCL expected: " +
ApplicationClassLoader.class.getName() + ", actual: " +
tccl.getClass().getName());
}
}
final String ioSortMb = conf.get(MRJobConfig.IO_SORT_MB);
if (!TEST_IO_SORT_MB.equals(ioSortMb)) {
throw new IOException("io.sort.mb expected: " + TEST_IO_SORT_MB
+ ", actual: " + ioSortMb);
}
}
@Override
public void map(IntWritable key, IntWritable value, Context context) throws IOException, InterruptedException {
super.map(key, value, context);
for (int i = 0; i < 100; i++) {
context.getCounter("testCounterGroup-" + i,
"testCounter").increment(1);
}
}
}
}
| 41,528 | 39.476608 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecution.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
import org.apache.hadoop.mapreduce.v2.app.speculate.TaskRuntimeEstimator;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestSpeculativeExecution {
/*
* This class is used to control when speculative execution happens.
*/
public static class TestSpecEstimator extends LegacyTaskRuntimeEstimator {
private static final long SPECULATE_THIS = 999999L;
public TestSpecEstimator() {
super();
}
/*
* This will only be called if speculative execution is turned on.
*
* If either mapper or reducer speculation is turned on, this will be
* called.
*
* This will cause speculation to engage for the first mapper or first
* reducer (that is, attempt ID "*_m_000000_0" or "*_r_000000_0")
*
* If this attempt is killed, the retry will have attempt id 1, so it
* will not engage speculation again.
*/
@Override
public long estimatedRuntime(TaskAttemptId id) {
if ((id.getTaskId().getId() == 0) && (id.getId() == 0)) {
return SPECULATE_THIS;
}
return super.estimatedRuntime(id);
}
}
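  /*
   * Hedged illustration (assumption, not part of the original test): how a
   * custom TaskRuntimeEstimator such as TestSpecEstimator above is wired into
   * the MR application master through configuration. runSpecTest() below does
   * the same wiring before submitting the job.
   */
  @SuppressWarnings("unused")
  private static void configureCustomEstimator(Configuration conf) {
    // enable map-side speculation so the estimator is actually consulted
    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
    // register the custom estimator implementation with the AM
    conf.setClass(MRJobConfig.MR_AM_TASK_ESTIMATOR,
        TestSpecEstimator.class, TaskRuntimeEstimator.class);
  }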
private static final Log LOG = LogFactory.getLog(TestSpeculativeExecution.class);
protected static MiniMRYarnCluster mrCluster;
private static Configuration initialConf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(initialConf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static Path TEST_ROOT_DIR =
new Path("target",TestSpeculativeExecution.class.getName() + "-tmpDir")
.makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
private static Path TEST_OUT_DIR = new Path(TEST_ROOT_DIR, "test.out.dir");
@BeforeClass
public static void setup() throws IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(TestSpeculativeExecution.class.getName(), 4);
Configuration conf = new Configuration();
mrCluster.init(conf);
mrCluster.start();
}
    // Work around the absence of a public distributed cache.
localFs.copyFromLocalFile(new Path(MiniMRYarnCluster.APPJAR), APP_JAR);
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
@AfterClass
public static void tearDown() {
if (mrCluster != null) {
mrCluster.stop();
mrCluster = null;
}
}
public static class SpeculativeMapper extends
Mapper<Object, Text, Text, IntWritable> {
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
// Make one mapper slower for speculative execution
TaskAttemptID taid = context.getTaskAttemptID();
long sleepTime = 100;
Configuration conf = context.getConfiguration();
boolean test_speculate_map =
conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false);
// IF TESTING MAPPER SPECULATIVE EXECUTION:
// Make the "*_m_000000_0" attempt take much longer than the others.
// When speculative execution is enabled, this should cause the attempt
// to be killed and restarted. At that point, the attempt ID will be
// "*_m_000000_1", so sleepTime will still remain 100ms.
if ( (taid.getTaskType() == TaskType.MAP) && test_speculate_map
&& (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
sleepTime = 10000;
}
try{
Thread.sleep(sleepTime);
} catch(InterruptedException ie) {
// Ignore
}
context.write(value, new IntWritable(1));
}
}
public static class SpeculativeReducer extends
Reducer<Text,IntWritable,Text,IntWritable> {
public void reduce(Text key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
// Make one reducer slower for speculative execution
TaskAttemptID taid = context.getTaskAttemptID();
long sleepTime = 100;
Configuration conf = context.getConfiguration();
boolean test_speculate_reduce =
conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
// IF TESTING REDUCE SPECULATIVE EXECUTION:
// Make the "*_r_000000_0" attempt take much longer than the others.
// When speculative execution is enabled, this should cause the attempt
// to be killed and restarted. At that point, the attempt ID will be
// "*_r_000000_1", so sleepTime will still remain 100ms.
if ( (taid.getTaskType() == TaskType.REDUCE) && test_speculate_reduce
&& (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
sleepTime = 10000;
}
try{
Thread.sleep(sleepTime);
} catch(InterruptedException ie) {
// Ignore
}
context.write(key,new IntWritable(0));
}
}
@Test
public void testSpeculativeExecution() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
/*------------------------------------------------------------------
* Test that Map/Red does not speculate if MAP_SPECULATIVE and
* REDUCE_SPECULATIVE are both false.
* -----------------------------------------------------------------
*/
Job job = runSpecTest(false, false);
boolean succeeded = job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
Counters counters = job.getCounters();
Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
.getValue());
Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
.getValue());
/*----------------------------------------------------------------------
* Test that Mapper speculates if MAP_SPECULATIVE is true and
* REDUCE_SPECULATIVE is false.
* ---------------------------------------------------------------------
*/
job = runSpecTest(true, false);
succeeded = job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
counters = job.getCounters();
// The long-running map will be killed and a new one started.
Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
.getValue());
Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
.getValue());
Assert.assertEquals(1, counters.findCounter(JobCounter.NUM_KILLED_MAPS)
.getValue());
/*----------------------------------------------------------------------
* Test that Reducer speculates if REDUCE_SPECULATIVE is true and
* MAP_SPECULATIVE is false.
* ---------------------------------------------------------------------
*/
job = runSpecTest(false, true);
succeeded = job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
counters = job.getCounters();
    // The long-running reduce will be killed and a new one started.
Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES)
.getValue());
}
private Path createTempFile(String filename, String contents)
throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
FSDataOutputStream os = localFs.create(path);
os.writeBytes(contents);
os.close();
localFs.setPermission(path, new FsPermission("700"));
return path;
}
private Job runSpecTest(boolean mapspec, boolean redspec)
throws IOException, ClassNotFoundException, InterruptedException {
Path first = createTempFile("specexec_map_input1", "a\nz");
Path secnd = createTempFile("specexec_map_input2", "a\nz");
Configuration conf = mrCluster.getConfig();
conf.setBoolean(MRJobConfig.MAP_SPECULATIVE,mapspec);
conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE,redspec);
conf.setClass(MRJobConfig.MR_AM_TASK_ESTIMATOR,
TestSpecEstimator.class,
TaskRuntimeEstimator.class);
Job job = Job.getInstance(conf);
job.setJarByClass(TestSpeculativeExecution.class);
job.setMapperClass(SpeculativeMapper.class);
job.setReducerClass(SpeculativeReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setNumReduceTasks(2);
FileInputFormat.setInputPaths(job, first);
FileInputFormat.addInputPath(job, secnd);
FileOutputFormat.setOutputPath(job, TEST_OUT_DIR);
// Delete output directory if it exists.
try {
localFs.delete(TEST_OUT_DIR,true);
} catch (IOException e) {
// ignore
}
    // Make the AppMaster jar itself available on the task classpath.
    job.addFileToClassPath(APP_JAR);
job.setMaxMapAttempts(2);
job.submit();
return job;
}
}
| 11,617 | 36.356913 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRMultipleOutputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.*;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
public class TestMRMultipleOutputs extends HadoopTestCase {
public TestMRMultipleOutputs() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testWithoutCounters() throws Exception {
_testMultipleOutputs(false);
_testMOWithJavaSerialization(false);
}
public void testWithCounters() throws Exception {
_testMultipleOutputs(true);
_testMOWithJavaSerialization(true);
}
private static String localPathRoot =
System.getProperty("test.build.data", "/tmp");
private static final Path ROOT_DIR = new Path(localPathRoot, "testing/mo");
private static final Path IN_DIR = new Path(ROOT_DIR, "input");
private static final Path OUT_DIR = new Path(ROOT_DIR, "output");
private static String TEXT = "text";
private static String SEQUENCE = "sequence";
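  /*
   * Hedged sketch (illustrative only): the two write styles exercised by the
   * tests below. Writing to a registered named output produces part files such
   * as "text-r-00000"; writing with a baseOutputPath produces files named after
   * that path, e.g. "a-r-00000".
   */
  @SuppressWarnings({"rawtypes", "unchecked"})
  private static void writeBothStyles(MultipleOutputs mos, LongWritable key,
      Text value) throws IOException, InterruptedException {
    // named output registered via MultipleOutputs.addNamedOutput(job, TEXT, ...)
    mos.write(TEXT, key, new Text(TEXT));
    // value-based output: the third argument becomes the file name prefix
    mos.write(key, value, value.toString());
  }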
public void setUp() throws Exception {
super.setUp();
Configuration conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(ROOT_DIR, true);
}
public void tearDown() throws Exception {
Configuration conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(ROOT_DIR, true);
super.tearDown();
}
protected void _testMOWithJavaSerialization(boolean withCounters) throws Exception {
String input = "a\nb\nc\nd\ne\nc\nd\ne";
Configuration conf = createJobConf();
conf.set("io.serializations",
"org.apache.hadoop.io.serializer.JavaSerialization," +
"org.apache.hadoop.io.serializer.WritableSerialization");
Job job = MapReduceTestUtil.createJob(conf, IN_DIR, OUT_DIR, 2, 1, input);
job.setJobName("mo");
MultipleOutputs.addNamedOutput(job, TEXT, TextOutputFormat.class,
Long.class, String.class);
MultipleOutputs.setCountersEnabled(job, withCounters);
job.setSortComparatorClass(JavaSerializationComparator.class);
job.setMapOutputKeyClass(Long.class);
job.setMapOutputValueClass(String.class);
job.setOutputKeyClass(Long.class);
job.setOutputValueClass(String.class);
job.setMapperClass(MOJavaSerDeMap.class);
job.setReducerClass(MOJavaSerDeReduce.class);
job.waitForCompletion(true);
// assert number of named output part files
int namedOutputCount = 0;
int valueBasedOutputCount = 0;
FileSystem fs = OUT_DIR.getFileSystem(conf);
FileStatus[] statuses = fs.listStatus(OUT_DIR);
for (FileStatus status : statuses) {
String fileName = status.getPath().getName();
if (fileName.equals("text-m-00000") ||
fileName.equals("text-m-00001") ||
fileName.equals("text-r-00000")) {
namedOutputCount++;
} else if (fileName.equals("a-r-00000") ||
fileName.equals("b-r-00000") ||
fileName.equals("c-r-00000") ||
fileName.equals("d-r-00000") ||
fileName.equals("e-r-00000")) {
valueBasedOutputCount++;
}
}
assertEquals(3, namedOutputCount);
assertEquals(5, valueBasedOutputCount);
// assert TextOutputFormat files correctness
BufferedReader reader = new BufferedReader(
new InputStreamReader(fs.open(
new Path(FileOutputFormat.getOutputPath(job), "text-r-00000"))));
int count = 0;
String line = reader.readLine();
while (line != null) {
assertTrue(line.endsWith(TEXT));
line = reader.readLine();
count++;
}
reader.close();
assertFalse(count == 0);
if (withCounters) {
CounterGroup counters =
job.getCounters().getGroup(MultipleOutputs.class.getName());
assertEquals(6, counters.size());
assertEquals(4, counters.findCounter(TEXT).getValue());
assertEquals(2, counters.findCounter("a").getValue());
assertEquals(2, counters.findCounter("b").getValue());
assertEquals(4, counters.findCounter("c").getValue());
assertEquals(4, counters.findCounter("d").getValue());
assertEquals(4, counters.findCounter("e").getValue());
}
}
protected void _testMultipleOutputs(boolean withCounters) throws Exception {
String input = "a\nb\nc\nd\ne\nc\nd\ne";
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, IN_DIR, OUT_DIR, 2, 1, input);
job.setJobName("mo");
MultipleOutputs.addNamedOutput(job, TEXT, TextOutputFormat.class,
LongWritable.class, Text.class);
MultipleOutputs.addNamedOutput(job, SEQUENCE,
SequenceFileOutputFormat.class, IntWritable.class, Text.class);
MultipleOutputs.setCountersEnabled(job, withCounters);
job.setMapperClass(MOMap.class);
job.setReducerClass(MOReduce.class);
job.waitForCompletion(true);
// assert number of named output part files
int namedOutputCount = 0;
int valueBasedOutputCount = 0;
FileSystem fs = OUT_DIR.getFileSystem(conf);
FileStatus[] statuses = fs.listStatus(OUT_DIR);
for (FileStatus status : statuses) {
String fileName = status.getPath().getName();
if (fileName.equals("text-m-00000") ||
fileName.equals("text-m-00001") ||
fileName.equals("text-r-00000") ||
fileName.equals("sequence_A-m-00000") ||
fileName.equals("sequence_A-m-00001") ||
fileName.equals("sequence_B-m-00000") ||
fileName.equals("sequence_B-m-00001") ||
fileName.equals("sequence_B-r-00000") ||
fileName.equals("sequence_C-r-00000")) {
namedOutputCount++;
} else if (fileName.equals("a-r-00000") ||
fileName.equals("b-r-00000") ||
fileName.equals("c-r-00000") ||
fileName.equals("d-r-00000") ||
fileName.equals("e-r-00000")) {
valueBasedOutputCount++;
}
}
assertEquals(9, namedOutputCount);
assertEquals(5, valueBasedOutputCount);
// assert TextOutputFormat files correctness
BufferedReader reader = new BufferedReader(
new InputStreamReader(fs.open(
new Path(FileOutputFormat.getOutputPath(job), "text-r-00000"))));
int count = 0;
String line = reader.readLine();
while (line != null) {
assertTrue(line.endsWith(TEXT));
line = reader.readLine();
count++;
}
reader.close();
assertFalse(count == 0);
// assert SequenceOutputFormat files correctness
SequenceFile.Reader seqReader =
new SequenceFile.Reader(fs, new Path(FileOutputFormat.getOutputPath(job),
"sequence_B-r-00000"), conf);
assertEquals(IntWritable.class, seqReader.getKeyClass());
assertEquals(Text.class, seqReader.getValueClass());
count = 0;
IntWritable key = new IntWritable();
Text value = new Text();
while (seqReader.next(key, value)) {
assertEquals(SEQUENCE, value.toString());
count++;
}
seqReader.close();
assertFalse(count == 0);
if (withCounters) {
CounterGroup counters =
job.getCounters().getGroup(MultipleOutputs.class.getName());
assertEquals(9, counters.size());
assertEquals(4, counters.findCounter(TEXT).getValue());
assertEquals(2, counters.findCounter(SEQUENCE + "_A").getValue());
assertEquals(4, counters.findCounter(SEQUENCE + "_B").getValue());
assertEquals(2, counters.findCounter(SEQUENCE + "_C").getValue());
assertEquals(2, counters.findCounter("a").getValue());
assertEquals(2, counters.findCounter("b").getValue());
assertEquals(4, counters.findCounter("c").getValue());
assertEquals(4, counters.findCounter("d").getValue());
assertEquals(4, counters.findCounter("e").getValue());
}
}
@SuppressWarnings("unchecked")
public static class MOMap extends Mapper<LongWritable, Text, LongWritable,
Text> {
private MultipleOutputs mos;
public void setup(Context context) {
mos = new MultipleOutputs(context);
}
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
context.write(key, value);
if (value.toString().equals("a")) {
mos.write(TEXT, key, new Text(TEXT));
mos.write(SEQUENCE, new IntWritable(1), new Text(SEQUENCE),
(SEQUENCE + "_A"));
mos.write(SEQUENCE, new IntWritable(2), new Text(SEQUENCE),
(SEQUENCE + "_B"));
}
}
public void cleanup(Context context)
throws IOException, InterruptedException {
mos.close();
}
}
@SuppressWarnings("unchecked")
public static class MOReduce extends Reducer<LongWritable, Text,
LongWritable, Text> {
private MultipleOutputs mos;
public void setup(Context context) {
mos = new MultipleOutputs(context);
}
public void reduce(LongWritable key, Iterable<Text> values,
Context context) throws IOException, InterruptedException {
for (Text value : values) {
mos.write(key, value, value.toString());
if (!value.toString().equals("b")) {
context.write(key, value);
} else {
mos.write(TEXT, key, new Text(TEXT));
mos.write(SEQUENCE, new IntWritable(2), new Text(SEQUENCE),
(SEQUENCE + "_B"));
mos.write(SEQUENCE, new IntWritable(3), new Text(SEQUENCE),
(SEQUENCE + "_C"));
}
}
}
public void cleanup(Context context)
throws IOException, InterruptedException {
mos.close();
}
}
public static class MOJavaSerDeMap extends Mapper<LongWritable, Text, Long,
String> {
private MultipleOutputs<Long, String> mos;
public void setup(Context context) {
mos = new MultipleOutputs<Long, String>(context);
}
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
context.write(key.get(), value.toString());
if (value.toString().equals("a")) {
mos.write(TEXT, key.get(), TEXT);
}
}
public void cleanup(Context context)
throws IOException, InterruptedException {
mos.close();
}
}
public static class MOJavaSerDeReduce extends Reducer<Long, String,
Long, String> {
private MultipleOutputs<Long, String> mos;
public void setup(Context context) {
mos = new MultipleOutputs<Long, String>(context);
}
public void reduce(Long key, Iterable<String> values,
Context context) throws IOException, InterruptedException {
for (String value : values) {
mos.write(key, value, value.toString());
if (!value.toString().equals("b")) {
context.write(key, value);
} else {
mos.write(TEXT, key, new Text(TEXT));
}
}
}
public void cleanup(Context context)
throws IOException, InterruptedException {
mos.close();
}
}
}
| 12,161 | 33.067227 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRSequenceFileAsBinaryOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
public class TestMRSequenceFileAsBinaryOutputFormat extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestMRSequenceFileAsBinaryOutputFormat.class.getName());
private static final int RECORDS = 10000;
public void testBinary() throws IOException, InterruptedException {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
Path outdir = new Path(System.getProperty("test.build.data", "/tmp"),
"outseq");
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
FileOutputFormat.setOutputPath(job, outdir);
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job,
IntWritable.class );
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job,
DoubleWritable.class );
SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job,
CompressionType.BLOCK);
BytesWritable bkey = new BytesWritable();
BytesWritable bval = new BytesWritable();
TaskAttemptContext context =
MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
OutputFormat<BytesWritable, BytesWritable> outputFormat =
new SequenceFileAsBinaryOutputFormat();
OutputCommitter committer = outputFormat.getOutputCommitter(context);
committer.setupJob(job);
RecordWriter<BytesWritable, BytesWritable> writer = outputFormat.
getRecordWriter(context);
IntWritable iwritable = new IntWritable();
DoubleWritable dwritable = new DoubleWritable();
DataOutputBuffer outbuf = new DataOutputBuffer();
LOG.info("Creating data by SequenceFileAsBinaryOutputFormat");
try {
for (int i = 0; i < RECORDS; ++i) {
iwritable = new IntWritable(r.nextInt());
iwritable.write(outbuf);
bkey.set(outbuf.getData(), 0, outbuf.getLength());
outbuf.reset();
dwritable = new DoubleWritable(r.nextDouble());
dwritable.write(outbuf);
bval.set(outbuf.getData(), 0, outbuf.getLength());
outbuf.reset();
writer.write(bkey, bval);
}
} finally {
writer.close(context);
}
committer.commitTask(context);
committer.commitJob(job);
InputFormat<IntWritable, DoubleWritable> iformat =
new SequenceFileInputFormat<IntWritable, DoubleWritable>();
int count = 0;
r.setSeed(seed);
SequenceFileInputFormat.setInputPaths(job, outdir);
LOG.info("Reading data by SequenceFileInputFormat");
for (InputSplit split : iformat.getSplits(job)) {
RecordReader<IntWritable, DoubleWritable> reader =
iformat.createRecordReader(split, context);
MapContext<IntWritable, DoubleWritable, BytesWritable, BytesWritable>
mcontext = new MapContextImpl<IntWritable, DoubleWritable,
BytesWritable, BytesWritable>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(),
split);
reader.initialize(split, mcontext);
try {
int sourceInt;
double sourceDouble;
while (reader.nextKeyValue()) {
sourceInt = r.nextInt();
sourceDouble = r.nextDouble();
iwritable = reader.getCurrentKey();
dwritable = reader.getCurrentValue();
assertEquals(
"Keys don't match: " + "*" + iwritable.get() + ":" +
sourceInt + "*",
sourceInt, iwritable.get());
assertTrue(
"Vals don't match: " + "*" + dwritable.get() + ":" +
sourceDouble + "*",
Double.compare(dwritable.get(), sourceDouble) == 0 );
++count;
}
} finally {
reader.close();
}
}
assertEquals("Some records not found", RECORDS, count);
}
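  /*
   * Hedged helper sketch (illustrative only): the pattern used in testBinary()
   * above to pack a Writable's raw serialized bytes into a BytesWritable so
   * that SequenceFileAsBinaryOutputFormat writes them verbatim.
   */
  @SuppressWarnings("unused")
  private static void toBytesWritable(Writable w, BytesWritable dest,
      DataOutputBuffer buf) throws IOException {
    buf.reset();                                  // clear any previous record
    w.write(buf);                                 // serialize the writable
    dest.set(buf.getData(), 0, buf.getLength());  // copy the raw bytes
  }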
public void testSequenceOutputClassDefaultsToMapRedOutputClass()
throws IOException {
Job job = Job.getInstance();
    // Set arbitrary output classes to test getSequenceFileOutput{Key,Value}Class
job.setOutputKeyClass(FloatWritable.class);
job.setOutputValueClass(BooleanWritable.class);
assertEquals("SequenceFileOutputKeyClass should default to ouputKeyClass",
FloatWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(job));
assertEquals("SequenceFileOutputValueClass should default to "
+ "ouputValueClass",
BooleanWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job));
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job,
IntWritable.class );
SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job,
DoubleWritable.class );
assertEquals("SequenceFileOutputKeyClass not updated",
IntWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputKeyClass(job));
assertEquals("SequenceFileOutputValueClass not updated",
DoubleWritable.class,
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job));
}
public void testcheckOutputSpecsForbidRecordCompression()
throws IOException {
Job job = Job.getInstance();
FileSystem fs = FileSystem.getLocal(job.getConfiguration());
Path outputdir = new Path(System.getProperty("test.build.data", "/tmp")
+ "/output");
fs.delete(outputdir, true);
    // Without an output path, FileOutputFormat.checkOutputSpecs will throw
    // an InvalidJobConfException
FileOutputFormat.setOutputPath(job, outputdir);
// SequenceFileAsBinaryOutputFormat doesn't support record compression
// It should throw an exception when checked by checkOutputSpecs
SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job,
CompressionType.BLOCK);
try {
new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
} catch (Exception e) {
fail("Block compression should be allowed for "
+ "SequenceFileAsBinaryOutputFormat:Caught " + e.getClass().getName());
}
SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job,
CompressionType.RECORD);
try {
new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
fail("Record compression should not be allowed for "
+ "SequenceFileAsBinaryOutputFormat");
} catch (InvalidJobConfException ie) {
// expected
} catch (Exception e) {
fail("Expected " + InvalidJobConfException.class.getName()
+ "but caught " + e.getClass().getName() );
}
}
}
| 8,682 | 39.574766 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.*;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.UtilsForTests;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
public class TestMRCJCFileOutputCommitter extends TestCase {
private static Path outDir = new Path(System.getProperty("test.build.data",
"/tmp"), "output");
// A random task attempt id for testing.
private static String attempt = "attempt_200707121733_0001_m_000000_0";
private static String partFile = "part-m-00000";
private static TaskAttemptID taskID = TaskAttemptID.forName(attempt);
private Text key1 = new Text("key1");
private Text key2 = new Text("key2");
private Text val1 = new Text("val1");
private Text val2 = new Text("val2");
@SuppressWarnings("unchecked")
private void writeOutput(RecordWriter theRecordWriter,
TaskAttemptContext context) throws IOException, InterruptedException {
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(context);
}
}
private static void cleanup() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = outDir.getFileSystem(conf);
fs.delete(outDir, true);
}
@Override
public void setUp() throws IOException {
cleanup();
}
@Override
public void tearDown() throws IOException {
cleanup();
}
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do commit
committer.commitTask(tContext);
committer.commitJob(jContext);
// validate output
File expectedFile = new File(new Path(outDir, partFile).toString());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append('\t').append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append('\t').append(val2).append("\n");
String output = UtilsForTests.slurp(expectedFile);
assertEquals(output, expectedOutput.toString());
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testEmptyOutput() throws Exception {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// Do not write any output
// do commit
committer.commitTask(tContext);
committer.commitJob(jContext);
FileUtil.fullyDelete(new File(outDir.toString()));
}
@SuppressWarnings("unchecked")
public void testAbort() throws IOException, InterruptedException {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do abort
committer.abortTask(tContext);
File expectedFile = new File(new Path(committer.getWorkPath(), partFile)
.toString());
assertFalse("task temp dir still exists", expectedFile.exists());
committer.abortJob(jContext, JobStatus.State.FAILED);
expectedFile = new File(new Path(outDir, FileOutputCommitter.PENDING_DIR_NAME)
.toString());
assertFalse("job temp dir still exists", expectedFile.exists());
assertEquals("Output directory not empty", 0, new File(outDir.toString())
.listFiles().length);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public static class FakeFileSystem extends RawLocalFileSystem {
public FakeFileSystem() {
super();
}
public URI getUri() {
return URI.create("faildel:///");
}
@Override
public boolean delete(Path p, boolean recursive) throws IOException {
throw new IOException("fake delete failed");
}
}
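  /*
   * Hedged sketch (illustrative only): how the fake "faildel" scheme above is
   * registered so that paths like "faildel:///..." resolve to FakeFileSystem.
   * testFailAbort() below applies the same settings to the job configuration.
   */
  @SuppressWarnings("unused")
  private static Configuration newFakeFsConf() {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
    conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
    return conf;
  }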
@SuppressWarnings("unchecked")
public void testFailAbort() throws IOException, InterruptedException {
Job job = Job.getInstance();
Configuration conf = job.getConfiguration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
FileOutputFormat.setOutputPath(job, outDir);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
RecordWriter<?, ?> theRecordWriter = theOutputFormat
.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do abort
Throwable th = null;
try {
committer.abortTask(tContext);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
//Path taskBaseDirName = committer.getTaskAttemptBaseDirName(tContext);
File jobTmpDir = new File(committer.getJobAttemptPath(jContext).toUri().getPath());
File taskTmpDir = new File(committer.getTaskAttemptPath(tContext).toUri().getPath());
File expectedFile = new File(taskTmpDir, partFile);
assertTrue(expectedFile + " does not exists", expectedFile.exists());
th = null;
try {
committer.abortJob(jContext, JobStatus.State.FAILED);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
assertTrue("job temp dir does not exists", jobTmpDir.exists());
FileUtil.fullyDelete(new File(outDir.toString()));
}
}
| 9,128 | 35.810484 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestJobOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.UtilsForTests;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* A JUnit test to test Map-Reduce job committer.
*/
public class TestJobOutputCommitter extends HadoopTestCase {
public TestJobOutputCommitter() throws IOException {
super(CLUSTER_MR, LOCAL_FS, 1, 1);
}
private static String TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp")
+ "/" + "test-job-output-committer").toString();
private static final String CUSTOM_CLEANUP_FILE_NAME = "_custom_cleanup";
private static final String ABORT_KILLED_FILE_NAME = "_custom_abort_killed";
private static final String ABORT_FAILED_FILE_NAME = "_custom_abort_failed";
private static Path inDir = new Path(TEST_ROOT_DIR, "test-input");
private static int outDirs = 0;
private FileSystem fs;
private Configuration conf = null;
@Override
protected void setUp() throws Exception {
super.setUp();
conf = createJobConf();
fs = getFileSystem();
}
@Override
protected void tearDown() throws Exception {
fs.delete(new Path(TEST_ROOT_DIR), true);
super.tearDown();
}
  /**
   * Committer that overrides the deprecated
   * {@link FileOutputCommitter#cleanupJob(JobContext)} to drop a
   * _custom_cleanup marker in the output folder
   */
static class CommitterWithCustomDeprecatedCleanup extends FileOutputCommitter {
public CommitterWithCustomDeprecatedCleanup(Path outputPath,
TaskAttemptContext context) throws IOException {
super(outputPath, context);
}
@Override
public void cleanupJob(JobContext context) throws IOException {
System.err.println("---- HERE ----");
Path outputPath = FileOutputFormat.getOutputPath(context);
FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
fs.create(new Path(outputPath, CUSTOM_CLEANUP_FILE_NAME)).close();
}
}
/**
* Committer with abort making a _failed/_killed in the output folder
*/
static class CommitterWithCustomAbort extends FileOutputCommitter {
public CommitterWithCustomAbort(Path outputPath, TaskAttemptContext context)
throws IOException {
super(outputPath, context);
}
@Override
public void abortJob(JobContext context, JobStatus.State state)
throws IOException {
Path outputPath = FileOutputFormat.getOutputPath(context);
FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
String fileName =
(state.equals(JobStatus.State.FAILED)) ? ABORT_FAILED_FILE_NAME
: ABORT_KILLED_FILE_NAME;
fs.create(new Path(outputPath, fileName)).close();
}
}
private Path getNewOutputDir() {
return new Path(TEST_ROOT_DIR, "output-" + outDirs++);
}
static class MyOutputFormatWithCustomAbort<K, V>
extends TextOutputFormat<K, V> {
private OutputCommitter committer = null;
public synchronized OutputCommitter getOutputCommitter(
TaskAttemptContext context) throws IOException {
if (committer == null) {
Path output = getOutputPath(context);
committer = new CommitterWithCustomAbort(output, context);
}
return committer;
}
}
static class MyOutputFormatWithCustomCleanup<K, V>
extends TextOutputFormat<K, V> {
private OutputCommitter committer = null;
public synchronized OutputCommitter getOutputCommitter(
TaskAttemptContext context) throws IOException {
if (committer == null) {
Path output = getOutputPath(context);
committer = new CommitterWithCustomDeprecatedCleanup(output, context);
}
return committer;
}
}
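  /*
   * Hedged sketch (illustrative only): a job opts into one of the custom
   * committers above simply by selecting the corresponding output format;
   * the framework then obtains the committer from that format. The helpers
   * below do the same via job.setOutputFormatClass(...).
   */
  @SuppressWarnings("unused")
  private static void useCustomAbortCommitter(Job job) {
    job.setOutputFormatClass(MyOutputFormatWithCustomAbort.class);
  }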
// run a job with 1 map and let it run to completion
private void testSuccessfulJob(String filename,
Class<? extends OutputFormat> output, String[] exclude) throws Exception {
Path outDir = getNewOutputDir();
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 0);
job.setOutputFormatClass(output);
assertTrue("Job failed!", job.waitForCompletion(true));
Path testFile = new Path(outDir, filename);
assertTrue("Done file missing for job " + job.getJobID(), fs.exists(testFile));
// check if the files from the missing set exists
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse("File " + file + " should not be present for successful job "
+ job.getJobID(), fs.exists(file));
}
}
// run a job for which all the attempts simply fail.
private void testFailedJob(String fileName,
Class<? extends OutputFormat> output, String[] exclude) throws Exception {
Path outDir = getNewOutputDir();
Job job = MapReduceTestUtil.createFailJob(conf, outDir, inDir);
job.setOutputFormatClass(output);
assertFalse("Job did not fail!", job.waitForCompletion(true));
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
assertTrue("File " + testFile + " missing for failed job " + job.getJobID(),
fs.exists(testFile));
}
// check if the files from the missing set exists
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse("File " + file + " should not be present for failed job "
+ job.getJobID(), fs.exists(file));
}
}
// run a job which gets stuck in mapper and kill it.
private void testKilledJob(String fileName,
Class<? extends OutputFormat> output, String[] exclude) throws Exception {
Path outDir = getNewOutputDir();
Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
job.setOutputFormatClass(output);
job.submit();
// wait for the setup to be completed
while (job.setupProgress() != 1.0f) {
UtilsForTests.waitFor(100);
}
job.killJob(); // kill the job
assertFalse("Job did not get kill", job.waitForCompletion(true));
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
assertTrue("File " + testFile + " missing for job " + job.getJobID(), fs
.exists(testFile));
}
// check if the files from the missing set exists
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse("File " + file + " should not be present for killed job "
+ job.getJobID(), fs.exists(file));
}
}
/**
* Test default cleanup/abort behavior
*
* @throws Exception
*/
public void testDefaultCleanupAndAbort() throws Exception {
// check with a successful job
testSuccessfulJob(FileOutputCommitter.SUCCEEDED_FILE_NAME,
TextOutputFormat.class, new String[] {});
// check with a failed job
testFailedJob(null, TextOutputFormat.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME });
// check default abort job kill
testKilledJob(null, TextOutputFormat.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME });
}
/**
* Test if a failed job with custom committer runs the abort code.
*
* @throws Exception
*/
public void testCustomAbort() throws Exception {
// check with a successful job
testSuccessfulJob(FileOutputCommitter.SUCCEEDED_FILE_NAME,
MyOutputFormatWithCustomAbort.class,
new String[] {ABORT_FAILED_FILE_NAME,
ABORT_KILLED_FILE_NAME});
// check with a failed job
testFailedJob(ABORT_FAILED_FILE_NAME,
MyOutputFormatWithCustomAbort.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME,
ABORT_KILLED_FILE_NAME});
// check with a killed job
testKilledJob(ABORT_KILLED_FILE_NAME,
MyOutputFormatWithCustomAbort.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME,
ABORT_FAILED_FILE_NAME});
}
/**
* Test if a failed job with custom committer runs the deprecated
* {@link FileOutputCommitter#cleanupJob(JobContext)} code for api
* compatibility testing.
* @throws Exception
*/
public void testCustomCleanup() throws Exception {
// check with a successful job
testSuccessfulJob(CUSTOM_CLEANUP_FILE_NAME,
MyOutputFormatWithCustomCleanup.class,
new String[] {});
// check with a failed job
testFailedJob(CUSTOM_CLEANUP_FILE_NAME,
MyOutputFormatWithCustomCleanup.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME});
// check with a killed job
testKilledJob(CUSTOM_CLEANUP_FILE_NAME,
MyOutputFormatWithCustomCleanup.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME});
}
}
| 10,139 | 34.704225 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
public class TestDBOutputFormat extends TestCase {
private String[] fieldNames = new String[] { "id", "name", "value" };
private String[] nullFieldNames = new String[] { null, null, null };
private String expected = "INSERT INTO hadoop_output " +
"(id,name,value) VALUES (?,?,?);";
private String nullExpected = "INSERT INTO hadoop_output VALUES (?,?,?);";
private DBOutputFormat<DBWritable, NullWritable> format
= new DBOutputFormat<DBWritable, NullWritable>();
public void testConstructQuery() {
String actual = format.constructQuery("hadoop_output", fieldNames);
assertEquals(expected, actual);
actual = format.constructQuery("hadoop_output", nullFieldNames);
assertEquals(nullExpected, actual);
}
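  /*
   * Hedged usage sketch (assumption, not asserted by this test): a real job
   * would also point DBOutputFormat at a database before calling setOutput.
   * The driver class and JDBC URL below are placeholders.
   */
  @SuppressWarnings("unused")
  private void configureOutputExample(Job job) throws IOException {
    DBConfiguration.configureDB(job.getConfiguration(),
        "org.hsqldb.jdbc.JDBCDriver",           // driver class (placeholder)
        "jdbc:hsqldb:hsql://localhost/mydb");   // connection URL (placeholder)
    DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);
  }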
public void testSetOutput() throws IOException {
Job job = Job.getInstance(new Configuration());
DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);
DBConfiguration dbConf = new DBConfiguration(job.getConfiguration());
String actual = format.constructQuery(dbConf.getOutputTableName()
, dbConf.getOutputFieldNames());
assertEquals(expected, actual);
job = Job.getInstance(new Configuration());
dbConf = new DBConfiguration(job.getConfiguration());
DBOutputFormat.setOutput(job, "hadoop_output", nullFieldNames.length);
assertNull(dbConf.getOutputFieldNames());
assertEquals(nullFieldNames.length, dbConf.getOutputFieldCount());
actual = format.constructQuery(dbConf.getOutputTableName()
, new String[dbConf.getOutputFieldCount()]);
assertEquals(nullExpected, actual);
}
}
| 2,704 | 37.642857 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestTextSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import junit.framework.TestCase;
public class TestTextSplitter extends TestCase {
public String formatArray(Object [] ar) {
StringBuilder sb = new StringBuilder();
sb.append("[");
boolean first = true;
for (Object val : ar) {
if (!first) {
sb.append(", ");
}
sb.append(val.toString());
first = false;
}
sb.append("]");
return sb.toString();
}
public void assertArrayEquals(Object [] expected, Object [] actual) {
for (int i = 0; i < expected.length; i++) {
try {
assertEquals("Failure at position " + i + "; got " + actual[i]
+ " instead of " + expected[i] + "; actual array is " + formatArray(actual),
expected[i], actual[i]);
} catch (ArrayIndexOutOfBoundsException oob) {
fail("Expected array with " + expected.length + " elements; got " + actual.length
+ ". Actual array is " + formatArray(actual));
}
}
if (actual.length > expected.length) {
fail("Actual array has " + actual.length + " elements; expected " + expected.length
+ ". Actual array is " + formatArray(actual));
}
}
public void testStringConvertEmpty() {
TextSplitter splitter = new TextSplitter();
BigDecimal emptyBigDec = splitter.stringToBigDecimal("");
assertEquals(BigDecimal.ZERO, emptyBigDec);
}
public void testBigDecConvertEmpty() {
TextSplitter splitter = new TextSplitter();
String emptyStr = splitter.bigDecimalToString(BigDecimal.ZERO);
assertEquals("", emptyStr);
}
public void testConvertA() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("A"));
assertEquals("A", out);
}
public void testConvertZ() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("Z"));
assertEquals("Z", out);
}
public void testConvertThreeChars() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("abc"));
assertEquals("abc", out);
}
public void testConvertStr() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("big str"));
assertEquals("big str", out);
}
public void testConvertChomped() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("AVeryLongStringIndeed"));
assertEquals("AVeryLon", out);
}
public void testAlphabetSplit() throws SQLException {
    // This should give us 25 splits, with one boundary point per letter (A-Z).
TextSplitter splitter = new TextSplitter();
List<String> splits = splitter.split(25, "A", "Z", "");
String [] expected = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K",
"L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" };
assertArrayEquals(expected, splits.toArray(new String [0]));
}
public void testCommonPrefix() throws SQLException {
// Splits between 'Hand' and 'Hardy'
TextSplitter splitter = new TextSplitter();
List<String> splits = splitter.split(5, "nd", "rdy", "Ha");
// Don't check for exact values in the middle, because the splitter generates some
// ugly Unicode-isms. But do check that we get multiple splits and that it starts
// and ends on the correct points.
assertEquals("Hand", splits.get(0));
assertEquals("Hardy", splits.get(splits.size() -1));
assertEquals(6, splits.size());
}
}
| 4,584 | 34.542636 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDataDrivenDBInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.*;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
//import org.apache.hadoop.examples.DBCountPageView;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.db.*;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.util.StringUtils;
import org.hsqldb.server.Server;
/**
* Test aspects of DataDrivenDBInputFormat
*/
public class TestDataDrivenDBInputFormat extends HadoopTestCase {
private static final Log LOG = LogFactory.getLog(
TestDataDrivenDBInputFormat.class);
private static final String DB_NAME = "dddbif";
private static final String DB_URL =
"jdbc:hsqldb:hsql://localhost/" + DB_NAME;
private static final String DRIVER_CLASS = "org.hsqldb.jdbc.JDBCDriver";
private Server server;
private Connection connection;
private static final String OUT_DIR;
public TestDataDrivenDBInputFormat() throws IOException {
super(LOCAL_MR, LOCAL_FS, 1, 1);
}
static {
OUT_DIR = System.getProperty("test.build.data", "/tmp") + "/dddbifout";
}
private void startHsqldbServer() {
if (null == server) {
server = new Server();
server.setDatabasePath(0,
System.getProperty("test.build.data", "/tmp") + "/" + DB_NAME);
server.setDatabaseName(0, DB_NAME);
server.start();
}
}
private void createConnection(String driverClassName,
String url) throws Exception {
Class.forName(driverClassName);
connection = DriverManager.getConnection(url);
connection.setAutoCommit(false);
}
private void shutdown() {
try {
connection.commit();
connection.close();
connection = null;
    } catch (Throwable ex) {
LOG.warn("Exception occurred while closing connection :"
+ StringUtils.stringifyException(ex));
} finally {
try {
if(server != null) {
server.shutdown();
}
      } catch (Throwable ex) {
LOG.warn("Exception occurred while shutting down HSQLDB :"
+ StringUtils.stringifyException(ex));
}
server = null;
}
}
private void initialize(String driverClassName, String url)
throws Exception {
startHsqldbServer();
createConnection(driverClassName, url);
}
public void setUp() throws Exception {
initialize(DRIVER_CLASS, DB_URL);
super.setUp();
}
public void tearDown() throws Exception {
super.tearDown();
shutdown();
}
public static class DateCol implements DBWritable, WritableComparable {
Date d;
public String toString() {
return d.toString();
}
public void readFields(ResultSet rs) throws SQLException {
d = rs.getDate(1);
}
public void write(PreparedStatement ps) {
// not needed.
}
public void readFields(DataInput in) throws IOException {
long v = in.readLong();
d = new Date(v);
}
public void write(DataOutput out) throws IOException {
out.writeLong(d.getTime());
}
@Override
public int hashCode() {
return (int) d.getTime();
}
@Override
public int compareTo(Object o) {
if (o instanceof DateCol) {
Long v = Long.valueOf(d.getTime());
Long other = Long.valueOf(((DateCol) o).d.getTime());
return v.compareTo(other);
} else {
return -1;
}
}
}
public static class ValMapper
extends Mapper<Object, Object, Object, NullWritable> {
public void map(Object k, Object v, Context c)
throws IOException, InterruptedException {
c.write(v, NullWritable.get());
}
}
public void testDateSplits() throws Exception {
Statement s = connection.createStatement();
final String DATE_TABLE = "datetable";
final String COL = "foo";
try {
// delete the table if it already exists.
s.executeUpdate("DROP TABLE " + DATE_TABLE);
} catch (SQLException e) {
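      // ignored: the table may not exist yet on a fresh database.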
}
// Create the table.
s.executeUpdate("CREATE TABLE " + DATE_TABLE + "(" + COL + " DATE)");
s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-01')");
s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-02')");
s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-05-01')");
s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2011-04-01')");
// commit this tx.
connection.commit();
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "file:///");
FileSystem fs = FileSystem.getLocal(conf);
fs.delete(new Path(OUT_DIR), true);
    // now run a data-driven DB import over the date column
Job job = Job.getInstance(conf);
job.setMapperClass(ValMapper.class);
job.setReducerClass(Reducer.class);
job.setMapOutputKeyClass(DateCol.class);
job.setMapOutputValueClass(NullWritable.class);
job.setOutputKeyClass(DateCol.class);
job.setOutputValueClass(NullWritable.class);
job.setNumReduceTasks(1);
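    // hint at two map tasks so the data-driven input format is encouraged to
    // split the date range across more than one mapper.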
job.getConfiguration().setInt("mapreduce.map.tasks", 2);
FileOutputFormat.setOutputPath(job, new Path(OUT_DIR));
DBConfiguration.configureDB(job.getConfiguration(), DRIVER_CLASS,
DB_URL, null, null);
DataDrivenDBInputFormat.setInput(job, DateCol.class, DATE_TABLE, null,
COL, COL);
boolean ret = job.waitForCompletion(true);
assertTrue("job failed", ret);
// Check to see that we imported as much as we thought we did.
assertEquals("Did not get all the records", 4,
job.getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS)
.getValue());
}
}
| 6,743 | 29.242152 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestIntegerSplitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import junit.framework.TestCase;
public class TestIntegerSplitter extends TestCase {
private long [] toLongArray(List<Long> in) {
long [] out = new long[in.size()];
for (int i = 0; i < in.size(); i++) {
out[i] = in.get(i).longValue();
}
return out;
}
public String formatLongArray(long [] ar) {
StringBuilder sb = new StringBuilder();
sb.append("[");
boolean first = true;
for (long val : ar) {
if (!first) {
sb.append(", ");
}
sb.append(Long.toString(val));
first = false;
}
sb.append("]");
return sb.toString();
}
public void assertLongArrayEquals(long [] expected, long [] actual) {
for (int i = 0; i < expected.length; i++) {
try {
assertEquals("Failure at position " + i + "; got " + actual[i]
+ " instead of " + expected[i] + "; actual array is " + formatLongArray(actual),
expected[i], actual[i]);
} catch (ArrayIndexOutOfBoundsException oob) {
fail("Expected array with " + expected.length + " elements; got " + actual.length
+ ". Actual array is " + formatLongArray(actual));
}
}
if (actual.length > expected.length) {
fail("Actual array has " + actual.length + " elements; expected " + expected.length
+ ". ACtual array is " + formatLongArray(actual));
}
}
public void testEvenSplits() throws SQLException {
List<Long> splits = new IntegerSplitter().split(10, 0, 100);
long [] expected = { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
assertLongArrayEquals(expected, toLongArray(splits));
}
public void testOddSplits() throws SQLException {
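    // 95 / 10 rounds down to a step of 9, and the upper bound is appended as
    // the final split point.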
List<Long> splits = new IntegerSplitter().split(10, 0, 95);
long [] expected = { 0, 9, 18, 27, 36, 45, 54, 63, 72, 81, 90, 95 };
assertLongArrayEquals(expected, toLongArray(splits));
}
public void testSingletonSplit() throws SQLException {
List<Long> splits = new IntegerSplitter().split(1, 5, 5);
long [] expected = { 5, 5 };
assertLongArrayEquals(expected, toLongArray(splits));
}
public void testSingletonSplit2() throws SQLException {
// Same test, but overly-high numSplits
List<Long> splits = new IntegerSplitter().split(5, 5, 5);
long [] expected = { 5, 5 };
assertLongArrayEquals(expected, toLongArray(splits));
}
public void testTooManySplits() throws SQLException {
List<Long> splits = new IntegerSplitter().split(5, 3, 5);
long [] expected = { 3, 4, 5 };
assertLongArrayEquals(expected, toLongArray(splits));
}
}
| 3,562 | 32.299065 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
/**
* Tests error conditions in ChainMapper/ChainReducer.
*/
public class TestChainErrors extends HadoopTestCase {
private static String localPathRoot = System.getProperty("test.build.data",
"/tmp");
public TestChainErrors() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
private Path inDir = new Path(localPathRoot, "testing/chain/input");
private Path outDir = new Path(localPathRoot, "testing/chain/output");
private String input = "a\nb\nc\nd\n";
/**
* Tests errors during submission.
*
* @throws Exception
*/
public void testChainSubmission() throws Exception {
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 0, 0, input);
job.setJobName("chain");
Throwable th = null;
    // output key/value classes of the first map are not the same as those of the second map
try {
ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
IntWritable.class, Text.class, null);
ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
} catch (IllegalArgumentException iae) {
th = iae;
}
assertTrue(th != null);
th = null;
    // output key/value classes of the reducer are not the
    // same as those of the mapper in the chain
try {
ChainReducer.setReducer(job, Reducer.class, LongWritable.class,
Text.class, IntWritable.class, Text.class, null);
ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
} catch (IllegalArgumentException iae) {
th = iae;
}
assertTrue(th != null);
}
/**
* Tests one of the mappers throwing exception.
*
* @throws Exception
*/
public void testChainFail() throws Exception {
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 0, input);
job.setJobName("chain");
ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
ChainMapper.addMapper(job, FailMap.class, LongWritable.class, Text.class,
IntWritable.class, Text.class, null);
ChainMapper.addMapper(job, Mapper.class, IntWritable.class, Text.class,
LongWritable.class, Text.class, null);
job.waitForCompletion(true);
assertTrue("Job Not failed", !job.isSuccessful());
}
/**
* Tests Reducer throwing exception.
*
* @throws Exception
*/
public void testReducerFail() throws Exception {
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1, input);
job.setJobName("chain");
ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
ChainReducer.setReducer(job, FailReduce.class, LongWritable.class,
Text.class, LongWritable.class, Text.class, null);
ChainReducer.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
job.waitForCompletion(true);
assertTrue("Job Not failed", !job.isSuccessful());
}
/**
   * Tests one of the maps consuming its input and emitting no output.
*
* @throws Exception
*/
  public void testChainMapNoOutput() throws Exception {
Configuration conf = createJobConf();
String expectedOutput = "";
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 0, input);
job.setJobName("chain");
ChainMapper.addMapper(job, ConsumeMap.class, IntWritable.class, Text.class,
LongWritable.class, Text.class, null);
ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
assertEquals("Outputs doesn't match", expectedOutput, MapReduceTestUtil
.readOutput(outDir, conf));
}
/**
   * Tests the reducer consuming its input and emitting no output.
*
* @throws Exception
*/
  public void testChainReduceNoOutput() throws Exception {
Configuration conf = createJobConf();
String expectedOutput = "";
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1, input);
job.setJobName("chain");
ChainMapper.addMapper(job, Mapper.class, IntWritable.class, Text.class,
LongWritable.class, Text.class, null);
ChainReducer.setReducer(job, ConsumeReduce.class, LongWritable.class,
Text.class, LongWritable.class, Text.class, null);
ChainReducer.addMapper(job, Mapper.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
assertEquals("Outputs doesn't match", expectedOutput, MapReduceTestUtil
.readOutput(outDir, conf));
}
  // this map consumes all the input and outputs nothing
public static class ConsumeMap extends
Mapper<LongWritable, Text, LongWritable, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
}
}
  // this reduce consumes all the input and outputs nothing
public static class ConsumeReduce extends
Reducer<LongWritable, Text, LongWritable, Text> {
public void reduce(LongWritable key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
}
}
// this map throws IOException for input value "b"
public static class FailMap extends
Mapper<LongWritable, Text, IntWritable, Text> {
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
if (value.toString().equals("b")) {
throw new IOException();
}
}
}
  // this reduce throws an IOException for any input
public static class FailReduce extends
Reducer<LongWritable, Text, LongWritable, Text> {
public void reduce(LongWritable key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
throw new IOException();
}
}
}
| 7,516 | 32.558036 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/chain/TestMapReduceChain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
public class TestMapReduceChain extends HadoopTestCase {
private static String localPathRoot = System.getProperty("test.build.data",
"/tmp");
private static Path flagDir = new Path(localPathRoot, "testing/chain/flags");
private static void cleanFlags(Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(conf);
fs.delete(flagDir, true);
fs.mkdirs(flagDir);
}
private static void writeFlag(Configuration conf, String flag)
throws IOException {
FileSystem fs = FileSystem.get(conf);
if (getFlag(conf, flag)) {
fail("Flag " + flag + " already exists");
}
DataOutputStream file = fs.create(new Path(flagDir, flag));
file.close();
}
private static boolean getFlag(Configuration conf, String flag)
throws IOException {
FileSystem fs = FileSystem.get(conf);
return fs.exists(new Path(flagDir, flag));
}
public TestMapReduceChain() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testChain() throws Exception {
Path inDir = new Path(localPathRoot, "testing/chain/input");
Path outDir = new Path(localPathRoot, "testing/chain/output");
String input = "1\n2\n";
String expectedOutput = "0\t1ABCRDEF\n2\t2ABCRDEF\n";
Configuration conf = createJobConf();
cleanFlags(conf);
conf.set("a", "X");
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1, input);
job.setJobName("chain");
Configuration mapAConf = new Configuration(false);
mapAConf.set("a", "A");
ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, mapAConf);
ChainMapper.addMapper(job, BMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
ChainMapper.addMapper(job, CMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
Configuration reduceConf = new Configuration(false);
reduceConf.set("a", "C");
ChainReducer.setReducer(job, RReduce.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, reduceConf);
ChainReducer.addMapper(job, DMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
Configuration mapEConf = new Configuration(false);
mapEConf.set("a", "E");
ChainReducer.addMapper(job, EMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, mapEConf);
ChainReducer.addMapper(job, FMap.class, LongWritable.class, Text.class,
LongWritable.class, Text.class, null);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
String str = "flag not set";
assertTrue(str, getFlag(conf, "map.setup.A"));
assertTrue(str, getFlag(conf, "map.setup.B"));
assertTrue(str, getFlag(conf, "map.setup.C"));
assertTrue(str, getFlag(conf, "reduce.setup.R"));
assertTrue(str, getFlag(conf, "map.setup.D"));
assertTrue(str, getFlag(conf, "map.setup.E"));
assertTrue(str, getFlag(conf, "map.setup.F"));
assertTrue(str, getFlag(conf, "map.A.value.1"));
assertTrue(str, getFlag(conf, "map.A.value.2"));
assertTrue(str, getFlag(conf, "map.B.value.1A"));
assertTrue(str, getFlag(conf, "map.B.value.2A"));
assertTrue(str, getFlag(conf, "map.C.value.1AB"));
assertTrue(str, getFlag(conf, "map.C.value.2AB"));
assertTrue(str, getFlag(conf, "reduce.R.value.1ABC"));
assertTrue(str, getFlag(conf, "reduce.R.value.2ABC"));
assertTrue(str, getFlag(conf, "map.D.value.1ABCR"));
assertTrue(str, getFlag(conf, "map.D.value.2ABCR"));
assertTrue(str, getFlag(conf, "map.E.value.1ABCRD"));
assertTrue(str, getFlag(conf, "map.E.value.2ABCRD"));
assertTrue(str, getFlag(conf, "map.F.value.1ABCRDE"));
assertTrue(str, getFlag(conf, "map.F.value.2ABCRDE"));
assertTrue(getFlag(conf, "map.cleanup.A"));
assertTrue(getFlag(conf, "map.cleanup.B"));
assertTrue(getFlag(conf, "map.cleanup.C"));
assertTrue(getFlag(conf, "reduce.cleanup.R"));
assertTrue(getFlag(conf, "map.cleanup.D"));
assertTrue(getFlag(conf, "map.cleanup.E"));
assertTrue(getFlag(conf, "map.cleanup.F"));
assertEquals("Outputs doesn't match", expectedOutput, MapReduceTestUtil
.readOutput(outDir, conf));
}
public static class AMap extends IDMap {
public AMap() {
super("A", "A");
}
}
public static class BMap extends IDMap {
public BMap() {
super("B", "X");
}
}
public static class CMap extends IDMap {
public CMap() {
super("C", "X");
}
}
public static class RReduce extends IDReduce {
public RReduce() {
super("R", "C");
}
}
public static class DMap extends IDMap {
public DMap() {
super("D", "X");
}
}
public static class EMap extends IDMap {
public EMap() {
super("E", "E");
}
}
public static class FMap extends IDMap {
public FMap() {
super("F", "X");
}
}
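  // Identity-style mapper that appends its name to each value, records
  // setup/map/cleanup flags, and asserts the value of "a" seen in its conf.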
public static class IDMap extends
Mapper<LongWritable, Text, LongWritable, Text> {
private String name;
private String prop;
public IDMap(String name, String prop) {
this.name = name;
this.prop = prop;
}
public void setup(Context context) throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
assertEquals(prop, conf.get("a"));
writeFlag(conf, "map.setup." + name);
}
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
writeFlag(context.getConfiguration(), "map." + name + ".value." + value);
context.write(key, new Text(value + name));
}
public void cleanup(Context context) throws IOException,
InterruptedException {
writeFlag(context.getConfiguration(), "map.cleanup." + name);
}
}
public static class IDReduce extends
Reducer<LongWritable, Text, LongWritable, Text> {
private String name;
private String prop;
public IDReduce(String name, String prop) {
this.name = name;
this.prop = prop;
}
public void setup(Context context) throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
assertEquals(prop, conf.get("a"));
writeFlag(conf, "reduce.setup." + name);
}
public void reduce(LongWritable key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
for (Text value : values) {
writeFlag(context.getConfiguration(), "reduce." + name + ".value."
+ value);
context.write(key, new Text(value + name));
}
}
public void cleanup(Context context) throws IOException,
InterruptedException {
writeFlag(context.getConfiguration(), "reduce.cleanup." + name);
}
}
}
| 8,260 | 32.445344 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/chain/TestSingleElementChain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.chain;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
import java.io.IOException;
/**
 * Runs wordcount by adding a single mapper and a single reducer to the chain
*/
public class TestSingleElementChain extends HadoopTestCase {
private static String localPathRoot = System.getProperty("test.build.data",
"/tmp");
public TestSingleElementChain() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
// test chain mapper and reducer by adding single mapper and reducer to chain
public void testNoChain() throws Exception {
Path inDir = new Path(localPathRoot, "testing/chain/input");
Path outDir = new Path(localPathRoot, "testing/chain/output");
String input = "a\nb\na\n";
String expectedOutput = "a\t2\nb\t1\n";
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1, input);
job.setJobName("chain");
ChainMapper.addMapper(job, TokenCounterMapper.class, Object.class,
Text.class, Text.class, IntWritable.class, null);
ChainReducer.setReducer(job, IntSumReducer.class, Text.class,
IntWritable.class, Text.class, IntWritable.class, null);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
assertEquals("Outputs doesn't match", expectedOutput, MapReduceTestUtil
.readOutput(outDir, conf));
}
}
| 2,616 | 36.927536 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.io.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.map.InverseMapper;
public class TestMRKeyFieldBasedComparator extends HadoopTestCase {
Configuration conf;
String line1 = "123 -123 005120 123.9 0.01 0.18 010 10.0 4444.1 011 011 234";
String line2 = "134 -12 005100 123.10 -1.01 0.19 02 10.1 4444";
public TestMRKeyFieldBasedComparator() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
conf = createJobConf();
conf.set(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
}
private void testComparator(String keySpec, int expect)
throws Exception {
String root = System.getProperty("test.build.data", "/tmp");
Path inDir = new Path(root, "test_cmp/in");
Path outDir = new Path(root, "test_cmp/out");
conf.set("mapreduce.partition.keycomparator.options", keySpec);
conf.set("mapreduce.partition.keypartitioner.options", "-k1.1,1.1");
conf.set(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1,
line1 +"\n" + line2 + "\n");
job.setMapperClass(InverseMapper.class);
job.setReducerClass(Reducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
job.setSortComparatorClass(KeyFieldBasedComparator.class);
job.setPartitionerClass(KeyFieldBasedPartitioner.class);
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
// validate output
Path[] outputFiles = FileUtil.stat2Paths(getFileSystem().listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
      // make sure we get what we expect as the first line, and also
      // that we have two lines (both lines must end up in the same
      // reducer since the partitioner takes the same key spec for all
      // lines)
if (expect == 1) {
assertTrue(line.startsWith(line1));
} else if (expect == 2) {
assertTrue(line.startsWith(line2));
}
line = reader.readLine();
if (expect == 1) {
assertTrue(line.startsWith(line2));
} else if (expect == 2) {
assertTrue(line.startsWith(line1));
}
reader.close();
}
}
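  // The key specs below follow the Unix sort -k syntax:
  // -k<field>[.<char>][,<field>[.<char>]] with optional n (numeric) and
  // r (reverse) modifiers.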
public void testBasicUnixComparator() throws Exception {
testComparator("-k1,1n", 1);
testComparator("-k2,2n", 1);
testComparator("-k2.2,2n", 2);
testComparator("-k3.4,3n", 2);
testComparator("-k3.2,3.3n -k4,4n", 2);
testComparator("-k3.2,3.3n -k4,4nr", 1);
testComparator("-k2.4,2.4n", 2);
testComparator("-k7,7", 1);
testComparator("-k7,7n", 2);
testComparator("-k8,8n", 1);
testComparator("-k9,9", 2);
testComparator("-k11,11",2);
testComparator("-k10,10",2);
testWithoutMRJob("-k9,9", 1);
testWithoutMRJob("-k9n", 1);
}
byte[] line1_bytes = line1.getBytes();
byte[] line2_bytes = line2.getBytes();
public void testWithoutMRJob(String keySpec, int expect) throws Exception {
KeyFieldBasedComparator<Void, Void> keyFieldCmp =
new KeyFieldBasedComparator<Void, Void>();
conf.set("mapreduce.partition.keycomparator.options", keySpec);
keyFieldCmp.setConf(conf);
int result = keyFieldCmp.compare(line1_bytes, 0, line1_bytes.length,
line2_bytes, 0, line2_bytes.length);
if ((expect >= 0 && result < 0) || (expect < 0 && result >= 0))
fail();
}
}
| 4,950 | 36.793893 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestKeyFieldHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import junit.framework.TestCase;
public class TestKeyFieldHelper extends TestCase {
private static final Log LOG = LogFactory.getLog(TestKeyFieldHelper.class);
/**
   * Tests the KeyFieldHelper's option parsing.
*/
public void testparseOption() throws Exception {
KeyFieldHelper helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
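    // A spec of the form -k<f1>.<c1>,<f2>.<c2> selects from field f1, char c1
    // through field f2, char c2 of the key.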
String keySpecs = "-k1.2,3.4";
String eKeySpecs = keySpecs;
helper.parseOption(keySpecs);
String actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
// test -k a.b
keySpecs = "-k 1.2";
eKeySpecs = "-k1.2,0.0";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-nr -k1.2,3.4";
eKeySpecs = "-k1.2,3.4nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-nr -k1.2,3.4n";
eKeySpecs = "-k1.2,3.4n";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-nr -k1.2,3.4r";
eKeySpecs = "-k1.2,3.4r";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-nr -k1.2,3.4 -k5.6,7.8n -k9.10,11.12r -k13.14,15.16nr";
//1st
eKeySpecs = "-k1.2,3.4nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
// 2nd
eKeySpecs = "-k5.6,7.8n";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
//3rd
eKeySpecs = "-k9.10,11.12r";
actKeySpecs = helper.keySpecs().get(2).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
//4th
eKeySpecs = "-k13.14,15.16nr";
actKeySpecs = helper.keySpecs().get(3).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2n,3.4";
eKeySpecs = "-k1.2,3.4n";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2r,3.4";
eKeySpecs = "-k1.2,3.4r";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2nr,3.4";
eKeySpecs = "-k1.2,3.4nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2,3.4n";
eKeySpecs = "-k1.2,3.4n";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2,3.4r";
eKeySpecs = "-k1.2,3.4r";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2,3.4nr";
eKeySpecs = "-k1.2,3.4nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-nr -k1.2,3.4 -k5.6,7.8";
eKeySpecs = "-k1.2,3.4nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
eKeySpecs = "-k5.6,7.8nr";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-n -k1.2,3.4 -k5.6,7.8";
eKeySpecs = "-k1.2,3.4n";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
eKeySpecs = "-k5.6,7.8n";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-r -k1.2,3.4 -k5.6,7.8";
eKeySpecs = "-k1.2,3.4r";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
eKeySpecs = "-k5.6,7.8r";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2,3.4n -k5.6,7.8";
eKeySpecs = "-k1.2,3.4n";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
eKeySpecs = "-k5.6,7.8";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2,3.4r -k5.6,7.8";
eKeySpecs = "-k1.2,3.4r";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
eKeySpecs = "-k5.6,7.8";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-k1.2,3.4nr -k5.6,7.8";
eKeySpecs = "-k1.2,3.4nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
eKeySpecs = "-k5.6,7.8";
actKeySpecs = helper.keySpecs().get(1).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-n";
eKeySpecs = "-k1.1,0.0n";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-r";
eKeySpecs = "-k1.1,0.0r";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
keySpecs = "-nr";
eKeySpecs = "-k1.1,0.0nr";
helper = new KeyFieldHelper();
helper.parseOption(keySpecs);
actKeySpecs = helper.keySpecs().get(0).toString();
assertEquals("KeyFieldHelper's parsing is garbled", eKeySpecs, actKeySpecs);
}
/**
   * Tests the KeyFieldHelper's getWordLengths.
*/
public void testGetWordLengths() throws Exception {
KeyFieldHelper helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
// test getWordLengths with unspecified key-specifications
String input = "hi";
int[] result = helper.getWordLengths(input.getBytes(), 0, 2);
assertTrue(equals(result, new int[] {1}));
// set the key specs
helper.setKeyFieldSpec(1, 2);
// test getWordLengths with 3 words
input = "hi\thello there";
result = helper.getWordLengths(input.getBytes(), 0, input.length());
assertTrue(equals(result, new int[] {2, 2, 11}));
// test getWordLengths with 4 words but with a different separator
helper.setKeyFieldSeparator(" ");
input = "hi hello\tthere you";
result = helper.getWordLengths(input.getBytes(), 0, input.length());
assertTrue(equals(result, new int[] {3, 2, 11, 3}));
// test with non zero start index
input = "hi hello there you where me there";
// .....................
result = helper.getWordLengths(input.getBytes(), 10, 33);
assertTrue(equals(result, new int[] {5, 4, 3, 5, 2, 3}));
input = "hi hello there you where me ";
// ..................
result = helper.getWordLengths(input.getBytes(), 10, input.length());
assertTrue(equals(result, new int[] {5, 4, 3, 5, 2, 0}));
input = "";
result = helper.getWordLengths(input.getBytes(), 0, 0);
assertTrue(equals(result, new int[] {1, 0}));
input = " abc";
result = helper.getWordLengths(input.getBytes(), 0, 5);
assertTrue(equals(result, new int[] {3, 0, 0, 3}));
input = " abc";
result = helper.getWordLengths(input.getBytes(), 0, 2);
assertTrue(equals(result, new int[] {3, 0, 0, 0}));
input = " abc ";
result = helper.getWordLengths(input.getBytes(), 0, 2);
assertTrue(equals(result, new int[] {2, 0, 1}));
helper.setKeyFieldSeparator("abcd");
input = "abc";
result = helper.getWordLengths(input.getBytes(), 0, 3);
assertTrue(equals(result, new int[] {1, 3}));
}
/**
   * Tests the KeyFieldHelper's getStartOffset/getEndOffset.
*/
public void testgetStartEndOffset() throws Exception {
KeyFieldHelper helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
// test getStartOffset with -k1,2
helper.setKeyFieldSpec(1, 2);
String input = "hi\thello";
String expectedOutput = input;
testKeySpecs(input, expectedOutput, helper);
    // test getStartOffset with -k1.0,0; this should result in start = -1
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k1.0,0");
testKeySpecs(input, null, helper);
// test getStartOffset with -k1,0
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k1,0");
expectedOutput = input;
testKeySpecs(input, expectedOutput, helper);
// test getStartOffset with -k1.2,0
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k1.2,0");
expectedOutput = "i\thello";
testKeySpecs(input, expectedOutput, helper);
    // test getStartOffset/getEndOffset with -k1.1,2.3
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k1.1,2.3");
expectedOutput = "hi\thel";
testKeySpecs(input, expectedOutput, helper);
    // test getStartOffset/getEndOffset with -k1.2,2.3
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k1.2,2.3");
expectedOutput = "i\thel";
testKeySpecs(input, expectedOutput, helper);
// test getStartOffset with -k1.2,3.0
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k1.2,3.0");
expectedOutput = "i\thello";
testKeySpecs(input, expectedOutput, helper);
// test getStartOffset with -k2,2
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k2,2");
expectedOutput = "hello";
testKeySpecs(input, expectedOutput, helper);
    // test getStartOffset with -k3.1,4.0
helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k3.1,4.0");
testKeySpecs(input, null, helper);
// test getStartOffset with -k2.1
helper = new KeyFieldHelper();
input = "123123123123123hi\thello\thow";
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k2.1");
expectedOutput = "hello\thow";
testKeySpecs(input, expectedOutput, helper, 15, input.length());
    // test getStartOffset with -k2.1,3 with the end falling on a \t
helper = new KeyFieldHelper();
input = "123123123123123hi\thello\t\thow\tare";
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k2.1,3");
expectedOutput = "hello\t";
testKeySpecs(input, expectedOutput, helper, 17, input.length());
// test getStartOffset with -k2.1 with end ending on \t
helper = new KeyFieldHelper();
input = "123123123123123hi\thello\thow\tare";
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k2.1");
expectedOutput = "hello\thow\t";
testKeySpecs(input, expectedOutput, helper, 17, 28);
// test getStartOffset with -k2.1,3 with smaller length
helper = new KeyFieldHelper();
input = "123123123123123hi\thello\thow";
helper.setKeyFieldSeparator("\t");
helper.parseOption("-k2.1,3");
expectedOutput = "hello";
testKeySpecs(input, expectedOutput, helper, 15, 23);
}
private void testKeySpecs(String input, String expectedOutput,
KeyFieldHelper helper) {
testKeySpecs(input, expectedOutput, helper, 0, -1);
}
private void testKeySpecs(String input, String expectedOutput,
KeyFieldHelper helper, int s1, int e1) {
LOG.info("input : " + input);
String keySpecs = helper.keySpecs().get(0).toString();
LOG.info("keyspecs : " + keySpecs);
byte[] inputBytes = input.getBytes(); // get the input bytes
if (e1 == -1) {
e1 = inputBytes.length;
}
LOG.info("length : " + e1);
// get the word lengths
int[] indices = helper.getWordLengths(inputBytes, s1, e1);
// get the start index
int start = helper.getStartOffset(inputBytes, s1, e1, indices,
helper.keySpecs().get(0));
LOG.info("start : " + start);
if (expectedOutput == null) {
assertEquals("Expected -1 when the start index is invalid", -1, start);
return;
}
// get the end index
int end = helper.getEndOffset(inputBytes, s1, e1, indices,
helper.keySpecs().get(0));
LOG.info("end : " + end);
    // clamp the end offset so it never runs past the input buffer
end = (end >= inputBytes.length) ? inputBytes.length -1 : end;
int length = end + 1 - start;
LOG.info("length : " + length);
byte[] outputBytes = new byte[length];
System.arraycopy(inputBytes, start, outputBytes, 0, length);
String output = new String(outputBytes);
LOG.info("output : " + output);
LOG.info("expected-output : " + expectedOutput);
assertEquals(keySpecs + " failed on input '" + input + "'",
expectedOutput, output);
}
  // check two getWordLengths() results for equality; element 0 holds the word count
private boolean equals(int[] test, int[] expected) {
// check array length
if (test[0] != expected[0]) {
return false;
}
// if length is same then check the contents
for (int i = 0; i < test[0] && i < expected[0]; ++i) {
if (test[i] != expected[i]) {
return false;
}
}
return true;
}
}
| 16,359 | 37.494118 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestMRKeyFieldBasedPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import junit.framework.TestCase;
public class TestMRKeyFieldBasedPartitioner extends TestCase {
/**
   * Tests that the key-field-based partitioner works with an empty key.
*/
public void testEmptyKey() throws Exception {
int numReducers = 10;
KeyFieldBasedPartitioner<Text, Text> kfbp =
new KeyFieldBasedPartitioner<Text, Text>();
Configuration conf = new Configuration();
conf.setInt("num.key.fields.for.partition", 10);
kfbp.setConf(conf);
assertEquals("Empty key should map to 0th partition",
0, kfbp.getPartition(new Text(), new Text(), numReducers));
// check if the hashcode is correct when no keyspec is specified
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
kfbp.setConf(conf);
String input = "abc\tdef\txyz";
int hashCode = input.hashCode();
int expectedPartition = kfbp.getPartition(hashCode, numReducers);
assertEquals("Partitioner doesnt work as expected", expectedPartition,
kfbp.getPartition(new Text(input), new Text(), numReducers));
// check if the hashcode is correct with specified keyspec
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k2,2");
kfbp.setConf(conf);
String expectedOutput = "def";
byte[] eBytes = expectedOutput.getBytes();
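    // compute the expected hash over the bytes of field 2 ("def"); the end
    // index passed to hashCode() is treated as inclusive, hence length - 1.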
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, 0);
expectedPartition = kfbp.getPartition(hashCode, numReducers);
assertEquals("Partitioner doesnt work as expected", expectedPartition,
kfbp.getPartition(new Text(input), new Text(), numReducers));
// test with invalid end index in keyspecs
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k2,5");
kfbp.setConf(conf);
expectedOutput = "def\txyz";
eBytes = expectedOutput.getBytes();
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, 0);
expectedPartition = kfbp.getPartition(hashCode, numReducers);
assertEquals("Partitioner doesnt work as expected", expectedPartition,
kfbp.getPartition(new Text(input), new Text(), numReducers));
// test with 0 end index in keyspecs
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k2");
kfbp.setConf(conf);
expectedOutput = "def\txyz";
eBytes = expectedOutput.getBytes();
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, 0);
expectedPartition = kfbp.getPartition(hashCode, numReducers);
assertEquals("Partitioner doesnt work as expected", expectedPartition,
kfbp.getPartition(new Text(input), new Text(), numReducers));
// test with invalid keyspecs
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k10");
kfbp.setConf(conf);
assertEquals("Partitioner doesnt work as expected", 0,
kfbp.getPartition(new Text(input), new Text(), numReducers));
// test with multiple keyspecs
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k2,2 -k4,4");
kfbp.setConf(conf);
input = "abc\tdef\tpqr\txyz";
expectedOutput = "def";
eBytes = expectedOutput.getBytes();
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, 0);
expectedOutput = "xyz";
eBytes = expectedOutput.getBytes();
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, hashCode);
expectedPartition = kfbp.getPartition(hashCode, numReducers);
assertEquals("Partitioner doesnt work as expected", expectedPartition,
kfbp.getPartition(new Text(input), new Text(), numReducers));
// test with invalid start index in keyspecs
kfbp = new KeyFieldBasedPartitioner<Text, Text>();
conf = new Configuration();
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k2,2 -k30,21 -k4,4 -k5");
kfbp.setConf(conf);
expectedOutput = "def";
eBytes = expectedOutput.getBytes();
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, 0);
expectedOutput = "xyz";
eBytes = expectedOutput.getBytes();
hashCode = kfbp.hashCode(eBytes, 0, eBytes.length - 1, hashCode);
expectedPartition = kfbp.getPartition(hashCode, numReducers);
assertEquals("Partitioner doesnt work as expected", expectedPartition,
kfbp.getPartition(new Text(input), new Text(), numReducers));
}
}
| 5,666 | 43.97619 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.MRJobConfig;
public class TestTotalOrderPartitioner extends TestCase {
private static final Text[] splitStrings = new Text[] {
// -inf // 0
new Text("aabbb"), // 1
new Text("babbb"), // 2
new Text("daddd"), // 3
new Text("dddee"), // 4
new Text("ddhee"), // 5
new Text("dingo"), // 6
new Text("hijjj"), // 7
new Text("n"), // 8
new Text("yak"), // 9
};
static class Check<T> {
T data;
int part;
Check(T data, int part) {
this.data = data;
this.part = part;
}
}
private static final ArrayList<Check<Text>> testStrings =
new ArrayList<Check<Text>>();
static {
testStrings.add(new Check<Text>(new Text("aaaaa"), 0));
testStrings.add(new Check<Text>(new Text("aaabb"), 0));
testStrings.add(new Check<Text>(new Text("aabbb"), 1));
testStrings.add(new Check<Text>(new Text("aaaaa"), 0));
testStrings.add(new Check<Text>(new Text("babbb"), 2));
testStrings.add(new Check<Text>(new Text("baabb"), 1));
testStrings.add(new Check<Text>(new Text("yai"), 8));
testStrings.add(new Check<Text>(new Text("yak"), 9));
testStrings.add(new Check<Text>(new Text("z"), 9));
testStrings.add(new Check<Text>(new Text("ddngo"), 5));
testStrings.add(new Check<Text>(new Text("hi"), 6));
};
private static <T extends WritableComparable<?>> Path writePartitionFile(
String testname, Configuration conf, T[] splits) throws IOException {
final FileSystem fs = FileSystem.getLocal(conf);
final Path testdir = new Path(System.getProperty("test.build.data", "/tmp")
).makeQualified(fs);
Path p = new Path(testdir, testname + "/_partition.lst");
TotalOrderPartitioner.setPartitionFile(conf, p);
conf.setInt(MRJobConfig.NUM_REDUCES, splits.length + 1);
SequenceFile.Writer w = null;
try {
w = SequenceFile.createWriter(fs, conf, p,
splits[0].getClass(), NullWritable.class,
SequenceFile.CompressionType.NONE);
for (int i = 0; i < splits.length; ++i) {
w.append(splits[i], NullWritable.get());
}
} finally {
if (null != w)
w.close();
}
return p;
}
public void testTotalOrderMemCmp() throws Exception {
TotalOrderPartitioner<Text,NullWritable> partitioner =
new TotalOrderPartitioner<Text,NullWritable>();
Configuration conf = new Configuration();
Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
"totalordermemcmp", conf, splitStrings);
conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
try {
partitioner.setConf(conf);
NullWritable nw = NullWritable.get();
for (Check<Text> chk : testStrings) {
assertEquals(chk.data.toString(), chk.part,
partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
}
} finally {
p.getFileSystem(conf).delete(p, true);
}
}
public void testTotalOrderBinarySearch() throws Exception {
TotalOrderPartitioner<Text,NullWritable> partitioner =
new TotalOrderPartitioner<Text,NullWritable>();
Configuration conf = new Configuration();
Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
"totalorderbinarysearch", conf, splitStrings);
conf.setBoolean(TotalOrderPartitioner.NATURAL_ORDER, false);
conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
try {
partitioner.setConf(conf);
NullWritable nw = NullWritable.get();
for (Check<Text> chk : testStrings) {
assertEquals(chk.data.toString(), chk.part,
partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
}
} finally {
p.getFileSystem(conf).delete(p, true);
}
}
public static class ReverseStringComparator implements RawComparator<Text> {
public int compare(Text a, Text b) {
return -a.compareTo(b);
}
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
int n2 = WritableUtils.decodeVIntSize(b2[s2]);
return -1 * WritableComparator.compareBytes(b1, s1+n1, l1-n1,
b2, s2+n2, l2-n2);
}
}
public void testTotalOrderCustomComparator() throws Exception {
TotalOrderPartitioner<Text,NullWritable> partitioner =
new TotalOrderPartitioner<Text,NullWritable>();
Configuration conf = new Configuration();
Text[] revSplitStrings = Arrays.copyOf(splitStrings, splitStrings.length);
Arrays.sort(revSplitStrings, new ReverseStringComparator());
Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
"totalordercustomcomparator", conf, revSplitStrings);
conf.setBoolean(TotalOrderPartitioner.NATURAL_ORDER, false);
conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
conf.setClass(MRJobConfig.KEY_COMPARATOR,
ReverseStringComparator.class, RawComparator.class);
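    // with the reversed comparator the ordering of split points is inverted,
    // so the expected partition for each sample differs from testStrings.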
ArrayList<Check<Text>> revCheck = new ArrayList<Check<Text>>();
revCheck.add(new Check<Text>(new Text("aaaaa"), 9));
revCheck.add(new Check<Text>(new Text("aaabb"), 9));
revCheck.add(new Check<Text>(new Text("aabbb"), 9));
revCheck.add(new Check<Text>(new Text("aaaaa"), 9));
revCheck.add(new Check<Text>(new Text("babbb"), 8));
revCheck.add(new Check<Text>(new Text("baabb"), 8));
revCheck.add(new Check<Text>(new Text("yai"), 1));
revCheck.add(new Check<Text>(new Text("yak"), 1));
revCheck.add(new Check<Text>(new Text("z"), 0));
revCheck.add(new Check<Text>(new Text("ddngo"), 4));
revCheck.add(new Check<Text>(new Text("hi"), 3));
try {
partitioner.setConf(conf);
NullWritable nw = NullWritable.get();
for (Check<Text> chk : revCheck) {
assertEquals(chk.data.toString(), chk.part,
partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
}
} finally {
p.getFileSystem(conf).delete(p, true);
}
}
}
| 7,472 | 38.539683 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestBinaryPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.util.ReflectionUtils;
import junit.framework.TestCase;
public class TestBinaryPartitioner extends TestCase {
public void testDefaultOffsets() {
Configuration conf = new Configuration();
BinaryPartitioner<?> partitioner =
ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
BinaryComparable key2 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
int partition1 = partitioner.getPartition(key1, null, 10);
int partition2 = partitioner.getPartition(key2, null, 10);
assertEquals(partition1, partition2);
key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
key2 = new BytesWritable(new byte[] { 6, 2, 3, 4, 5 });
partition1 = partitioner.getPartition(key1, null, 10);
partition2 = partitioner.getPartition(key2, null, 10);
assertTrue(partition1 != partition2);
key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
key2 = new BytesWritable(new byte[] { 1, 2, 3, 4, 6 });
partition1 = partitioner.getPartition(key1, null, 10);
partition2 = partitioner.getPartition(key2, null, 10);
assertTrue(partition1 != partition2);
}
public void testCustomOffsets() {
Configuration conf = new Configuration();
BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
BinaryComparable key2 = new BytesWritable(new byte[] { 6, 2, 3, 7, 8 });
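    // Each offset range configured below covers only bytes {2, 3}, which are
    // identical in both keys, so every configuration must map the two keys to
    // the same partition.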
BinaryPartitioner.setOffsets(conf, 1, -3);
BinaryPartitioner<?> partitioner =
ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
int partition1 = partitioner.getPartition(key1, null, 10);
int partition2 = partitioner.getPartition(key2, null, 10);
assertEquals(partition1, partition2);
BinaryPartitioner.setOffsets(conf, 1, 2);
partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
partition1 = partitioner.getPartition(key1, null, 10);
partition2 = partitioner.getPartition(key2, null, 10);
assertEquals(partition1, partition2);
BinaryPartitioner.setOffsets(conf, -4, -3);
partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
partition1 = partitioner.getPartition(key1, null, 10);
partition2 = partitioner.getPartition(key2, null, 10);
assertEquals(partition1, partition2);
}
public void testLowerBound() {
Configuration conf = new Configuration();
BinaryPartitioner.setLeftOffset(conf, 0);
BinaryPartitioner<?> partitioner =
ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
BinaryComparable key2 = new BytesWritable(new byte[] { 6, 2, 3, 4, 5 });
int partition1 = partitioner.getPartition(key1, null, 10);
int partition2 = partitioner.getPartition(key2, null, 10);
assertTrue(partition1 != partition2);
}
public void testUpperBound() {
Configuration conf = new Configuration();
BinaryPartitioner.setRightOffset(conf, 4);
BinaryPartitioner<?> partitioner =
ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
BinaryComparable key2 = new BytesWritable(new byte[] { 1, 2, 3, 4, 6 });
int partition1 = partitioner.getPartition(key1, null, 10);
int partition2 = partitioner.getPartition(key2, null, 10);
assertTrue(partition1 != partition2);
}
}
| 4,502 | 42.298077 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestInputSampler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.partition;
import static org.junit.Assert.assertEquals;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.junit.Test;
public class TestInputSampler {
static class SequentialSplit extends InputSplit {
private int i;
SequentialSplit(int i) {
this.i = i;
}
public long getLength() { return 0; }
public String[] getLocations() { return new String[0]; }
public int getInit() { return i; }
}
static class MapredSequentialSplit implements org.apache.hadoop.mapred.InputSplit {
private int i;
MapredSequentialSplit(int i) {
this.i = i;
}
@Override
public long getLength() { return 0; }
@Override
public String[] getLocations() { return new String[0]; }
public int getInit() { return i; }
@Override
public void write(DataOutput out) throws IOException {
}
@Override
public void readFields(DataInput in) throws IOException {
}
}
static class TestInputSamplerIF
extends InputFormat<IntWritable,NullWritable> {
final int maxDepth;
final ArrayList<InputSplit> splits = new ArrayList<InputSplit>();
TestInputSamplerIF(int maxDepth, int numSplits, int... splitInit) {
this.maxDepth = maxDepth;
assert splitInit.length == numSplits;
for (int i = 0; i < numSplits; ++i) {
splits.add(new SequentialSplit(splitInit[i]));
}
}
public List<InputSplit> getSplits(JobContext context)
throws IOException, InterruptedException {
return splits;
}
public RecordReader<IntWritable,NullWritable> createRecordReader(
final InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
return new RecordReader<IntWritable,NullWritable>() {
private int maxVal;
private final IntWritable i = new IntWritable();
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
i.set(((SequentialSplit)split).getInit() - 1);
maxVal = i.get() + maxDepth + 1;
}
public boolean nextKeyValue() {
i.set(i.get() + 1);
return i.get() < maxVal;
}
public IntWritable getCurrentKey() { return i; }
public NullWritable getCurrentValue() { return NullWritable.get(); }
public float getProgress() { return 1.0f; }
public void close() { }
};
}
}
static class TestMapredInputSamplerIF extends TestInputSamplerIF implements
org.apache.hadoop.mapred.InputFormat<IntWritable,NullWritable> {
TestMapredInputSamplerIF(int maxDepth, int numSplits, int... splitInit) {
super(maxDepth, numSplits, splitInit);
}
@Override
public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job,
int numSplits) throws IOException {
List<InputSplit> splits = null;
try {
splits = getSplits(Job.getInstance(job));
} catch (InterruptedException e) {
throw new IOException(e);
}
org.apache.hadoop.mapred.InputSplit[] retVals =
new org.apache.hadoop.mapred.InputSplit[splits.size()];
for (int i = 0; i < splits.size(); ++i) {
MapredSequentialSplit split = new MapredSequentialSplit(
((SequentialSplit) splits.get(i)).getInit());
retVals[i] = split;
}
return retVals;
}
@Override
public org.apache.hadoop.mapred.RecordReader<IntWritable, NullWritable>
getRecordReader(final org.apache.hadoop.mapred.InputSplit split,
JobConf job, Reporter reporter) throws IOException {
return new org.apache.hadoop.mapred.RecordReader
<IntWritable, NullWritable>() {
private final IntWritable i =
new IntWritable(((MapredSequentialSplit)split).getInit());
private int maxVal = i.get() + maxDepth + 1;
@Override
public boolean next(IntWritable key, NullWritable value)
throws IOException {
i.set(i.get() + 1);
return i.get() < maxVal;
}
@Override
public IntWritable createKey() {
return new IntWritable(i.get());
}
@Override
public NullWritable createValue() {
return NullWritable.get();
}
@Override
public long getPos() throws IOException {
return 0;
}
@Override
public void close() throws IOException {
}
@Override
public float getProgress() throws IOException {
return 0;
}
};
}
}
/**
* Verify SplitSampler contract, that an equal number of records are taken
* from the first splits.
*/
@Test
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testSplitSampler() throws Exception {
final int TOT_SPLITS = 15;
final int NUM_SPLITS = 5;
final int STEP_SAMPLE = 5;
final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
InputSampler.Sampler<IntWritable,NullWritable> sampler =
new InputSampler.SplitSampler<IntWritable,NullWritable>(
NUM_SAMPLES, NUM_SPLITS);
int inits[] = new int[TOT_SPLITS];
for (int i = 0; i < TOT_SPLITS; ++i) {
inits[i] = i * STEP_SAMPLE;
}
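    // Each of the first NUM_SPLITS splits yields consecutive keys starting at
    // i * STEP_SAMPLE, so STEP_SAMPLE samples from each of them should cover
    // exactly 0..NUM_SAMPLES-1 once sorted.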
Job ignored = Job.getInstance();
Object[] samples = sampler.getSample(
new TestInputSamplerIF(100000, TOT_SPLITS, inits), ignored);
assertEquals(NUM_SAMPLES, samples.length);
Arrays.sort(samples, new IntWritable.Comparator());
for (int i = 0; i < NUM_SAMPLES; ++i) {
assertEquals(i, ((IntWritable)samples[i]).get());
}
}
/**
* Verify SplitSampler contract in mapred.lib.InputSampler, which is added
* back for binary compatibility of M/R 1.x
*/
@Test (timeout = 30000)
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testMapredSplitSampler() throws Exception {
final int TOT_SPLITS = 15;
final int NUM_SPLITS = 5;
final int STEP_SAMPLE = 5;
final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
org.apache.hadoop.mapred.lib.InputSampler.Sampler<IntWritable,NullWritable>
sampler = new org.apache.hadoop.mapred.lib.InputSampler.SplitSampler
<IntWritable,NullWritable>(NUM_SAMPLES, NUM_SPLITS);
int inits[] = new int[TOT_SPLITS];
for (int i = 0; i < TOT_SPLITS; ++i) {
inits[i] = i * STEP_SAMPLE;
}
Object[] samples = sampler.getSample(
new TestMapredInputSamplerIF(100000, TOT_SPLITS, inits),
new JobConf());
assertEquals(NUM_SAMPLES, samples.length);
Arrays.sort(samples, new IntWritable.Comparator());
for (int i = 0; i < NUM_SAMPLES; ++i) {
// mapred.lib.InputSampler.SplitSampler has a sampling step
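      // the sorted samples correspond to 5 consecutive records from each of
      // splits 0, 3, 6, 9 and 12 (inits 0, 15, 30, 45, 60)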
assertEquals(i % STEP_SAMPLE + TOT_SPLITS * (i / STEP_SAMPLE),
((IntWritable)samples[i]).get());
}
}
/**
* Verify IntervalSampler contract, that samples are taken at regular
* intervals from the given splits.
*/
@Test
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testIntervalSampler() throws Exception {
final int TOT_SPLITS = 16;
final int PER_SPLIT_SAMPLE = 4;
final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
final double FREQ = 1.0 / TOT_SPLITS;
InputSampler.Sampler<IntWritable,NullWritable> sampler =
new InputSampler.IntervalSampler<IntWritable,NullWritable>(
FREQ, NUM_SAMPLES);
int inits[] = new int[TOT_SPLITS];
for (int i = 0; i < TOT_SPLITS; ++i) {
inits[i] = i;
}
Job ignored = Job.getInstance();
Object[] samples = sampler.getSample(new TestInputSamplerIF(
NUM_SAMPLES, TOT_SPLITS, inits), ignored);
assertEquals(NUM_SAMPLES, samples.length);
Arrays.sort(samples, new IntWritable.Comparator());
for (int i = 0; i < NUM_SAMPLES; ++i) {
assertEquals(i, ((IntWritable)samples[i]).get());
}
}
/**
* Verify IntervalSampler in mapred.lib.InputSampler, which is added back
* for binary compatibility of M/R 1.x
*/
@Test (timeout = 30000)
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testMapredIntervalSampler() throws Exception {
final int TOT_SPLITS = 16;
final int PER_SPLIT_SAMPLE = 4;
final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
final double FREQ = 1.0 / TOT_SPLITS;
org.apache.hadoop.mapred.lib.InputSampler.Sampler<IntWritable,NullWritable>
sampler = new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler
<IntWritable,NullWritable>(FREQ, NUM_SAMPLES);
int inits[] = new int[TOT_SPLITS];
for (int i = 0; i < TOT_SPLITS; ++i) {
inits[i] = i;
}
Job ignored = Job.getInstance();
Object[] samples = sampler.getSample(new TestInputSamplerIF(
NUM_SAMPLES, TOT_SPLITS, inits), ignored);
assertEquals(NUM_SAMPLES, samples.length);
Arrays.sort(samples, new IntWritable.Comparator());
for (int i = 0; i < NUM_SAMPLES; ++i) {
assertEquals(i,
((IntWritable)samples[i]).get());
}
}
}
| 10,507 | 34.620339 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.jobcontrol;
import static org.junit.Assert.assertFalse;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
/**
 * Tests state handling in ControlledJob: depending jobs cannot be added once
 * a job is running or has completed.
 */
public class TestControlledJob {
@Test
public void testAddingDependingJobToRunningJobFails() throws Exception {
Configuration conf = new Configuration();
ControlledJob job1 = new ControlledJob(conf);
job1.setJobState(ControlledJob.State.RUNNING);
assertFalse(job1.addDependingJob(new ControlledJob(conf)));
}
@Test
public void testAddingDependingJobToCompletedJobFails() throws Exception {
Configuration conf = new Configuration();
ControlledJob job1 = new ControlledJob(conf);
job1.setJobState(ControlledJob.State.SUCCESS);
assertFalse(job1.addDependingJob(new ControlledJob(conf)));
}
}
| 1,621 | 33.510638 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.jobcontrol;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doThrow;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.junit.Test;
/**
* Tests the JobControl API using mock and stub Job instances.
*/
public class TestMapReduceJobControlWithMocks {
@Test
public void testSuccessfulJobs() throws Exception {
JobControl jobControl = new JobControl("Test");
ControlledJob job1 = createSuccessfulControlledJob(jobControl);
ControlledJob job2 = createSuccessfulControlledJob(jobControl);
ControlledJob job3 = createSuccessfulControlledJob(jobControl, job1, job2);
ControlledJob job4 = createSuccessfulControlledJob(jobControl, job3);
runJobControl(jobControl);
assertEquals("Success list", 4, jobControl.getSuccessfulJobList().size());
assertEquals("Failed list", 0, jobControl.getFailedJobList().size());
assertTrue(job1.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(job2.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(job3.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(job4.getJobState() == ControlledJob.State.SUCCESS);
jobControl.stop();
}
@Test
public void testFailedJob() throws Exception {
JobControl jobControl = new JobControl("Test");
ControlledJob job1 = createFailedControlledJob(jobControl);
ControlledJob job2 = createSuccessfulControlledJob(jobControl);
ControlledJob job3 = createSuccessfulControlledJob(jobControl, job1, job2);
ControlledJob job4 = createSuccessfulControlledJob(jobControl, job3);
runJobControl(jobControl);
assertEquals("Success list", 1, jobControl.getSuccessfulJobList().size());
assertEquals("Failed list", 3, jobControl.getFailedJobList().size());
assertTrue(job1.getJobState() == ControlledJob.State.FAILED);
assertTrue(job2.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(job3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
assertTrue(job4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
jobControl.stop();
}
@Test
public void testErrorWhileSubmitting() throws Exception {
JobControl jobControl = new JobControl("Test");
Job mockJob = mock(Job.class);
ControlledJob job1 = new ControlledJob(mockJob, null);
when(mockJob.getConfiguration()).thenReturn(new Configuration());
doThrow(new IncompatibleClassChangeError("This is a test")).when(mockJob).submit();
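    // Submission throws an Error rather than an Exception; JobControl is
    // expected to mark the job FAILED instead of letting the controller die.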
jobControl.addJob(job1);
runJobControl(jobControl);
try {
assertEquals("Success list", 0, jobControl.getSuccessfulJobList().size());
assertEquals("Failed list", 1, jobControl.getFailedJobList().size());
assertTrue(job1.getJobState() == ControlledJob.State.FAILED);
} finally {
jobControl.stop();
}
}
@Test
public void testKillJob() throws Exception {
JobControl jobControl = new JobControl("Test");
ControlledJob job = createFailedControlledJob(jobControl);
job.killJob();
// Verify that killJob() was called on the mock Job
verify(job.getJob()).killJob();
}
private Job createJob(boolean complete, boolean successful)
throws IOException, InterruptedException {
// Create a stub Job that responds in a controlled way
Job mockJob = mock(Job.class);
when(mockJob.getConfiguration()).thenReturn(new Configuration());
when(mockJob.isComplete()).thenReturn(complete);
when(mockJob.isSuccessful()).thenReturn(successful);
return mockJob;
}
private ControlledJob createControlledJob(JobControl jobControl,
boolean successful, ControlledJob... dependingJobs)
throws IOException, InterruptedException {
List<ControlledJob> dependingJobsList = dependingJobs == null ? null :
Arrays.asList(dependingJobs);
ControlledJob job = new ControlledJob(createJob(true, successful),
dependingJobsList);
jobControl.addJob(job);
return job;
}
private ControlledJob createSuccessfulControlledJob(JobControl jobControl,
ControlledJob... dependingJobs) throws IOException, InterruptedException {
return createControlledJob(jobControl, true, dependingJobs);
}
private ControlledJob createFailedControlledJob(JobControl jobControl,
ControlledJob... dependingJobs) throws IOException, InterruptedException {
return createControlledJob(jobControl, false, dependingJobs);
}
private void runJobControl(JobControl jobControl) {
Thread controller = new Thread(jobControl);
controller.start();
waitTillAllFinished(jobControl);
}
private void waitTillAllFinished(JobControl jobControl) {
while (!jobControl.allFinished()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
// ignore
}
}
}
}
| 5,957 | 34.676647 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.jobcontrol;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.junit.Test;
/**
 * This class performs unit tests for the Job/JobControl classes.
 */
public class TestMapReduceJobControl extends HadoopTestCase {
public static final Log LOG =
LogFactory.getLog(TestMapReduceJobControl.class.getName());
static Path rootDataDir = new Path(
System.getProperty("test.build.data", "."), "TestData");
static Path indir = new Path(rootDataDir, "indir");
static Path outdir_1 = new Path(rootDataDir, "outdir_1");
static Path outdir_2 = new Path(rootDataDir, "outdir_2");
static Path outdir_3 = new Path(rootDataDir, "outdir_3");
static Path outdir_4 = new Path(rootDataDir, "outdir_4");
static ControlledJob cjob1 = null;
static ControlledJob cjob2 = null;
static ControlledJob cjob3 = null;
static ControlledJob cjob4 = null;
public TestMapReduceJobControl() throws IOException {
super(HadoopTestCase.LOCAL_MR , HadoopTestCase.LOCAL_FS, 2, 2);
}
private void cleanupData(Configuration conf) throws Exception {
FileSystem fs = FileSystem.get(conf);
MapReduceTestUtil.cleanData(fs, indir);
MapReduceTestUtil.generateData(fs, indir);
MapReduceTestUtil.cleanData(fs, outdir_1);
MapReduceTestUtil.cleanData(fs, outdir_2);
MapReduceTestUtil.cleanData(fs, outdir_3);
MapReduceTestUtil.cleanData(fs, outdir_4);
}
  /**
   * Creates the job dependencies used for testing the JobControl class.
   * It requires 4 jobs:
   *      Job 1: passed as parameter. input:indir  output:outdir_1
   *      Job 2: copy data from indir to outdir_2
   *      Job 3: copy data from outdir_1 and outdir_2 to outdir_3
   *      Job 4: copy data from outdir_3 to outdir_4
   * Jobs 1 and 2 have no dependencies. Job 3 depends on jobs 1 and 2.
   * Job 4 depends on job 3.
   *
   * It then creates a JobControl object, adds the 4 jobs to it, and finally
   * starts a thread to run the JobControl object.
   */
private JobControl createDependencies(Configuration conf, Job job1)
throws Exception {
List<ControlledJob> dependingJobs = null;
cjob1 = new ControlledJob(job1, dependingJobs);
Job job2 = MapReduceTestUtil.createCopyJob(conf, outdir_2, indir);
cjob2 = new ControlledJob(job2, dependingJobs);
Job job3 = MapReduceTestUtil.createCopyJob(conf, outdir_3,
outdir_1, outdir_2);
dependingJobs = new ArrayList<ControlledJob>();
dependingJobs.add(cjob1);
dependingJobs.add(cjob2);
cjob3 = new ControlledJob(job3, dependingJobs);
Job job4 = MapReduceTestUtil.createCopyJob(conf, outdir_4, outdir_3);
dependingJobs = new ArrayList<ControlledJob>();
dependingJobs.add(cjob3);
cjob4 = new ControlledJob(job4, dependingJobs);
JobControl theControl = new JobControl("Test");
theControl.addJob(cjob1);
theControl.addJob(cjob2);
theControl.addJob(cjob3);
theControl.addJob(cjob4);
Thread theController = new Thread(theControl);
theController.start();
return theControl;
}
private void waitTillAllFinished(JobControl theControl) {
while (!theControl.allFinished()) {
try {
Thread.sleep(100);
} catch (Exception e) {}
}
}
public void testJobControlWithFailJob() throws Exception {
LOG.info("Starting testJobControlWithFailJob");
Configuration conf = createJobConf();
cleanupData(conf);
// create a Fail job
Job job1 = MapReduceTestUtil.createFailJob(conf, outdir_1, indir);
// create job dependencies
JobControl theControl = createDependencies(conf, job1);
// wait till all the jobs complete
waitTillAllFinished(theControl);
assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
theControl.stop();
}
public void testJobControlWithKillJob() throws Exception {
LOG.info("Starting testJobControlWithKillJob");
Configuration conf = createJobConf();
cleanupData(conf);
Job job1 = MapReduceTestUtil.createKillJob(conf, outdir_1, indir);
JobControl theControl = createDependencies(conf, job1);
while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
    // verify that adding a depending job to a RUNNING job fails.
assertFalse(cjob1.addDependingJob(cjob2));
// suspend jobcontrol and resume it again
theControl.suspend();
assertTrue(
theControl.getThreadState() == JobControl.ThreadState.SUSPENDED);
theControl.resume();
// kill the first job.
cjob1.killJob();
// wait till all the jobs complete
waitTillAllFinished(theControl);
assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
theControl.stop();
}
public void testJobControl() throws Exception {
LOG.info("Starting testJobControl");
Configuration conf = createJobConf();
cleanupData(conf);
Job job1 = MapReduceTestUtil.createCopyJob(conf, outdir_1, indir);
JobControl theControl = createDependencies(conf, job1);
// wait till all the jobs complete
waitTillAllFinished(theControl);
assertEquals("Some jobs failed", 0, theControl.getFailedJobList().size());
theControl.stop();
}
@Test(timeout = 30000)
public void testControlledJob() throws Exception {
LOG.info("Starting testControlledJob");
Configuration conf = createJobConf();
cleanupData(conf);
Job job1 = MapReduceTestUtil.createCopyJob(conf, outdir_1, indir);
JobControl theControl = createDependencies(conf, job1);
while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
Assert.assertNotNull(cjob1.getMapredJobId());
// wait till all the jobs complete
waitTillAllFinished(theControl);
assertEquals("Some jobs failed", 0, theControl.getFailedJobList().size());
theControl.stop();
}
}
| 7,753 | 33.15859 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinProperties.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import java.util.List;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
public class TestJoinProperties extends TestCase {
private static MiniDFSCluster cluster = null;
final static int SOURCES = 3;
final static int ITEMS = (SOURCES + 1) * (SOURCES + 1);
static int[][] source = new int[SOURCES][];
static Path[] src;
static Path base;
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestJoinProperties.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
base = cluster.getFileSystem().makeQualified(new Path("/nested"));
src = generateSources(conf);
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
  // Sources from 0 to srcs-2 have IntWritable key and IntWritable value.
  // The srcs-1 source has IntWritable key and LongWritable value.
private static SequenceFile.Writer[] createWriters(Path testdir,
Configuration conf, int srcs, Path[] src) throws IOException {
for (int i = 0; i < srcs; ++i) {
src[i] = new Path(testdir, Integer.toString(i + 10, 36));
}
SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
for (int i = 0; i < srcs - 1; ++i) {
out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[i], IntWritable.class, IntWritable.class);
}
out[srcs - 1] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[srcs - 1], IntWritable.class, LongWritable.class);
return out;
}
private static String stringify(IntWritable key, Writable val) {
StringBuilder sb = new StringBuilder();
sb.append("(" + key);
sb.append("," + val + ")");
return sb.toString();
}
private static Path[] generateSources(Configuration conf)
throws IOException {
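    // Source i holds the first ITEMS multiples of (i + 2); every source uses
    // IntWritable keys, and only the last source uses LongWritable values.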
for (int i = 0; i < SOURCES; ++i) {
source[i] = new int[ITEMS];
for (int j = 0; j < ITEMS; ++j) {
source[i][j] = (i + 2) * (j + 1);
}
}
Path[] src = new Path[SOURCES];
SequenceFile.Writer out[] = createWriters(base, conf, SOURCES, src);
IntWritable k = new IntWritable();
for (int i = 0; i < SOURCES; ++i) {
Writable v;
if (i != SOURCES -1) {
v = new IntWritable();
((IntWritable)v).set(i);
} else {
v = new LongWritable();
((LongWritable)v).set(i);
}
for (int j = 0; j < ITEMS; ++j) {
k.set(source[i][j]);
out[i].append(k, v);
}
out[i].close();
}
return src;
}
private String A() {
return CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[0].toString());
}
private String B() {
return CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[1].toString());
}
private String C() {
return CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[2].toString());
}
// construct op(op(A,B),C)
private String constructExpr1(String op) {
StringBuilder sb = new StringBuilder();
sb.append(op + "(" +op +"(");
sb.append(A());
sb.append(",");
sb.append(B());
sb.append("),");
sb.append(C());
sb.append(")");
return sb.toString();
}
// construct op(A,op(B,C))
private String constructExpr2(String op) {
StringBuilder sb = new StringBuilder();
sb.append(op + "(");
sb.append(A());
sb.append(",");
sb.append(op +"(");
sb.append(B());
sb.append(",");
sb.append(C());
sb.append("))");
return sb.toString();
}
  // construct op(A, B, C)
private String constructExpr3(String op) {
StringBuilder sb = new StringBuilder();
sb.append(op + "(");
sb.append(A());
sb.append(",");
sb.append(B());
sb.append(",");
sb.append(C());
sb.append(")");
return sb.toString();
}
// construct override(inner(A, B), A)
private String constructExpr4() {
StringBuilder sb = new StringBuilder();
sb.append("override(inner(");
sb.append(A());
sb.append(",");
sb.append(B());
sb.append("),");
sb.append(A());
sb.append(")");
return sb.toString();
}
enum TestType {OUTER_ASSOCIATIVITY, INNER_IDENTITY, INNER_ASSOCIATIVITY}
private void validateKeyValue(WritableComparable<?> k, Writable v,
int tupleSize, boolean firstTuple, boolean secondTuple,
TestType ttype) throws IOException {
System.out.println("out k:" + k + " v:" + v);
if (ttype.equals(TestType.OUTER_ASSOCIATIVITY)) {
validateOuterKeyValue((IntWritable)k, (TupleWritable)v, tupleSize,
firstTuple, secondTuple);
} else if (ttype.equals(TestType.INNER_ASSOCIATIVITY)) {
validateInnerKeyValue((IntWritable)k, (TupleWritable)v, tupleSize,
firstTuple, secondTuple);
}
if (ttype.equals(TestType.INNER_IDENTITY)) {
validateKeyValue_INNER_IDENTITY((IntWritable)k, (IntWritable)v);
}
}
private void testExpr1(Configuration conf, String op, TestType ttype,
int expectedCount) throws Exception {
String joinExpr = constructExpr1(op);
conf.set(CompositeInputFormat.JOIN_EXPR, joinExpr);
int count = testFormat(conf, 2, true, false, ttype);
assertTrue("not all keys present", count == expectedCount);
}
private void testExpr2(Configuration conf, String op, TestType ttype,
int expectedCount) throws Exception {
String joinExpr = constructExpr2(op);
conf.set(CompositeInputFormat.JOIN_EXPR, joinExpr);
int count = testFormat(conf, 2, false, true, ttype);
assertTrue("not all keys present", count == expectedCount);
}
private void testExpr3(Configuration conf, String op, TestType ttype,
int expectedCount) throws Exception {
String joinExpr = constructExpr3(op);
conf.set(CompositeInputFormat.JOIN_EXPR, joinExpr);
int count = testFormat(conf, 3, false, false, ttype);
assertTrue("not all keys present", count == expectedCount);
}
private void testExpr4(Configuration conf) throws Exception {
String joinExpr = constructExpr4();
conf.set(CompositeInputFormat.JOIN_EXPR, joinExpr);
int count = testFormat(conf, 0, false, false, TestType.INNER_IDENTITY);
assertTrue("not all keys present", count == ITEMS);
}
// outer(outer(A, B), C) == outer(A,outer(B, C)) == outer(A, B, C)
public void testOuterAssociativity() throws Exception {
Configuration conf = new Configuration();
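    // The sources hold multiples of 2, 3 and 4 respectively; their union over
    // the generated ranges contains 33 distinct keys.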
testExpr1(conf, "outer", TestType.OUTER_ASSOCIATIVITY, 33);
testExpr2(conf, "outer", TestType.OUTER_ASSOCIATIVITY, 33);
testExpr3(conf, "outer", TestType.OUTER_ASSOCIATIVITY, 33);
}
// inner(inner(A, B), C) == inner(A,inner(B, C)) == inner(A, B, C)
public void testInnerAssociativity() throws Exception {
Configuration conf = new Configuration();
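    // An inner join keeps only keys present in every source; within the
    // generated ranges those are the common multiples 12 and 24, hence 2.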
testExpr1(conf, "inner", TestType.INNER_ASSOCIATIVITY, 2);
testExpr2(conf, "inner", TestType.INNER_ASSOCIATIVITY, 2);
testExpr3(conf, "inner", TestType.INNER_ASSOCIATIVITY, 2);
}
// override(inner(A, B), A) == A
public void testIdentity() throws Exception {
Configuration conf = new Configuration();
testExpr4(conf);
}
private void validateOuterKeyValue(IntWritable k, TupleWritable v,
int tupleSize, boolean firstTuple, boolean secondTuple) {
final String kvstr = "Unexpected tuple: " + stringify(k, v);
assertTrue(kvstr, v.size() == tupleSize);
int key = k.get();
IntWritable val0 = null;
IntWritable val1 = null;
LongWritable val2 = null;
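    // A key appears in source 0 iff it is a multiple of 2 (within range), in
    // source 1 iff a multiple of 3, and in source 2 iff a multiple of 4; the
    // corresponding values were written as 0, 1 and 2.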
if (firstTuple) {
TupleWritable v0 = ((TupleWritable)v.get(0));
if (key % 2 == 0 && key / 2 <= ITEMS) {
val0 = (IntWritable)v0.get(0);
} else {
assertFalse(kvstr, v0.has(0));
}
if (key % 3 == 0 && key / 3 <= ITEMS) {
val1 = (IntWritable)v0.get(1);
} else {
assertFalse(kvstr, v0.has(1));
}
if (key % 4 == 0 && key / 4 <= ITEMS) {
val2 = (LongWritable)v.get(1);
} else {
assertFalse(kvstr, v.has(2));
}
} else if (secondTuple) {
if (key % 2 == 0 && key / 2 <= ITEMS) {
val0 = (IntWritable)v.get(0);
} else {
assertFalse(kvstr, v.has(0));
}
TupleWritable v1 = ((TupleWritable)v.get(1));
if (key % 3 == 0 && key / 3 <= ITEMS) {
val1 = (IntWritable)v1.get(0);
} else {
assertFalse(kvstr, v1.has(0));
}
if (key % 4 == 0 && key / 4 <= ITEMS) {
val2 = (LongWritable)v1.get(1);
} else {
assertFalse(kvstr, v1.has(1));
}
} else {
if (key % 2 == 0 && key / 2 <= ITEMS) {
val0 = (IntWritable)v.get(0);
} else {
assertFalse(kvstr, v.has(0));
}
if (key % 3 == 0 && key / 3 <= ITEMS) {
val1 = (IntWritable)v.get(1);
} else {
assertFalse(kvstr, v.has(1));
}
if (key % 4 == 0 && key / 4 <= ITEMS) {
val2 = (LongWritable)v.get(2);
} else {
assertFalse(kvstr, v.has(2));
}
}
if (val0 != null) {
assertTrue(kvstr, val0.get() == 0);
}
if (val1 != null) {
assertTrue(kvstr, val1.get() == 1);
}
if (val2 != null) {
assertTrue(kvstr, val2.get() == 2);
}
}
private void validateInnerKeyValue(IntWritable k, TupleWritable v,
int tupleSize, boolean firstTuple, boolean secondTuple) {
final String kvstr = "Unexpected tuple: " + stringify(k, v);
assertTrue(kvstr, v.size() == tupleSize);
int key = k.get();
IntWritable val0 = null;
IntWritable val1 = null;
LongWritable val2 = null;
assertTrue(kvstr, key % 2 == 0 && key / 2 <= ITEMS);
assertTrue(kvstr, key % 3 == 0 && key / 3 <= ITEMS);
assertTrue(kvstr, key % 4 == 0 && key / 4 <= ITEMS);
if (firstTuple) {
TupleWritable v0 = ((TupleWritable)v.get(0));
val0 = (IntWritable)v0.get(0);
val1 = (IntWritable)v0.get(1);
val2 = (LongWritable)v.get(1);
} else if (secondTuple) {
val0 = (IntWritable)v.get(0);
TupleWritable v1 = ((TupleWritable)v.get(1));
val1 = (IntWritable)v1.get(0);
val2 = (LongWritable)v1.get(1);
} else {
val0 = (IntWritable)v.get(0);
val1 = (IntWritable)v.get(1);
val2 = (LongWritable)v.get(2);
}
assertTrue(kvstr, val0.get() == 0);
assertTrue(kvstr, val1.get() == 1);
assertTrue(kvstr, val2.get() == 2);
}
private void validateKeyValue_INNER_IDENTITY(IntWritable k, IntWritable v) {
final String kvstr = "Unexpected tuple: " + stringify(k, v);
int key = k.get();
assertTrue(kvstr, (key % 2 == 0 && key / 2 <= ITEMS));
assertTrue(kvstr, v.get() == 0);
}
@SuppressWarnings("unchecked")
public int testFormat(Configuration conf, int tupleSize,
boolean firstTuple, boolean secondTuple, TestType ttype)
throws Exception {
Job job = Job.getInstance(conf);
CompositeInputFormat format = new CompositeInputFormat();
int count = 0;
for (InputSplit split : (List<InputSplit>)format.getSplits(job)) {
TaskAttemptContext context =
MapReduceTestUtil.createDummyMapTaskAttemptContext(conf);
RecordReader reader = format.createRecordReader(
split, context);
MapContext mcontext =
new MapContextImpl(conf,
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
WritableComparable key = null;
Writable value = null;
while (reader.nextKeyValue()) {
key = (WritableComparable) reader.getCurrentKey();
value = (Writable) reader.getCurrentValue();
validateKeyValue(key, value,
tupleSize, firstTuple, secondTuple, ttype);
count++;
}
}
return count;
}
}
| 13,337 | 32.596977 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinTupleWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.Arrays;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
public class TestJoinTupleWritable extends TestCase {
private TupleWritable makeTuple(Writable[] writs) {
Writable[] sub1 = { writs[1], writs[2] };
Writable[] sub3 = { writs[4], writs[5] };
Writable[] sub2 = { writs[3], new TupleWritable(sub3), writs[6] };
Writable[] vals = { writs[0], new TupleWritable(sub1),
new TupleWritable(sub2), writs[7], writs[8],
writs[9] };
// [v0, [v1, v2], [v3, [v4, v5], v6], v7, v8, v9]
TupleWritable ret = new TupleWritable(vals);
for (int i = 0; i < 6; ++i) {
ret.setWritten(i);
}
((TupleWritable)sub2[1]).setWritten(0);
((TupleWritable)sub2[1]).setWritten(1);
((TupleWritable)vals[1]).setWritten(0);
((TupleWritable)vals[1]).setWritten(1);
for (int i = 0; i < 3; ++i) {
((TupleWritable)vals[2]).setWritten(i);
}
return ret;
}
private Writable[] makeRandomWritables() {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
return writs;
}
  private Writable[] makeRandomWritables(int numWrits) {
    Writable[] writs = makeRandomWritables();
    Writable[] manyWrits = new Writable[numWrits];
    for (int i = 0; i < manyWrits.length; i++) {
      manyWrits[i] = writs[i % writs.length];
    }
    return manyWrits;
  }
private int verifIter(Writable[] writs, TupleWritable t, int i) {
for (Writable w : t) {
if (w instanceof TupleWritable) {
i = verifIter(writs, ((TupleWritable)w), i);
continue;
}
assertTrue("Bad value", w.equals(writs[i++]));
}
return i;
}
public void testIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable t = new TupleWritable(writs);
for (int i = 0; i < 6; ++i) {
t.setWritten(i);
}
verifIter(writs, t, 0);
}
public void testNestedIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable sTuple = makeTuple(writs);
assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
public void testWritable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable sTuple = makeTuple(writs);
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
}
public void testWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
TupleWritable sTuple = new TupleWritable(manyWrits);
    for (int i = 0; i < manyWrits.length; i++) {
      if (i % 3 == 0) {
        sTuple.setWritten(i);
      }
    }
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",
-1, in.read());
}
public void testWideWritable2() throws Exception {
Writable[] manyWrits = makeRandomWritables(71);
TupleWritable sTuple = new TupleWritable(manyWrits);
    for (int i = 0; i < manyWrits.length; i++) {
      sTuple.setWritten(i);
    }
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",
-1, in.read());
}
/**
   * Tests a TupleWritable with more than 64 values where the written values
   * are spread far apart.
*/
public void testSparseWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
TupleWritable sTuple = new TupleWritable(manyWrits);
    for (int i = 0; i < manyWrits.length; i++) {
      if (i % 65 == 0) {
        sTuple.setWritten(i);
      }
    }
ByteArrayOutputStream out = new ByteArrayOutputStream();
sTuple.write(new DataOutputStream(out));
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
TupleWritable dTuple = new TupleWritable();
dTuple.readFields(new DataInputStream(in));
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",
-1, in.read());
}
public void testWideTuple() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
Arrays.fill(values,emptyText);
values[42] = new Text("Number 42");
TupleWritable tuple = new TupleWritable(values);
tuple.setWritten(42);
for (int pos=0; pos<tuple.size();pos++) {
boolean has = tuple.has(pos);
if (pos == 42) {
assertTrue(has);
}
else {
assertFalse("Tuple position is incorrectly labelled as set: " + pos,
has);
}
}
}
public void testWideTuple2() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
Arrays.fill(values,emptyText);
values[9] = new Text("Number 9");
TupleWritable tuple = new TupleWritable(values);
tuple.setWritten(9);
for (int pos=0; pos<tuple.size();pos++) {
boolean has = tuple.has(pos);
if (pos == 9) {
assertTrue(has);
}
else {
assertFalse("Tuple position is incorrectly labelled as set: " + pos,
has);
}
}
}
/**
* Tests that we can write more than 64 values.
*/
public void testWideTupleBoundary() throws Exception {
Text emptyText = new Text("Should not be set written");
Writable[] values = new Writable[65];
Arrays.fill(values,emptyText);
values[64] = new Text("Should be the only value set written");
TupleWritable tuple = new TupleWritable(values);
tuple.setWritten(64);
for (int pos=0; pos<tuple.size();pos++) {
boolean has = tuple.has(pos);
if (pos == 64) {
assertTrue(has);
}
else {
assertFalse("Tuple position is incorrectly labelled as set: " + pos,
has);
}
}
}
}
| 9,730 | 32.555172 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import java.io.IOException;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
public class TestJoinDatamerge extends TestCase {
private static MiniDFSCluster cluster = null;
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestJoinDatamerge.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
private static SequenceFile.Writer[] createWriters(Path testdir,
Configuration conf, int srcs, Path[] src) throws IOException {
for (int i = 0; i < srcs; ++i) {
src[i] = new Path(testdir, Integer.toString(i + 10, 36));
}
SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
for (int i = 0; i < srcs; ++i) {
out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[i], IntWritable.class, IntWritable.class);
}
return out;
}
private static Path[] writeSimpleSrc(Path testdir, Configuration conf,
int srcs) throws IOException {
SequenceFile.Writer out[] = null;
Path[] src = new Path[srcs];
try {
out = createWriters(testdir, conf, srcs, src);
final int capacity = srcs * 2 + 1;
IntWritable key = new IntWritable();
IntWritable val = new IntWritable();
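      // All sources share the key k * srcs whenever k is a multiple of srcs;
      // otherwise source i gets the unique key k * srcs + i. A duplicate
      // record is also appended when i == k.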
for (int k = 0; k < capacity; ++k) {
for (int i = 0; i < srcs; ++i) {
key.set(k % srcs == 0 ? k * srcs : k * srcs + i);
val.set(10 * k + i);
out[i].append(key, val);
if (i == k) {
// add duplicate key
out[i].append(key, val);
}
}
}
} finally {
if (out != null) {
for (int i = 0; i < srcs; ++i) {
if (out[i] != null)
out[i].close();
}
}
}
return src;
}
private static String stringify(IntWritable key, Writable val) {
StringBuilder sb = new StringBuilder();
sb.append("(" + key);
sb.append("," + val + ")");
return sb.toString();
}
private static abstract class SimpleCheckerMapBase<V extends Writable>
extends Mapper<IntWritable, V, IntWritable, IntWritable>{
protected final static IntWritable one = new IntWritable(1);
int srcs;
public void setup(Context context) {
srcs = context.getConfiguration().getInt("testdatamerge.sources", 0);
assertTrue("Invalid src count: " + srcs, srcs > 0);
}
}
private static abstract class SimpleCheckerReduceBase
extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
protected final static IntWritable one = new IntWritable(1);
int srcs;
public void setup(Context context) {
srcs = context.getConfiguration().getInt("testdatamerge.sources", 0);
assertTrue("Invalid src count: " + srcs, srcs > 0);
}
public void reduce(IntWritable key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int seen = 0;
for (IntWritable value : values) {
seen += value.get();
}
assertTrue("Bad count for " + key.get(), verify(key.get(), seen));
context.write(key, new IntWritable(seen));
}
public abstract boolean verify(int key, int occ);
}
private static class InnerJoinMapChecker
extends SimpleCheckerMapBase<TupleWritable> {
public void map(IntWritable key, TupleWritable val, Context context)
throws IOException, InterruptedException {
int k = key.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
assertTrue(kvstr, 0 == k % (srcs * srcs));
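      // writeSimpleSrc stored value 10 * (key / srcs) + i in source i for keys
      // shared by every source, hence (vali - i) * srcs must equal 10 * key.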
for (int i = 0; i < val.size(); ++i) {
final int vali = ((IntWritable)val.get(i)).get();
assertTrue(kvstr, (vali - i) * srcs == 10 * k);
}
context.write(key, one);
// If the user modifies the key or any of the values in the tuple, it
// should not affect the rest of the join.
key.set(-1);
if (val.has(0)) {
((IntWritable)val.get(0)).set(0);
}
}
}
private static class InnerJoinReduceChecker
extends SimpleCheckerReduceBase {
public boolean verify(int key, int occ) {
return (key == 0 && occ == 2) ||
(key != 0 && (key % (srcs * srcs) == 0) && occ == 1);
}
}
private static class OuterJoinMapChecker
extends SimpleCheckerMapBase<TupleWritable> {
public void map(IntWritable key, TupleWritable val, Context context)
throws IOException, InterruptedException {
int k = key.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
if (0 == k % (srcs * srcs)) {
for (int i = 0; i < val.size(); ++i) {
assertTrue(kvstr, val.get(i) instanceof IntWritable);
final int vali = ((IntWritable)val.get(i)).get();
assertTrue(kvstr, (vali - i) * srcs == 10 * k);
}
} else {
for (int i = 0; i < val.size(); ++i) {
if (i == k % srcs) {
assertTrue(kvstr, val.get(i) instanceof IntWritable);
final int vali = ((IntWritable)val.get(i)).get();
assertTrue(kvstr, srcs * (vali - i) == 10 * (k - i));
} else {
assertTrue(kvstr, !val.has(i));
}
}
}
context.write(key, one);
      // If the user modifies the key or any of the values in the tuple, it
// should not affect the rest of the join.
key.set(-1);
if (val.has(0)) {
((IntWritable)val.get(0)).set(0);
}
}
}
private static class OuterJoinReduceChecker
extends SimpleCheckerReduceBase {
public boolean verify(int key, int occ) {
if (key < srcs * srcs && (key % (srcs + 1)) == 0) {
return 2 == occ;
}
return 1 == occ;
}
}
private static class OverrideMapChecker
extends SimpleCheckerMapBase<IntWritable> {
public void map(IntWritable key, IntWritable val, Context context)
throws IOException, InterruptedException {
int k = key.get();
final int vali = val.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
if (0 == k % (srcs * srcs)) {
assertTrue(kvstr, vali == k * 10 / srcs + srcs - 1);
} else {
final int i = k % srcs;
assertTrue(kvstr, srcs * (vali - i) == 10 * (k - i));
}
context.write(key, one);
      // If the user modifies the key or any of the values in the tuple, it
// should not affect the rest of the join.
key.set(-1);
val.set(0);
}
}
private static class OverrideReduceChecker
extends SimpleCheckerReduceBase {
public boolean verify(int key, int occ) {
if (key < srcs * srcs && (key % (srcs + 1)) == 0 && key != 0) {
return 2 == occ;
}
return 1 == occ;
}
}
private static void joinAs(String jointype,
Class<? extends SimpleCheckerMapBase<?>> map,
Class<? extends SimpleCheckerReduceBase> reduce) throws Exception {
final int srcs = 4;
Configuration conf = new Configuration();
Path base = cluster.getFileSystem().makeQualified(new Path("/"+jointype));
Path[] src = writeSimpleSrc(base, conf, srcs);
conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose(jointype,
SequenceFileInputFormat.class, src));
conf.setInt("testdatamerge.sources", srcs);
Job job = Job.getInstance(conf);
job.setInputFormatClass(CompositeInputFormat.class);
FileOutputFormat.setOutputPath(job, new Path(base, "out"));
job.setMapperClass(map);
job.setReducerClass(reduce);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(IntWritable.class);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
if ("outer".equals(jointype)) {
checkOuterConsistency(job, src);
}
base.getFileSystem(conf).delete(base, true);
}
public void testSimpleInnerJoin() throws Exception {
joinAs("inner", InnerJoinMapChecker.class, InnerJoinReduceChecker.class);
}
public void testSimpleOuterJoin() throws Exception {
joinAs("outer", OuterJoinMapChecker.class, OuterJoinReduceChecker.class);
}
private static void checkOuterConsistency(Job job, Path[] src)
throws IOException {
Path outf = FileOutputFormat.getOutputPath(job);
FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new
Utils.OutputFileUtils.OutputFilesFilter());
assertEquals("number of part files is more than 1. It is" + outlist.length,
1, outlist.length);
assertTrue("output file with zero length" + outlist[0].getLen(),
0 < outlist[0].getLen());
SequenceFile.Reader r =
new SequenceFile.Reader(cluster.getFileSystem(),
outlist[0].getPath(), job.getConfiguration());
IntWritable k = new IntWritable();
IntWritable v = new IntWritable();
while (r.next(k, v)) {
assertEquals("counts does not match", v.get(),
countProduct(k, src, job.getConfiguration()));
}
r.close();
}
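  /**
   * Count the key's occurrences in each source and multiply the per-source
   * counts together, skipping sources that do not contain the key. For an
   * outer join this product is the expected number of rows emitted for the key.
   */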
private static int countProduct(IntWritable key, Path[] src,
Configuration conf) throws IOException {
int product = 1;
for (Path p : src) {
int count = 0;
SequenceFile.Reader r = new SequenceFile.Reader(
cluster.getFileSystem(), p, conf);
IntWritable k = new IntWritable();
IntWritable v = new IntWritable();
while (r.next(k, v)) {
if (k.equals(key)) {
count++;
}
}
r.close();
if (count != 0) {
product *= count;
}
}
return product;
}
public void testSimpleOverride() throws Exception {
joinAs("override", OverrideMapChecker.class, OverrideReduceChecker.class);
}
public void testNestedJoin() throws Exception {
// outer(inner(S1,...,Sn),outer(S1,...Sn))
final int SOURCES = 3;
final int ITEMS = (SOURCES + 1) * (SOURCES + 1);
Configuration conf = new Configuration();
Path base = cluster.getFileSystem().makeQualified(new Path("/nested"));
int[][] source = new int[SOURCES][];
for (int i = 0; i < SOURCES; ++i) {
source[i] = new int[ITEMS];
for (int j = 0; j < ITEMS; ++j) {
source[i][j] = (i + 2) * (j + 1);
}
}
Path[] src = new Path[SOURCES];
SequenceFile.Writer out[] = createWriters(base, conf, SOURCES, src);
IntWritable k = new IntWritable();
for (int i = 0; i < SOURCES; ++i) {
IntWritable v = new IntWritable();
v.set(i);
for (int j = 0; j < ITEMS; ++j) {
k.set(source[i][j]);
out[i].append(k, v);
}
out[i].close();
}
out = null;
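    // Build the nested join expression; with SOURCES = 3 it looks roughly like
    //   outer(inner(tbl(SequenceFileInputFormat,src0),...,tbl(...,src2)),
    //         outer(tbl(Fake_IF,"foobar"),tbl(...,src0),...,tbl(Fake_IF,"raboof")))
    // where each tbl(...) term comes from CompositeInputFormat.compose().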
StringBuilder sb = new StringBuilder();
sb.append("outer(inner(");
for (int i = 0; i < SOURCES; ++i) {
sb.append(CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[i].toString()));
if (i + 1 != SOURCES) sb.append(",");
}
sb.append("),outer(");
sb.append(CompositeInputFormat.compose(
MapReduceTestUtil.Fake_IF.class, "foobar"));
sb.append(",");
for (int i = 0; i < SOURCES; ++i) {
sb.append(
CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[i].toString()));
sb.append(",");
}
sb.append(CompositeInputFormat.compose(
MapReduceTestUtil.Fake_IF.class, "raboof") + "))");
conf.set(CompositeInputFormat.JOIN_EXPR, sb.toString());
MapReduceTestUtil.Fake_IF.setKeyClass(conf, IntWritable.class);
MapReduceTestUtil.Fake_IF.setValClass(conf, IntWritable.class);
Job job = Job.getInstance(conf);
Path outf = new Path(base, "out");
FileOutputFormat.setOutputPath(job, outf);
job.setInputFormatClass(CompositeInputFormat.class);
job.setMapperClass(Mapper.class);
job.setReducerClass(Reducer.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(TupleWritable.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
FileStatus[] outlist = cluster.getFileSystem().listStatus(outf,
new Utils.OutputFileUtils.OutputFilesFilter());
assertEquals(1, outlist.length);
assertTrue(0 < outlist[0].getLen());
SequenceFile.Reader r =
new SequenceFile.Reader(cluster.getFileSystem(),
outlist[0].getPath(), conf);
TupleWritable v = new TupleWritable();
while (r.next(k, v)) {
assertFalse(((TupleWritable)v.get(1)).has(0));
assertFalse(((TupleWritable)v.get(1)).has(SOURCES + 1));
boolean chk = true;
int ki = k.get();
for (int i = 2; i < SOURCES + 2; ++i) {
if ((ki % i) == 0 && ki <= i * ITEMS) {
assertEquals(i - 2, ((IntWritable)
((TupleWritable)v.get(1)).get((i - 1))).get());
} else chk = false;
}
if (chk) { // present in all sources; chk inner
assertTrue(v.has(0));
for (int i = 0; i < SOURCES; ++i)
assertTrue(((TupleWritable)v.get(0)).has(i));
} else { // should not be present in inner join
assertFalse(v.has(0));
}
}
r.close();
base.getFileSystem(conf).delete(base, true);
}
public void testEmptyJoin() throws Exception {
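    // Fake_IF yields no records, so the outer join runs over empty input; the
    // test only verifies that the job still completes successfully.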
Configuration conf = new Configuration();
Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose("outer",
MapReduceTestUtil.Fake_IF.class, src));
MapReduceTestUtil.Fake_IF.setKeyClass(conf,
MapReduceTestUtil.IncomparableKey.class);
Job job = Job.getInstance(conf);
job.setInputFormatClass(CompositeInputFormat.class);
FileOutputFormat.setOutputPath(job, new Path(base, "out"));
job.setMapperClass(Mapper.class);
job.setReducerClass(Reducer.class);
job.setOutputKeyClass(MapReduceTestUtil.IncomparableKey.class);
job.setOutputValueClass(NullWritable.class);
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
base.getFileSystem(conf).delete(base, true);
}
}
| 16,033 | 34.790179 | 83 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.join;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.MapReduceTestUtil.Fake_RR;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
public class TestWrappedRRClassloader extends TestCase {
/**
* Tests the class loader set by
* {@link Configuration#setClassLoader(ClassLoader)}
* is inherited by any {@link WrappedRecordReader}s created by
* {@link CompositeRecordReader}
*/
public void testClassLoader() throws Exception {
Configuration conf = new Configuration();
Fake_ClassLoader classLoader = new Fake_ClassLoader();
conf.setClassLoader(classLoader);
assertTrue(conf.getClassLoader() instanceof Fake_ClassLoader);
FileSystem fs = FileSystem.get(conf);
Path testdir = new Path(System.getProperty("test.build.data", "/tmp"))
.makeQualified(fs);
Path base = new Path(testdir, "/empty");
Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
conf.set(CompositeInputFormat.JOIN_EXPR,
CompositeInputFormat.compose("outer", IF_ClassLoaderChecker.class, src));
CompositeInputFormat<NullWritable> inputFormat =
new CompositeInputFormat<NullWritable>();
// create dummy TaskAttemptID
TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
conf.set(MRJobConfig.TASK_ATTEMPT_ID, tid.toString());
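    // Creating the record reader makes CompositeInputFormat build its
    // WrappedRecordReaders, which runs the class-loader assertion in
    // RR_ClassLoaderChecker below.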
inputFormat.createRecordReader
(inputFormat.getSplits(Job.getInstance(conf)).get(0),
new TaskAttemptContextImpl(conf, tid));
}
public static class Fake_ClassLoader extends ClassLoader {
}
public static class IF_ClassLoaderChecker<K, V>
extends MapReduceTestUtil.Fake_IF<K, V> {
public IF_ClassLoaderChecker() {
}
public RecordReader<K, V> createRecordReader(InputSplit ignored,
TaskAttemptContext context) {
return new RR_ClassLoaderChecker<K, V>(context.getConfiguration());
}
}
public static class RR_ClassLoaderChecker<K, V> extends Fake_RR<K, V> {
@SuppressWarnings("unchecked")
public RR_ClassLoaderChecker(Configuration conf) {
assertTrue("The class loader has not been inherited from "
+ CompositeRecordReader.class.getSimpleName(),
conf.getClassLoader() instanceof Fake_ClassLoader);
}
}
}
| 3,309 | 36.613636 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/fieldsel/TestMRFieldSelection.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.fieldsel;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import junit.framework.TestCase;
import java.text.NumberFormat;
public class TestMRFieldSelection extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
public void testFieldSelection() throws Exception {
launch();
}
private static Path testDir = new Path(
System.getProperty("test.build.data", "/tmp"), "field");
public static void launch() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
int numOfInputLines = 10;
Path outDir = new Path(testDir, "output_for_field_selection_test");
Path inDir = new Path(testDir, "input_for_field_selection_test");
StringBuffer inputData = new StringBuffer();
StringBuffer expectedOutput = new StringBuffer();
constructInputOutputData(inputData, expectedOutput, numOfInputLines);
conf.set(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "-");
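    // The specs use the "keyFields:valueFields" form understood by
    // FieldSelectionHelper: field indices start at 0, entries are separated by
    // commas, and an open-ended range such as "0-" selects a field through the
    // end of the record.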
conf.set(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "6,5,1-3:0-");
conf.set(
FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, ":4,3,2,1,0,0-");
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
1, 1, inputData.toString());
job.setMapperClass(FieldSelectionMapper.class);
job.setReducerClass(FieldSelectionReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
job.waitForCompletion(true);
assertTrue("Job Failed!", job.isSuccessful());
//
// Finally, we compare the reconstructed answer key with the
// original one. Remember, we need to ignore zero-count items
// in the original key.
//
String outdata = MapReduceTestUtil.readOutput(outDir, conf);
assertEquals("Outputs doesnt match.",expectedOutput.toString(), outdata);
fs.delete(outDir, true);
}
public static void constructInputOutputData(StringBuffer inputData,
StringBuffer expectedOutput, int numOfInputLines) {
for (int i = 0; i < numOfInputLines; i++) {
inputData.append(idFormat.format(i));
inputData.append("-").append(idFormat.format(i+1));
inputData.append("-").append(idFormat.format(i+2));
inputData.append("-").append(idFormat.format(i+3));
inputData.append("-").append(idFormat.format(i+4));
inputData.append("-").append(idFormat.format(i+5));
inputData.append("-").append(idFormat.format(i+6));
inputData.append("\n");
expectedOutput.append(idFormat.format(i+3));
expectedOutput.append("-" ).append (idFormat.format(i+2));
expectedOutput.append("-" ).append (idFormat.format(i+1));
expectedOutput.append("-" ).append (idFormat.format(i+5));
expectedOutput.append("-" ).append (idFormat.format(i+6));
expectedOutput.append("-" ).append (idFormat.format(i+6));
expectedOutput.append("-" ).append (idFormat.format(i+5));
expectedOutput.append("-" ).append (idFormat.format(i+1));
expectedOutput.append("-" ).append (idFormat.format(i+2));
expectedOutput.append("-" ).append (idFormat.format(i+3));
expectedOutput.append("-" ).append (idFormat.format(i+0));
expectedOutput.append("-" ).append (idFormat.format(i+1));
expectedOutput.append("-" ).append (idFormat.format(i+2));
expectedOutput.append("-" ).append (idFormat.format(i+3));
expectedOutput.append("-" ).append (idFormat.format(i+4));
expectedOutput.append("-" ).append (idFormat.format(i+5));
expectedOutput.append("-" ).append (idFormat.format(i+6));
expectedOutput.append("\n");
}
System.out.println("inputData:");
System.out.println(inputData.toString());
System.out.println("ExpectedData:");
System.out.println(expectedOutput.toString());
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
launch();
}
}
| 5,034 | 39.28 | 77 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/map/TestMultithreadedMapper.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.*;
import java.io.IOException;
public class TestMultithreadedMapper extends HadoopTestCase {
public TestMultithreadedMapper() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
public void testOKRun() throws Exception {
run(false, false);
}
public void testIOExRun() throws Exception {
run(true, false);
}
public void testRuntimeExRun() throws Exception {
run(false, true);
}
private void run(boolean ioEx, boolean rtEx) throws Exception {
String localPathRoot = System.getProperty("test.build.data", "/tmp");
Path inDir = new Path(localPathRoot, "testing/mt/input");
Path outDir = new Path(localPathRoot, "testing/mt/output");
Configuration conf = createJobConf();
if (ioEx) {
conf.setBoolean("multithreaded.ioException", true);
}
if (rtEx) {
conf.setBoolean("multithreaded.runtimeException", true);
}
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1);
job.setJobName("mt");
job.setMapperClass(MultithreadedMapper.class);
MultithreadedMapper.setMapperClass(job, IDMap.class);
MultithreadedMapper.setNumberOfThreads(job, 2);
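    // Two threads run IDMap concurrently; the job is expected to fail exactly
    // when one of the injected exceptions is thrown.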
job.setReducerClass(Reducer.class);
job.waitForCompletion(true);
if (job.isSuccessful()) {
assertFalse(ioEx || rtEx);
}
else {
assertTrue(ioEx || rtEx);
}
}
public static class IDMap extends
Mapper<LongWritable, Text, LongWritable, Text> {
private boolean ioEx = false;
private boolean rtEx = false;
public void setup(Context context) {
ioEx = context.getConfiguration().
getBoolean("multithreaded.ioException", false);
rtEx = context.getConfiguration().
getBoolean("multithreaded.runtimeException", false);
}
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
if (ioEx) {
throw new IOException();
}
if (rtEx) {
throw new RuntimeException();
}
super.map(key, value, context);
}
}
}
| 3,186 | 29.941748 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRCJCFileInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import static org.apache.hadoop.test.MockitoMaker.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
public class TestMRCJCFileInputFormat {
@Test
public void testAddInputPath() throws IOException {
final Configuration conf = new Configuration();
conf.set("fs.defaultFS", "file:///abc/");
final Job j = Job.getInstance(conf);
//setup default fs
final FileSystem defaultfs = FileSystem.get(conf);
System.out.println("defaultfs.getUri() = " + defaultfs.getUri());
{
//test addInputPath
final Path original = new Path("file:/foo");
System.out.println("original = " + original);
FileInputFormat.addInputPath(j, original);
final Path[] results = FileInputFormat.getInputPaths(j);
System.out.println("results = " + Arrays.asList(results));
assertEquals(1, results.length);
assertEquals(original, results[0]);
}
{
//test setInputPaths
final Path original = new Path("file:/bar");
System.out.println("original = " + original);
FileInputFormat.setInputPaths(j, original);
final Path[] results = FileInputFormat.getInputPaths(j);
System.out.println("results = " + Arrays.asList(results));
assertEquals(1, results.length);
assertEquals(original, results[0]);
}
}
@Test
public void testNumInputFiles() throws Exception {
Configuration conf = spy(new Configuration());
Job job = make(stub(Job.class).returning(conf).from.getConfiguration());
FileStatus stat = make(stub(FileStatus.class).returning(0L).from.getLen());
TextInputFormat ispy = spy(new TextInputFormat());
doReturn(Arrays.asList(stat)).when(ispy).listStatus(job);
ispy.getSplits(job);
verify(conf).setLong(FileInputFormat.NUM_INPUT_FILES, 1);
}
@Test
@SuppressWarnings({"rawtypes", "unchecked"})
public void testLastInputSplitAtSplitBoundary() throws Exception {
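    // 1 GB of input with a 128 MB split size divides evenly, so exactly eight
    // splits are expected, each located on its own mock host.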
    FileInputFormat fif = new FileInputFormatForTest(1024L * 1024 * 1024,
        128L * 1024 * 1024);
Configuration conf = new Configuration();
JobContext jobContext = mock(JobContext.class);
when(jobContext.getConfiguration()).thenReturn(conf);
List<InputSplit> splits = fif.getSplits(jobContext);
assertEquals(8, splits.size());
for (int i = 0 ; i < splits.size() ; i++) {
InputSplit split = splits.get(i);
assertEquals(("host" + i), split.getLocations()[0]);
}
}
@Test
@SuppressWarnings({ "rawtypes", "unchecked" })
public void testLastInputSplitExceedingSplitBoundary() throws Exception {
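    // 1027 MB leaves a 3 MB tail past the eighth 128 MB boundary; the small
    // remainder (within FileInputFormat's ~1.1x slop factor) is folded into the
    // last split, so eight splits are still expected.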
    FileInputFormat fif = new FileInputFormatForTest(1027L * 1024 * 1024,
        128L * 1024 * 1024);
Configuration conf = new Configuration();
JobContext jobContext = mock(JobContext.class);
when(jobContext.getConfiguration()).thenReturn(conf);
List<InputSplit> splits = fif.getSplits(jobContext);
assertEquals(8, splits.size());
for (int i = 0; i < splits.size(); i++) {
InputSplit split = splits.get(i);
assertEquals(("host" + i), split.getLocations()[0]);
}
}
@Test
@SuppressWarnings({ "rawtypes", "unchecked" })
public void testLastInputSplitSingleSplit() throws Exception {
    FileInputFormat fif = new FileInputFormatForTest(100L * 1024 * 1024,
        128L * 1024 * 1024);
Configuration conf = new Configuration();
JobContext jobContext = mock(JobContext.class);
when(jobContext.getConfiguration()).thenReturn(conf);
List<InputSplit> splits = fif.getSplits(jobContext);
assertEquals(1, splits.size());
for (int i = 0; i < splits.size(); i++) {
InputSplit split = splits.get(i);
assertEquals(("host" + i), split.getLocations()[0]);
}
}
/**
* Test when the input file's length is 0.
*/
@Test
public void testForEmptyFile() throws Exception {
Configuration conf = new Configuration();
FileSystem fileSys = FileSystem.get(conf);
Path file = new Path("test" + "/file");
FSDataOutputStream out = fileSys.create(file, true,
conf.getInt("io.file.buffer.size", 4096), (short) 1, (long) 1024);
out.write(new byte[0]);
out.close();
// split it using a File input format
DummyInputFormat inFormat = new DummyInputFormat();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, "test");
List<InputSplit> splits = inFormat.getSplits(job);
assertEquals(1, splits.size());
FileSplit fileSplit = (FileSplit) splits.get(0);
assertEquals(0, fileSplit.getLocations().length);
assertEquals(file.getName(), fileSplit.getPath().getName());
assertEquals(0, fileSplit.getStart());
assertEquals(0, fileSplit.getLength());
fileSys.delete(file.getParent(), true);
}
/** Dummy class to extend FileInputFormat*/
private class DummyInputFormat extends FileInputFormat<Text, Text> {
@Override
public RecordReader<Text,Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
return null;
}
}
private class FileInputFormatForTest<K, V> extends FileInputFormat<K, V> {
long splitSize;
long length;
FileInputFormatForTest(long length, long splitSize) {
this.length = length;
this.splitSize = splitSize;
}
@Override
public RecordReader<K, V> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return null;
}
@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
FileStatus mockFileStatus = mock(FileStatus.class);
when(mockFileStatus.getBlockSize()).thenReturn(splitSize);
Path mockPath = mock(Path.class);
FileSystem mockFs = mock(FileSystem.class);
BlockLocation[] blockLocations = mockBlockLocations(length, splitSize);
when(mockFs.getFileBlockLocations(mockFileStatus, 0, length)).thenReturn(
blockLocations);
when(mockPath.getFileSystem(any(Configuration.class))).thenReturn(mockFs);
when(mockFileStatus.getPath()).thenReturn(mockPath);
when(mockFileStatus.getLen()).thenReturn(length);
List<FileStatus> list = new ArrayList<FileStatus>();
list.add(mockFileStatus);
return list;
}
@Override
protected long computeSplitSize(long blockSize, long minSize, long maxSize) {
return splitSize;
}
private BlockLocation[] mockBlockLocations(long size, long splitSize) {
int numLocations = (int) (size / splitSize);
if (size % splitSize != 0)
numLocations++;
BlockLocation[] blockLocations = new BlockLocation[numLocations];
for (int i = 0; i < numLocations; i++) {
String[] names = new String[] { "b" + i };
String[] hosts = new String[] { "host" + i };
blockLocations[i] = new BlockLocation(names, hosts, i * splitSize,
Math.min(splitSize, size - (splitSize * i)));
}
return blockLocations;
}
}
}
| 8,517 | 35.55794 | 81 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineSequenceFileInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import java.util.BitSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.Test;
public class TestCombineSequenceFileInputFormat {
private static final Log LOG =
LogFactory.getLog(TestCombineSequenceFileInputFormat.class);
private static Configuration conf = new Configuration();
private static FileSystem localFs = null;
static {
try {
conf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(conf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestCombineSequenceFileInputFormat");
@Test(timeout=10000)
public void testFormat() throws IOException, InterruptedException {
Job job = Job.getInstance(conf);
Random random = new Random();
long seed = random.nextLong();
random.setSeed(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int length = 10000;
final int numFiles = 10;
// create files with a variety of lengths
createFiles(length, numFiles, random, job);
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
// create a combine split for the files
InputFormat<IntWritable,BytesWritable> format =
new CombineSequenceFileInputFormat<IntWritable,BytesWritable>();
for (int i = 0; i < 3; i++) {
int numSplits =
random.nextInt(length/(SequenceFile.SYNC_INTERVAL/20)) + 1;
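      // numSplits is only logged for reference; the new-API getSplits(job)
      // below does not take a requested split count.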
LOG.info("splitting: requesting = " + numSplits);
List<InputSplit> splits = format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
// we should have a single split as the length is comfortably smaller than
// the block size
assertEquals("We got more than one splits!", 1, splits.size());
InputSplit split = splits.get(0);
assertEquals("It should be CombineFileSplit",
CombineFileSplit.class, split.getClass());
// check the split
BitSet bits = new BitSet(length);
RecordReader<IntWritable,BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<IntWritable,BytesWritable,IntWritable,BytesWritable> mcontext =
new MapContextImpl<IntWritable,BytesWritable,IntWritable,BytesWritable>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
assertEquals("reader class is CombineFileRecordReader.",
CombineFileRecordReader.class, reader.getClass());
try {
while (reader.nextKeyValue()) {
IntWritable key = reader.getCurrentKey();
BytesWritable value = reader.getCurrentValue();
assertNotNull("Value should not be null.", value);
final int k = key.get();
LOG.debug("read " + k);
assertFalse("Key in multiple partitions.", bits.get(k));
bits.set(k);
}
} finally {
reader.close();
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
private static class Range {
private final int start;
private final int end;
Range(int start, int end) {
this.start = start;
this.end = end;
}
@Override
public String toString() {
return "(" + start + ", " + end + ")";
}
}
private static Range[] createRanges(int length, int numFiles, Random random) {
// generate a number of files with various lengths
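    // Ranges are contiguous and non-overlapping, so every key in [0, length)
    // is written to exactly one file; the BitSet check in testFormat relies on
    // this.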
Range[] ranges = new Range[numFiles];
for (int i = 0; i < numFiles; i++) {
int start = i == 0 ? 0 : ranges[i-1].end;
int end = i == numFiles - 1 ?
length :
(length/numFiles)*(2*i + 1)/2 + random.nextInt(length/numFiles) + 1;
ranges[i] = new Range(start, end);
}
return ranges;
}
private static void createFiles(int length, int numFiles, Random random,
Job job) throws IOException {
Range[] ranges = createRanges(length, numFiles, random);
for (int i = 0; i < numFiles; i++) {
Path file = new Path(workDir, "test_" + i + ".seq");
// create a file with length entries
@SuppressWarnings("deprecation")
SequenceFile.Writer writer =
SequenceFile.createWriter(localFs, job.getConfiguration(), file,
IntWritable.class, BytesWritable.class);
Range range = ranges[i];
try {
for (int j = range.start; j < range.end; j++) {
IntWritable key = new IntWritable(j);
byte[] data = new byte[random.nextInt(10)];
random.nextBytes(data);
BytesWritable value = new BytesWritable(data);
writer.append(key, value);
}
} finally {
writer.close();
}
}
}
}
| 6,670 | 34.673797 | 103 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestNLineInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
public class TestNLineInputFormat extends TestCase {
private static int MAX_LENGTH = 200;
private static Configuration conf = new Configuration();
private static FileSystem localFs = null;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestNLineInputFormat");
public void testFormat() throws Exception {
Job job = Job.getInstance(conf);
Path file = new Path(workDir, "test.txt");
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
int numLinesPerMap = 5;
NLineInputFormat.setNumLinesPerSplit(job, numLinesPerMap);
    for (int length = 0; length < MAX_LENGTH; length += 1) {
// create a file with length entries
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i)+" some more text");
writer.write("\n");
}
} finally {
writer.close();
}
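      // The last split should contain length % 5 lines, or a full five lines
      // when length is an exact multiple of numLinesPerMap.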
int lastN = 0;
if (length != 0) {
lastN = length % 5;
if (lastN == 0) {
lastN = 5;
}
}
checkFormat(job, numLinesPerMap, lastN);
}
}
void checkFormat(Job job, int expectedN, int lastN)
throws IOException, InterruptedException {
NLineInputFormat format = new NLineInputFormat();
List<InputSplit> splits = format.getSplits(job);
int count = 0;
for (int i = 0; i < splits.size(); i++) {
assertEquals("There are no split locations", 0,
splits.get(i).getLocations().length);
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<LongWritable, Text> reader = format.createRecordReader(
splits.get(i), context);
Class<?> clazz = reader.getClass();
assertEquals("reader class is LineRecordReader.",
LineRecordReader.class, clazz);
MapContext<LongWritable, Text, LongWritable, Text> mcontext =
new MapContextImpl<LongWritable, Text, LongWritable, Text>(
job.getConfiguration(), context.getTaskAttemptID(), reader, null,
null, MapReduceTestUtil.createDummyReporter(), splits.get(i));
reader.initialize(splits.get(i), mcontext);
try {
count = 0;
while (reader.nextKeyValue()) {
count++;
}
} finally {
reader.close();
}
      if (i == splits.size() - 1) {
        assertEquals("number of lines in split(" + i + ") is wrong",
            lastN, count);
      } else {
        assertEquals("number of lines in split(" + i + ") is wrong",
            expectedN, count);
      }
}
}
public static void main(String[] args) throws Exception {
new TestNLineInputFormat().testFormat();
}
}
| 4,181 | 32.725806 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeoutException;
import java.util.zip.GZIPOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HdfsBlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.OneBlockInfo;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.OneFileInfo;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import com.google.common.collect.HashMultiset;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
public class TestCombineFileInputFormat {
private static final String rack1[] = new String[] {
"/r1"
};
private static final String hosts1[] = new String[] {
"host1.rack1.com"
};
private static final String rack2[] = new String[] {
"/r2"
};
private static final String hosts2[] = new String[] {
"host2.rack2.com"
};
private static final String rack3[] = new String[] {
"/r3"
};
private static final String hosts3[] = new String[] {
"host3.rack3.com"
};
final Path inDir = new Path("/racktesting");
final Path outputPath = new Path("/output");
final Path dir1 = new Path(inDir, "/dir1");
final Path dir2 = new Path(inDir, "/dir2");
final Path dir3 = new Path(inDir, "/dir3");
final Path dir4 = new Path(inDir, "/dir4");
final Path dir5 = new Path(inDir, "/dir5");
static final int BLOCKSIZE = 1024;
static final byte[] databuf = new byte[BLOCKSIZE];
@Mock
private List<String> mockList;
@Before
public void initMocks() {
MockitoAnnotations.initMocks(this);
}
private static final String DUMMY_FS_URI = "dummyfs:///";
/** Dummy class to extend CombineFileInputFormat*/
private class DummyInputFormat extends CombineFileInputFormat<Text, Text> {
@Override
public RecordReader<Text,Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
return null;
}
}
  /** Dummy class to extend CombineFileInputFormat. It allows
   * non-existent files to be passed into the CombineFileInputFormat, which
   * makes testing easy without having to create real files.
   */
private class DummyInputFormat1 extends DummyInputFormat {
@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
Path[] files = getInputPaths(job);
List<FileStatus> results = new ArrayList<FileStatus>();
for (int i = 0; i < files.length; i++) {
Path p = files[i];
FileSystem fs = p.getFileSystem(job.getConfiguration());
results.add(fs.getFileStatus(p));
}
return results;
}
}
/** Dummy class to extend CombineFileInputFormat. It allows
* testing with files having missing blocks without actually removing replicas.
*/
public static class MissingBlockFileSystem extends DistributedFileSystem {
String fileWithMissingBlocks;
@Override
public void initialize(URI name, Configuration conf) throws IOException {
fileWithMissingBlocks = "";
super.initialize(name, conf);
}
@Override
public BlockLocation[] getFileBlockLocations(
FileStatus stat, long start, long len) throws IOException {
if (stat.isDir()) {
return null;
}
System.out.println("File " + stat.getPath());
String name = stat.getPath().toUri().getPath();
BlockLocation[] locs =
super.getFileBlockLocations(stat, start, len);
if (name.equals(fileWithMissingBlocks)) {
System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
locs[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
new String[0], locs[0].getOffset(), locs[0].getLength()), null);
}
return locs;
}
public void setFileWithMissingBlocks(String f) {
fileWithMissingBlocks = f;
}
}
private static final String DUMMY_KEY = "dummy.rr.key";
private static class DummyRecordReader extends RecordReader<Text, Text> {
private TaskAttemptContext context;
private CombineFileSplit s;
private int idx;
private boolean used;
public DummyRecordReader(CombineFileSplit split, TaskAttemptContext context,
Integer i) {
this.context = context;
this.idx = i;
this.s = split;
this.used = true;
}
/** @return a value specified in the context to check whether the
* context is properly updated by the initialize() method.
*/
public String getDummyConfVal() {
return this.context.getConfiguration().get(DUMMY_KEY);
}
public void initialize(InputSplit split, TaskAttemptContext context) {
this.context = context;
this.s = (CombineFileSplit) split;
// By setting used to true in the c'tor, but false in initialize,
// we can check that initialize() is always called before use
// (e.g., in testReinit()).
this.used = false;
}
public boolean nextKeyValue() {
boolean ret = !used;
this.used = true;
return ret;
}
public Text getCurrentKey() {
return new Text(this.context.getConfiguration().get(DUMMY_KEY));
}
public Text getCurrentValue() {
return new Text(this.s.getPath(idx).toString());
}
public float getProgress() {
return used ? 1.0f : 0.0f;
}
public void close() {
}
}
/** Extend CFIF to use CFRR with DummyRecordReader */
private class ChildRRInputFormat extends CombineFileInputFormat<Text, Text> {
@SuppressWarnings("unchecked")
@Override
public RecordReader<Text,Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
return new CombineFileRecordReader((CombineFileSplit) split, context,
(Class) DummyRecordReader.class);
}
}
@Test
public void testRecordReaderInit() throws InterruptedException, IOException {
// Test that we properly initialize the child recordreader when
// CombineFileInputFormat and CombineFileRecordReader are used.
TaskAttemptID taskId = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
Configuration conf1 = new Configuration();
conf1.set(DUMMY_KEY, "STATE1");
TaskAttemptContext context1 = new TaskAttemptContextImpl(conf1, taskId);
// This will create a CombineFileRecordReader that itself contains a
// DummyRecordReader.
InputFormat inputFormat = new ChildRRInputFormat();
Path [] files = { new Path("file1") };
long [] lengths = { 1 };
CombineFileSplit split = new CombineFileSplit(files, lengths);
RecordReader rr = inputFormat.createRecordReader(split, context1);
assertTrue("Unexpected RR type!", rr instanceof CombineFileRecordReader);
// Verify that the initial configuration is the one being used.
// Right after construction the dummy key should have value "STATE1"
assertEquals("Invalid initial dummy key value", "STATE1",
rr.getCurrentKey().toString());
// Switch the active context for the RecordReader...
Configuration conf2 = new Configuration();
conf2.set(DUMMY_KEY, "STATE2");
TaskAttemptContext context2 = new TaskAttemptContextImpl(conf2, taskId);
rr.initialize(split, context2);
// And verify that the new context is updated into the child record reader.
assertEquals("Invalid secondary dummy key value", "STATE2",
rr.getCurrentKey().toString());
}
@Test
public void testReinit() throws Exception {
// Test that a split containing multiple files works correctly,
// with the child RecordReader getting its initialize() method
// called a second time.
TaskAttemptID taskId = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
Configuration conf = new Configuration();
TaskAttemptContext context = new TaskAttemptContextImpl(conf, taskId);
// This will create a CombineFileRecordReader that itself contains a
// DummyRecordReader.
InputFormat inputFormat = new ChildRRInputFormat();
Path [] files = { new Path("file1"), new Path("file2") };
long [] lengths = { 1, 1 };
CombineFileSplit split = new CombineFileSplit(files, lengths);
RecordReader rr = inputFormat.createRecordReader(split, context);
assertTrue("Unexpected RR type!", rr instanceof CombineFileRecordReader);
// first initialize() call comes from MapTask. We'll do it here.
rr.initialize(split, context);
// First value is first filename.
assertTrue(rr.nextKeyValue());
assertEquals("file1", rr.getCurrentValue().toString());
// The inner RR will return false, because it only emits one (k, v) pair.
// But there's another sub-split to process. This returns true to us.
assertTrue(rr.nextKeyValue());
// And the 2nd rr will have its initialize method called correctly.
assertEquals("file2", rr.getCurrentValue().toString());
// But after both child RR's have returned their singleton (k, v), this
// should also return false.
assertFalse(rr.nextKeyValue());
}
/**
* For testing each split has the expected name, length, and offset.
*/
private final class Split {
private String name;
private long length;
private long offset;
public Split(String name, long length, long offset) {
this.name = name;
this.length = length;
this.offset = offset;
}
public String getName() {
return name;
}
public long getLength() {
return length;
}
public long getOffset() {
return offset;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof Split) {
Split split = ((Split) obj);
return split.name.equals(name) && split.length == length
&& split.offset == offset;
}
return false;
}
}
/**
* The test suppresses unchecked warnings in
* {@link org.mockito.Mockito#reset}. Although calling the method is
* a bad manner, we call the method instead of splitting the test
* (i.e. restarting MiniDFSCluster) to save time.
*/
@Test
@SuppressWarnings("unchecked")
public void testSplitPlacement() throws Exception {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
try {
/* Start 3 datanodes, one each in rack r1, r2, r3. Create five files
* 1) file1 and file5, just after starting the datanode on r1, with
* a repl factor of 1, and,
* 2) file2, just after starting the datanode on r2, with
* a repl factor of 2, and,
* 3) file3, file4 after starting the all three datanodes, with a repl
* factor of 3.
* At the end, file1, file5 will be present on only datanode1, file2 will
* be present on datanode 1 and datanode2 and
* file3, file4 will be present on all datanodes.
*/
Configuration conf = new Configuration();
conf.setBoolean("dfs.replication.considerLoad", false);
dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
.build();
dfs.waitActive();
fileSys = dfs.getFileSystem();
if (!fileSys.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
Path file1 = new Path(dir1 + "/file1");
writeFile(conf, file1, (short) 1, 1);
// create another file on the same datanode
Path file5 = new Path(dir5 + "/file5");
writeFile(conf, file5, (short) 1, 1);
// split it using a CombinedFile input format
DummyInputFormat inFormat = new DummyInputFormat();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, dir1 + "," + dir5);
List<InputSplit> splits = inFormat.getSplits(job);
System.out.println("Made splits(Test0): " + splits.size());
for (InputSplit split : splits) {
System.out.println("File split(Test0): " + split);
}
assertEquals(1, splits.size());
CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(file5.getName(), fileSplit.getPath(1).getName());
assertEquals(0, fileSplit.getOffset(1));
assertEquals(BLOCKSIZE, fileSplit.getLength(1));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
dfs.startDataNodes(conf, 1, true, null, rack2, hosts2, null);
dfs.waitActive();
// create file on two datanodes.
Path file2 = new Path(dir2 + "/file2");
writeFile(conf, file2, (short) 2, 2);
// split it using a CombinedFile input format
inFormat = new DummyInputFormat();
FileInputFormat.setInputPaths(job, dir1 + "," + dir2);
inFormat.setMinSplitSizeRack(BLOCKSIZE);
splits = inFormat.getSplits(job);
System.out.println("Made splits(Test1): " + splits.size());
for (InputSplit split : splits) {
System.out.println("File split(Test1): " + split);
}
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1. Otherwise create two splits.
*/
if (splits.size() == 2) {
// first split is on rack2, contains file2
if (split.equals(splits.get(0))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file2.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(file2.getName(), fileSplit.getPath(1).getName());
assertEquals(BLOCKSIZE, fileSplit.getOffset(1));
assertEquals(BLOCKSIZE, fileSplit.getLength(1));
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
// second split is on rack1, contains file1
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 1) {
// first split is on rack1, contains file1 and file2.
assertEquals(3, fileSplit.getNumPaths());
Set<Split> expected = new HashSet<>();
expected.add(new Split(file1.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, BLOCKSIZE));
List<Split> actual = new ArrayList<>();
for (int i = 0; i < 3; i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
assertTrue(actual.containsAll(expected));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} else {
fail("Expected split size is 1 or 2, but actual size is "
+ splits.size());
}
}
// create another file on 3 datanodes and 3 racks.
dfs.startDataNodes(conf, 1, true, null, rack3, hosts3, null);
dfs.waitActive();
Path file3 = new Path(dir3 + "/file3");
writeFile(conf, new Path(dir3 + "/file3"), (short) 3, 3);
inFormat = new DummyInputFormat();
FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3);
inFormat.setMinSplitSizeRack(BLOCKSIZE);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test2): " + split);
}
Set<Split> expected = new HashSet<>();
expected.add(new Split(file1.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file3.getName(), BLOCKSIZE, 0));
expected.add(new Split(file3.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file3.getName(), BLOCKSIZE, BLOCKSIZE * 2));
List<Split> actual = new ArrayList<>();
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1.
* If rack2 or rack3 is processed first and rack1 is processed second,
* create one split on rack2 or rack3 and the other split is on rack1.
* Otherwise create 3 splits for each rack.
*/
if (splits.size() == 3) {
// first split is on rack3, contains file3
if (split.equals(splits.get(0))) {
assertEquals(3, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file3.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(file3.getName(), fileSplit.getPath(1).getName());
assertEquals(BLOCKSIZE, fileSplit.getOffset(1));
assertEquals(BLOCKSIZE, fileSplit.getLength(1));
assertEquals(file3.getName(), fileSplit.getPath(2).getName());
assertEquals(2 * BLOCKSIZE, fileSplit.getOffset(2));
assertEquals(BLOCKSIZE, fileSplit.getLength(2));
assertEquals(hosts3[0], fileSplit.getLocations()[0]);
}
// second split is on rack2, contains file2
if (split.equals(splits.get(1))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file2.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(file2.getName(), fileSplit.getPath(1).getName());
assertEquals(BLOCKSIZE, fileSplit.getOffset(1));
assertEquals(BLOCKSIZE, fileSplit.getLength(1));
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
// third split is on rack1, contains file1
if (split.equals(splits.get(2))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 2) {
// first split is on rack2 or rack3, contains one or two files.
if (split.equals(splits.get(0))) {
assertEquals(1, fileSplit.getLocations().length);
if (fileSplit.getLocations()[0].equals(hosts2[0])) {
assertEquals(2, fileSplit.getNumPaths());
} else if (fileSplit.getLocations()[0].equals(hosts3[0])) {
assertEquals(3, fileSplit.getNumPaths());
} else {
fail("First split should be on rack2 or rack3.");
}
}
// second split is on rack1, contains the rest files.
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 1) {
// first split is rack1, contains all three files.
assertEquals(1, fileSplit.getLocations().length);
assertEquals(6, fileSplit.getNumPaths());
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} else {
fail("Split size should be 1, 2, or 3.");
}
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
}
assertEquals(6, actual.size());
assertTrue(actual.containsAll(expected));
// create file4 on all three racks
Path file4 = new Path(dir4 + "/file4");
writeFile(conf, file4, (short)3, 3);
inFormat = new DummyInputFormat();
FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3 + "," + dir4);
inFormat.setMinSplitSizeRack(BLOCKSIZE);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test3): " + split);
}
expected.add(new Split(file4.getName(), BLOCKSIZE, 0));
expected.add(new Split(file4.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file4.getName(), BLOCKSIZE, BLOCKSIZE * 2));
actual.clear();
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1.
* If rack2 or rack3 is processed first and rack1 is processed second,
* create one split on rack2 or rack3 and the other split is on rack1.
* Otherwise create 3 splits for each rack.
*/
if (splits.size() == 3) {
// first split is on rack3, contains file3 and file4
if (split.equals(splits.get(0))) {
assertEquals(6, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts3[0], fileSplit.getLocations()[0]);
}
// second split is on rack2, contains file2
if (split.equals(splits.get(1))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file2.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(file2.getName(), fileSplit.getPath(1).getName());
assertEquals(BLOCKSIZE, fileSplit.getOffset(1));
assertEquals(BLOCKSIZE, fileSplit.getLength(1));
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
// third split is on rack1, contains file1
if (split.equals(splits.get(2))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 2) {
// first split is on rack2 or rack3, contains two or three files.
if (split.equals(splits.get(0))) {
assertEquals(1, fileSplit.getLocations().length);
if (fileSplit.getLocations()[0].equals(hosts2[0])) {
assertEquals(5, fileSplit.getNumPaths());
} else if (fileSplit.getLocations()[0].equals(hosts3[0])) {
assertEquals(6, fileSplit.getNumPaths());
} else {
fail("First split should be on rack2 or rack3.");
}
}
        // second split is on rack1, contains the rest of the files.
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 1) {
        // first split is on rack1, contains all four files.
assertEquals(1, fileSplit.getLocations().length);
assertEquals(9, fileSplit.getNumPaths());
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} else {
fail("Split size should be 1, 2, or 3.");
}
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
}
assertEquals(9, actual.size());
assertTrue(actual.containsAll(expected));
// maximum split size is 2 blocks
inFormat = new DummyInputFormat();
inFormat.setMinSplitSizeNode(BLOCKSIZE);
inFormat.setMaxSplitSize(2*BLOCKSIZE);
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test4): " + split);
}
assertEquals(5, splits.size());
actual.clear();
reset(mockList);
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(9, actual.size());
assertTrue(actual.containsAll(expected));
// verify the splits are on all the racks
verify(mockList, atLeastOnce()).add(hosts1[0]);
verify(mockList, atLeastOnce()).add(hosts2[0]);
verify(mockList, atLeastOnce()).add(hosts3[0]);
// maximum split size is 3 blocks
inFormat = new DummyInputFormat();
inFormat.setMinSplitSizeNode(BLOCKSIZE);
inFormat.setMaxSplitSize(3*BLOCKSIZE);
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test5): " + split);
}
assertEquals(3, splits.size());
actual.clear();
reset(mockList);
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(9, actual.size());
assertTrue(actual.containsAll(expected));
verify(mockList, atLeastOnce()).add(hosts1[0]);
verify(mockList, atLeastOnce()).add(hosts2[0]);
// maximum split size is 4 blocks
inFormat = new DummyInputFormat();
inFormat.setMaxSplitSize(4*BLOCKSIZE);
FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test6): " + split);
}
assertEquals(3, splits.size());
actual.clear();
reset(mockList);
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(9, actual.size());
assertTrue(actual.containsAll(expected));
verify(mockList, atLeastOnce()).add(hosts1[0]);
// maximum split size is 7 blocks and min is 3 blocks
inFormat = new DummyInputFormat();
inFormat.setMaxSplitSize(7*BLOCKSIZE);
inFormat.setMinSplitSizeNode(3*BLOCKSIZE);
inFormat.setMinSplitSizeRack(3*BLOCKSIZE);
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test7): " + split);
}
assertEquals(2, splits.size());
actual.clear();
reset(mockList);
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(9, actual.size());
assertTrue(actual.containsAll(expected));
verify(mockList, atLeastOnce()).add(hosts1[0]);
// Rack 1 has file1, file2 and file3 and file4
// Rack 2 has file2 and file3 and file4
// Rack 3 has file3 and file4
      // set up a filter so that only (file1 and file2) or (file3 and file4)
      // can be combined
inFormat = new DummyInputFormat();
FileInputFormat.addInputPath(job, inDir);
inFormat.setMinSplitSizeRack(1); // everything is at least rack local
inFormat.createPool(new TestFilter(dir1),
new TestFilter(dir2));
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test1): " + split);
}
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
if (splits.size() == 2) {
// first split is on rack1, contains file1 and file2.
if (split.equals(splits.get(0))) {
assertEquals(3, fileSplit.getNumPaths());
expected.clear();
expected.add(new Split(file1.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, BLOCKSIZE));
actual.clear();
for (int i = 0; i < 3; i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
assertTrue(actual.containsAll(expected));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(1))) {
          // second split contains file3 and file4; however,
          // its locations are undetermined.
assertEquals(6, fileSplit.getNumPaths());
expected.clear();
expected.add(new Split(file3.getName(), BLOCKSIZE, 0));
expected.add(new Split(file3.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file3.getName(), BLOCKSIZE, BLOCKSIZE * 2));
expected.add(new Split(file4.getName(), BLOCKSIZE, 0));
expected.add(new Split(file4.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file4.getName(), BLOCKSIZE, BLOCKSIZE * 2));
actual.clear();
for (int i = 0; i < 6; i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
assertTrue(actual.containsAll(expected));
assertEquals(1, fileSplit.getLocations().length);
}
} else if (splits.size() == 3) {
if (split.equals(splits.get(0))) {
// first split is on rack2, contains file2
assertEquals(2, fileSplit.getNumPaths());
expected.clear();
expected.add(new Split(file2.getName(), BLOCKSIZE, 0));
expected.add(new Split(file2.getName(), BLOCKSIZE, BLOCKSIZE));
actual.clear();
for (int i = 0; i < 2; i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
assertTrue(actual.containsAll(expected));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(1))) {
// second split is on rack1, contains file1
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(0, fileSplit.getOffset(0));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(2))) {
          // third split contains file3 and file4; however,
          // its locations are undetermined.
assertEquals(6, fileSplit.getNumPaths());
expected.clear();
expected.add(new Split(file3.getName(), BLOCKSIZE, 0));
expected.add(new Split(file3.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file3.getName(), BLOCKSIZE, BLOCKSIZE * 2));
expected.add(new Split(file4.getName(), BLOCKSIZE, 0));
expected.add(new Split(file4.getName(), BLOCKSIZE, BLOCKSIZE));
expected.add(new Split(file4.getName(), BLOCKSIZE, BLOCKSIZE * 2));
actual.clear();
for (int i = 0; i < 6; i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
assertTrue(actual.containsAll(expected));
assertEquals(1, fileSplit.getLocations().length);
}
} else {
fail("Split size should be 2 or 3.");
}
}
// measure performance when there are multiple pools and
// many files in each pool.
int numPools = 100;
int numFiles = 1000;
DummyInputFormat1 inFormat1 = new DummyInputFormat1();
for (int i = 0; i < numFiles; i++) {
FileInputFormat.setInputPaths(job, file1);
}
inFormat1.setMinSplitSizeRack(1); // everything is at least rack local
final Path dirNoMatch1 = new Path(inDir, "/dirxx");
final Path dirNoMatch2 = new Path(inDir, "/diryy");
for (int i = 0; i < numPools; i++) {
inFormat1.createPool(new TestFilter(dirNoMatch1),
new TestFilter(dirNoMatch2));
}
long start = System.currentTimeMillis();
splits = inFormat1.getSplits(job);
long end = System.currentTimeMillis();
System.out.println("Elapsed time for " + numPools + " pools " +
" and " + numFiles + " files is " +
((end - start)/1000) + " seconds.");
// This file has three whole blocks. If the maxsplit size is
// half the block size, then there should be six splits.
inFormat = new DummyInputFormat();
inFormat.setMaxSplitSize(BLOCKSIZE/2);
FileInputFormat.setInputPaths(job, dir3);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test8): " + split);
}
      assertEquals(6, splits.size());
} finally {
if (dfs != null) {
dfs.shutdown();
}
}
}
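  /**
   * Creates a test file consisting of numBlocks blocks with the given
   * replication factor and waits until the target replication is reached.
   */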
static void writeFile(Configuration conf, Path name,
short replication, int numBlocks)
throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
FSDataOutputStream stm = fileSys.create(name, true,
conf.getInt("io.file.buffer.size", 4096),
replication, (long)BLOCKSIZE);
writeDataAndSetReplication(fileSys, name, stm, replication, numBlocks);
}
  // Creates the gzip file and returns its FileStatus
static FileStatus writeGzipFile(Configuration conf, Path name,
short replication, int numBlocks)
throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
GZIPOutputStream out = new GZIPOutputStream(fileSys.create(name, true, conf
.getInt("io.file.buffer.size", 4096), replication, (long) BLOCKSIZE));
writeDataAndSetReplication(fileSys, name, out, replication, numBlocks);
return fileSys.getFileStatus(name);
}
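  // Writes numBlocks chunks of test data to the stream, closes it, and
  // waits until the file reaches the requested replication factor.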
private static void writeDataAndSetReplication(FileSystem fileSys, Path name,
OutputStream out, short replication, int numBlocks)
throws IOException, TimeoutException, InterruptedException {
for (int i = 0; i < numBlocks; i++) {
out.write(databuf);
}
out.close();
DFSTestUtil.waitReplication(fileSys, name, replication);
}
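  /**
   * Builds splits from synthetic block locations spread across ten nodes and
   * checks that at least 90% of the resulting splits are node-local, i.e.
   * carry exactly one location.
   */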
@Test
public void testNodeDistribution() throws IOException, InterruptedException {
DummyInputFormat inFormat = new DummyInputFormat();
int numBlocks = 60;
long totLength = 0;
long blockSize = 100;
int numNodes = 10;
long minSizeNode = 50;
long minSizeRack = 50;
    int maxSplitSize = 200; // 2 blocks per split.
String[] locations = new String[numNodes];
for (int i = 0; i < numNodes; i++) {
locations[i] = "h" + i;
}
String[] racks = new String[0];
Path path = new Path("hdfs://file");
OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
int hostCountBase = 0;
// Generate block list. Replication 3 per block.
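    // Each block's three replica hosts are consecutive nodes (mod numNodes),
    // and the starting host advances by one per block, so the replicas are
    // spread evenly across all ten nodes.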
for (int i = 0; i < numBlocks; i++) {
int localHostCount = hostCountBase;
String[] blockHosts = new String[3];
for (int j = 0; j < 3; j++) {
int hostNum = localHostCount % numNodes;
blockHosts[j] = "h" + hostNum;
localHostCount++;
}
hostCountBase++;
blocks[i] = new OneBlockInfo(path, i * blockSize, blockSize, blockHosts,
racks);
totLength += blockSize;
}
List<InputSplit> splits = new ArrayList<InputSplit>();
HashMap<String, Set<String>> rackToNodes = new HashMap<String, Set<String>>();
HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();
HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();
Map<String, Set<OneBlockInfo>> nodeToBlocks = new TreeMap<String, Set<OneBlockInfo>>();
OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes,
nodeToBlocks, rackToNodes);
inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,
maxSplitSize, minSizeNode, minSizeRack, splits);
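    // With 60 blocks of 100 bytes each, totLength is 6000 bytes; a max split
    // size of 200 should therefore yield 6000 / 200 = 30 splits.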
int expectedSplitCount = (int) (totLength / maxSplitSize);
assertEquals(expectedSplitCount, splits.size());
// Ensure 90+% of the splits have node local blocks.
// 100% locality may not always be achieved.
int numLocalSplits = 0;
for (InputSplit inputSplit : splits) {
assertEquals(maxSplitSize, inputSplit.getLength());
if (inputSplit.getLocations().length == 1) {
numLocalSplits++;
}
}
assertTrue(numLocalSplits >= 0.9 * splits.size());
}
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
// Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on
// both nodes. The grouping ensures that both nodes get splits instead of
// just the first node
DummyInputFormat inFormat = new DummyInputFormat();
int numBlocks = 12;
long totLength = 0;
long blockSize = 100;
long maxSize = 200;
long minSizeNode = 50;
long minSizeRack = 50;
String[] locations = { "h1", "h2" };
String[] racks = new String[0];
Path path = new Path("hdfs://file");
OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
for(int i=0; i<numBlocks; ++i) {
blocks[i] = new OneBlockInfo(path, i*blockSize, blockSize, locations, racks);
totLength += blockSize;
}
List<InputSplit> splits = new ArrayList<InputSplit>();
HashMap<String, Set<String>> rackToNodes =
new HashMap<String, Set<String>>();
HashMap<String, List<OneBlockInfo>> rackToBlocks =
new HashMap<String, List<OneBlockInfo>>();
HashMap<OneBlockInfo, String[]> blockToNodes =
new HashMap<OneBlockInfo, String[]>();
HashMap<String, Set<OneBlockInfo>> nodeToBlocks =
new HashMap<String, Set<OneBlockInfo>>();
OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes,
nodeToBlocks, rackToNodes);
inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,
maxSize, minSizeNode, minSizeRack, splits);
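    // 12 blocks of 100 bytes give totLength = 1200; with maxSize = 200 this
    // should yield 1200 / 200 = 6 splits, spread evenly over the two nodes.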
int expectedSplitCount = (int)(totLength/maxSize);
assertEquals(expectedSplitCount, splits.size());
HashMultiset<String> nodeSplits = HashMultiset.create();
for(int i=0; i<expectedSplitCount; ++i) {
InputSplit inSplit = splits.get(i);
assertEquals(maxSize, inSplit.getLength());
assertEquals(1, inSplit.getLocations().length);
nodeSplits.add(inSplit.getLocations()[0]);
}
assertEquals(3, nodeSplits.count(locations[0]));
assertEquals(3, nodeSplits.count(locations[1]));
}
/**
* The test suppresses unchecked warnings in
   * {@link org.mockito.Mockito#reset}. Although calling the method this way
   * is bad practice, we do so instead of splitting the test
   * (i.e. restarting the MiniDFSCluster) in order to save time.
*/
@Test
@SuppressWarnings("unchecked")
public void testSplitPlacementForCompressedFiles() throws Exception {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
try {
/* Start 3 datanodes, one each in rack r1, r2, r3. Create five gzipped
* files
* 1) file1 and file5, just after starting the datanode on r1, with
* a repl factor of 1, and,
* 2) file2, just after starting the datanode on r2, with
* a repl factor of 2, and,
       * 3) file3, file4 after starting all three datanodes, with a repl
* factor of 3.
* At the end, file1, file5 will be present on only datanode1, file2 will
* be present on datanode 1 and datanode2 and
* file3, file4 will be present on all datanodes.
*/
Configuration conf = new Configuration();
conf.setBoolean("dfs.replication.considerLoad", false);
dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
.build();
dfs.waitActive();
fileSys = dfs.getFileSystem();
if (!fileSys.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
Path file1 = new Path(dir1 + "/file1.gz");
FileStatus f1 = writeGzipFile(conf, file1, (short)1, 1);
// create another file on the same datanode
Path file5 = new Path(dir5 + "/file5.gz");
FileStatus f5 = writeGzipFile(conf, file5, (short)1, 1);
// split it using a CombinedFile input format
DummyInputFormat inFormat = new DummyInputFormat();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, dir1 + "," + dir5);
List<InputSplit> splits = inFormat.getSplits(job);
System.out.println("Made splits(Test0): " + splits.size());
for (InputSplit split : splits) {
System.out.println("File split(Test0): " + split);
}
assertEquals(1, splits.size());
CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(f1.getLen(), fileSplit.getLength(0));
assertEquals(file5.getName(), fileSplit.getPath(1).getName());
assertEquals(0, fileSplit.getOffset(1));
assertEquals(f5.getLen(), fileSplit.getLength(1));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
dfs.startDataNodes(conf, 1, true, null, rack2, hosts2, null);
dfs.waitActive();
// create file on two datanodes.
Path file2 = new Path(dir2 + "/file2.gz");
FileStatus f2 = writeGzipFile(conf, file2, (short)2, 2);
// split it using a CombinedFile input format
inFormat = new DummyInputFormat();
FileInputFormat.setInputPaths(job, dir1 + "," + dir2);
inFormat.setMinSplitSizeRack(f1.getLen());
splits = inFormat.getSplits(job);
System.out.println("Made splits(Test1): " + splits.size());
// make sure that each split has different locations
for (InputSplit split : splits) {
System.out.println("File split(Test1): " + split);
}
Set<Split> expected = new HashSet<>();
expected.add(new Split(file1.getName(), f1.getLen(), 0));
expected.add(new Split(file2.getName(), f2.getLen(), 0));
List<Split> actual = new ArrayList<>();
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1. Otherwise create two splits.
*/
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
if (splits.size() == 2) {
if (split.equals(splits.get(0))) {
// first split is on rack2, contains file2.
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file2.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(f2.getLen(), fileSplit.getLength(0));
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(1))) {
// second split is on rack1, contains file1.
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(f1.getLen(), fileSplit.getLength(0));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 1) {
// first split is on rack1, contains file1 and file2.
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} else {
fail("Split size should be 1 or 2.");
}
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
}
assertEquals(2, actual.size());
assertTrue(actual.containsAll(expected));
// create another file on 3 datanodes and 3 racks.
dfs.startDataNodes(conf, 1, true, null, rack3, hosts3, null);
dfs.waitActive();
Path file3 = new Path(dir3 + "/file3.gz");
FileStatus f3 = writeGzipFile(conf, file3, (short)3, 3);
inFormat = new DummyInputFormat();
FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3);
inFormat.setMinSplitSizeRack(f1.getLen());
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test2): " + split);
}
expected.add(new Split(file3.getName(), f3.getLen(), 0));
actual.clear();
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1.
* If rack2 or rack3 is processed first and rack1 is processed second,
* create one split on rack2 or rack3 and the other split is on rack1.
* Otherwise create 3 splits for each rack.
*/
if (splits.size() == 3) {
// first split is on rack3, contains file3
if (split.equals(splits.get(0))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file3.getName(), fileSplit.getPath(0).getName());
assertEquals(f3.getLen(), fileSplit.getLength(0));
assertEquals(0, fileSplit.getOffset(0));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts3[0], fileSplit.getLocations()[0]);
}
// second split is on rack2, contains file2
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file2.getName(), fileSplit.getPath(0).getName());
assertEquals(f2.getLen(), fileSplit.getLength(0));
assertEquals(0, fileSplit.getOffset(0));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
// third split is on rack1, contains file1
if (split.equals(splits.get(2))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(f1.getLen(), fileSplit.getLength(0));
assertEquals(0, fileSplit.getOffset(0));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 2) {
// first split is on rack2 or rack3, contains one or two files.
if (split.equals(splits.get(0))) {
assertEquals(1, fileSplit.getLocations().length);
if (fileSplit.getLocations()[0].equals(hosts2[0])) {
assertEquals(2, fileSplit.getNumPaths());
} else if (fileSplit.getLocations()[0].equals(hosts3[0])) {
assertEquals(1, fileSplit.getNumPaths());
} else {
fail("First split should be on rack2 or rack3.");
}
}
        // second split is on rack1, contains the rest of the files.
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 1) {
        // first split is on rack1, contains all three files.
assertEquals(1, fileSplit.getLocations().length);
assertEquals(3, fileSplit.getNumPaths());
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} else {
fail("Split size should be 1, 2, or 3.");
}
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
}
assertEquals(3, actual.size());
assertTrue(actual.containsAll(expected));
// create file4 on all three racks
Path file4 = new Path(dir4 + "/file4.gz");
FileStatus f4 = writeGzipFile(conf, file4, (short)3, 3);
inFormat = new DummyInputFormat();
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
inFormat.setMinSplitSizeRack(f1.getLen());
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test3): " + split);
}
expected.add(new Split(file3.getName(), f3.getLen(), 0));
actual.clear();
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1.
* If rack2 or rack3 is processed first and rack1 is processed second,
* create one split on rack2 or rack3 and the other split is on rack1.
* Otherwise create 3 splits for each rack.
*/
if (splits.size() == 3) {
// first split is on rack3, contains file3 and file4
if (split.equals(splits.get(0))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(hosts3[0], fileSplit.getLocations()[0]);
}
// second split is on rack2, contains file2
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file2.getName(), fileSplit.getPath(0).getName());
assertEquals(f2.getLen(), fileSplit.getLength(0));
assertEquals(0, fileSplit.getOffset(0));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
// third split is on rack1, contains file1
if (split.equals(splits.get(2))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(f1.getLen(), fileSplit.getLength(0));
assertEquals(0, fileSplit.getOffset(0));
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 2) {
// first split is on rack2 or rack3, contains two or three files.
if (split.equals(splits.get(0))) {
assertEquals(1, fileSplit.getLocations().length);
if (fileSplit.getLocations()[0].equals(hosts2[0])) {
assertEquals(3, fileSplit.getNumPaths());
} else if (fileSplit.getLocations()[0].equals(hosts3[0])) {
assertEquals(2, fileSplit.getNumPaths());
} else {
fail("First split should be on rack2 or rack3.");
}
}
        // second split is on rack1, contains the rest of the files.
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 1) {
        // first split is on rack1, contains all four files.
assertEquals(1, fileSplit.getLocations().length);
assertEquals(4, fileSplit.getNumPaths());
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} else {
fail("Split size should be 1, 2, or 3.");
}
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
}
assertEquals(4, actual.size());
assertTrue(actual.containsAll(expected));
// maximum split size is file1's length
inFormat = new DummyInputFormat();
inFormat.setMinSplitSizeNode(f1.getLen());
inFormat.setMaxSplitSize(f1.getLen());
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test4): " + split);
}
assertEquals(4, splits.size());
actual.clear();
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(4, actual.size());
assertTrue(actual.containsAll(expected));
verify(mockList, atLeastOnce()).add(hosts1[0]);
verify(mockList, atLeastOnce()).add(hosts2[0]);
verify(mockList, atLeastOnce()).add(hosts3[0]);
// maximum split size is twice file1's length
inFormat = new DummyInputFormat();
inFormat.setMinSplitSizeNode(f1.getLen());
inFormat.setMaxSplitSize(2 * f1.getLen());
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test5): " + split);
}
actual.clear();
reset(mockList);
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(4, actual.size());
assertTrue(actual.containsAll(expected));
if (splits.size() == 3) {
// splits are on all the racks
verify(mockList, times(1)).add(hosts1[0]);
verify(mockList, times(1)).add(hosts2[0]);
verify(mockList, times(1)).add(hosts3[0]);
} else if (splits.size() == 2) {
// one split is on rack1, another split is on rack2 or rack3
verify(mockList, times(1)).add(hosts1[0]);
} else {
fail("Split size should be 2 or 3.");
}
// maximum split size is 4 times file1's length
inFormat = new DummyInputFormat();
inFormat.setMinSplitSizeNode(2 * f1.getLen());
inFormat.setMaxSplitSize(4 * f1.getLen());
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test6): " + split);
}
/**
* If rack1 is processed first by
* {@link CombineFileInputFormat#createSplits},
* create only one split on rack1. Otherwise create two splits.
*/
assertTrue("Split size should be 1 or 2.",
splits.size() == 1 || splits.size() == 2);
actual.clear();
reset(mockList);
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
mockList.add(fileSplit.getLocations()[0]);
}
assertEquals(4, actual.size());
assertTrue(actual.containsAll(expected));
verify(mockList, times(1)).add(hosts1[0]);
// maximum split size and min-split-size per rack is 4 times file1's length
inFormat = new DummyInputFormat();
inFormat.setMaxSplitSize(4 * f1.getLen());
inFormat.setMinSplitSizeRack(4 * f1.getLen());
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test7): " + split);
}
assertEquals(1, splits.size());
fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(4, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
// minimum split size per node is 4 times file1's length
inFormat = new DummyInputFormat();
inFormat.setMinSplitSizeNode(4 * f1.getLen());
FileInputFormat.setInputPaths(job,
dir1 + "," + dir2 + "," + dir3 + "," + dir4);
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test8): " + split);
}
assertEquals(1, splits.size());
fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(4, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
// Rack 1 has file1, file2 and file3 and file4
// Rack 2 has file2 and file3 and file4
// Rack 3 has file3 and file4
      // set up a filter so that only file1 and file2 can be combined
inFormat = new DummyInputFormat();
FileInputFormat.addInputPath(job, inDir);
inFormat.setMinSplitSizeRack(1); // everything is at least rack local
inFormat.createPool(new TestFilter(dir1),
new TestFilter(dir2));
splits = inFormat.getSplits(job);
for (InputSplit split : splits) {
System.out.println("File split(Test9): " + split);
}
actual.clear();
for (InputSplit split : splits) {
fileSplit = (CombineFileSplit) split;
if (splits.size() == 3) {
// If rack2 is processed first
if (split.equals(splits.get(0))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts2[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(1))) {
assertEquals(1, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(2))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts3[0], fileSplit.getLocations()[0]);
}
} else if (splits.size() == 2) {
// If rack1 is processed first
if (split.equals(splits.get(0))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
}
if (split.equals(splits.get(1))) {
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(hosts3[0], fileSplit.getLocations()[0]);
}
} else {
fail("Split size should be 2 or 3.");
}
for (int i = 0; i < fileSplit.getNumPaths(); i++) {
String name = fileSplit.getPath(i).getName();
long length = fileSplit.getLength(i);
long offset = fileSplit.getOffset(i);
actual.add(new Split(name, length, offset));
}
}
assertEquals(4, actual.size());
assertTrue(actual.containsAll(expected));
// measure performance when there are multiple pools and
// many files in each pool.
int numPools = 100;
int numFiles = 1000;
DummyInputFormat1 inFormat1 = new DummyInputFormat1();
for (int i = 0; i < numFiles; i++) {
FileInputFormat.setInputPaths(job, file1);
}
inFormat1.setMinSplitSizeRack(1); // everything is at least rack local
final Path dirNoMatch1 = new Path(inDir, "/dirxx");
final Path dirNoMatch2 = new Path(inDir, "/diryy");
for (int i = 0; i < numPools; i++) {
inFormat1.createPool(new TestFilter(dirNoMatch1),
new TestFilter(dirNoMatch2));
}
long start = System.currentTimeMillis();
splits = inFormat1.getSplits(job);
long end = System.currentTimeMillis();
System.out.println("Elapsed time for " + numPools + " pools " +
" and " + numFiles + " files is " +
((end - start)) + " milli seconds.");
} finally {
if (dfs != null) {
dfs.shutdown();
}
}
}
/**
* Test that CFIF can handle missing blocks.
*/
@Test
public void testMissingBlocks() throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
String testName = "testMissingBlocks";
try {
Configuration conf = new Configuration();
conf.set("fs.hdfs.impl", MissingBlockFileSystem.class.getName());
conf.setBoolean("dfs.replication.considerLoad", false);
dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
.build();
dfs.waitActive();
namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +
(dfs.getFileSystem()).getUri().getPort();
fileSys = dfs.getFileSystem();
if (!fileSys.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
Path file1 = new Path(dir1 + "/file1");
writeFile(conf, file1, (short)1, 1);
// create another file on the same datanode
Path file5 = new Path(dir5 + "/file5");
writeFile(conf, file5, (short)1, 1);
((MissingBlockFileSystem)fileSys).setFileWithMissingBlocks(file1.toUri().getPath());
// split it using a CombinedFile input format
DummyInputFormat inFormat = new DummyInputFormat();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, dir1 + "," + dir5);
List<InputSplit> splits = inFormat.getSplits(job);
System.out.println("Made splits(Test0): " + splits.size());
for (InputSplit split : splits) {
System.out.println("File split(Test0): " + split);
}
      assertEquals(1, splits.size());
CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(2, fileSplit.getNumPaths());
assertEquals(1, fileSplit.getLocations().length);
assertEquals(file1.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(BLOCKSIZE, fileSplit.getLength(0));
assertEquals(file5.getName(), fileSplit.getPath(1).getName());
assertEquals(0, fileSplit.getOffset(1));
assertEquals(BLOCKSIZE, fileSplit.getLength(1));
assertEquals(hosts1[0], fileSplit.getLocations()[0]);
} finally {
if (dfs != null) {
dfs.shutdown();
}
}
}
/**
* Test when the input file's length is 0.
*/
@Test
public void testForEmptyFile() throws Exception {
Configuration conf = new Configuration();
FileSystem fileSys = FileSystem.get(conf);
Path file = new Path("test" + "/file");
FSDataOutputStream out = fileSys.create(file, true,
conf.getInt("io.file.buffer.size", 4096), (short) 1, (long) BLOCKSIZE);
out.write(new byte[0]);
out.close();
// split it using a CombinedFile input format
DummyInputFormat inFormat = new DummyInputFormat();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, "test");
List<InputSplit> splits = inFormat.getSplits(job);
assertEquals(1, splits.size());
CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(0, fileSplit.getLength(0));
fileSys.delete(file.getParent(), true);
}
/**
* Test that directories do not get included as part of getSplits()
*/
@Test
public void testGetSplitsWithDirectory() throws Exception {
MiniDFSCluster dfs = null;
try {
Configuration conf = new Configuration();
dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
.build();
dfs.waitActive();
FileSystem fileSys = dfs.getFileSystem();
// Set up the following directory structure:
// /dir1/: directory
      // /dir1/file1: regular file
// /dir1/dir2/: directory
Path dir1 = new Path("/dir1");
Path file = new Path("/dir1/file1");
Path dir2 = new Path("/dir1/dir2");
if (!fileSys.mkdirs(dir1)) {
throw new IOException("Mkdirs failed to create " + dir1.toString());
}
FSDataOutputStream out = fileSys.create(file);
out.write(new byte[0]);
out.close();
if (!fileSys.mkdirs(dir2)) {
throw new IOException("Mkdirs failed to create " + dir2.toString());
}
// split it using a CombinedFile input format
DummyInputFormat inFormat = new DummyInputFormat();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, "/dir1");
List<InputSplit> splits = inFormat.getSplits(job);
// directories should be omitted from getSplits() - we should only see file1 and not dir2
assertEquals(1, splits.size());
CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
assertEquals(1, fileSplit.getNumPaths());
assertEquals(file.getName(), fileSplit.getPath(0).getName());
assertEquals(0, fileSplit.getOffset(0));
assertEquals(0, fileSplit.getLength(0));
} finally {
if (dfs != null) {
dfs.shutdown();
}
}
}
/**
* Test when input files are from non-default file systems
*/
@Test
public void testForNonDefaultFileSystem() throws Throwable {
Configuration conf = new Configuration();
// use a fake file system scheme as default
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);
// default fs path
assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
// add a local file
Path localPath = new Path("testFile1");
FileSystem lfs = FileSystem.getLocal(conf);
FSDataOutputStream dos = lfs.create(localPath);
dos.writeChars("Local file for CFIF");
dos.close();
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
DummyInputFormat inFormat = new DummyInputFormat();
List<InputSplit> splits = inFormat.getSplits(job);
assertTrue(splits.size() > 0);
for (InputSplit s : splits) {
CombineFileSplit cfs = (CombineFileSplit)s;
for (Path p : cfs.getPaths()) {
assertEquals(p.toUri().getScheme(), "file");
}
}
}
static class TestFilter implements PathFilter {
private Path p;
// store a path prefix in this TestFilter
public TestFilter(Path p) {
this.p = p;
}
// returns true if the specified path matches the prefix stored
// in this TestFilter.
    public boolean accept(Path path) {
      return path.toUri().getPath().startsWith(p.toString());
    }
public String toString() {
return "PathFilter:" + p;
}
}
/*
* Prints out the input splits for the specified files
*/
private void splitRealFiles(String[] args) throws IOException {
Configuration conf = new Configuration();
Job job = Job.getInstance();
FileSystem fs = FileSystem.get(conf);
if (!(fs instanceof DistributedFileSystem)) {
throw new IOException("Wrong file system: " + fs.getClass().getName());
}
long blockSize = fs.getDefaultBlockSize();
DummyInputFormat inFormat = new DummyInputFormat();
for (int i = 0; i < args.length; i++) {
FileInputFormat.addInputPaths(job, args[i]);
}
inFormat.setMinSplitSizeRack(blockSize);
inFormat.setMaxSplitSize(10 * blockSize);
List<InputSplit> splits = inFormat.getSplits(job);
System.out.println("Total number of splits " + splits.size());
for (int i = 0; i < splits.size(); ++i) {
CombineFileSplit fileSplit = (CombineFileSplit) splits.get(i);
System.out.println("Split[" + i + "] " + fileSplit);
}
}
public static void main(String[] args) throws Exception{
// if there are some parameters specified, then use those paths
if (args.length != 0) {
TestCombineFileInputFormat test = new TestCombineFileInputFormat();
test.splitRealFiles(args);
} else {
TestCombineFileInputFormat test = new TestCombineFileInputFormat();
test.testSplitPlacement();
}
}
}
| 75,311 | 39.599461 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFixedLengthInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestFixedLengthInputFormat {
private static Log LOG;
private static Configuration defaultConf;
private static FileSystem localFs;
private static Path workDir;
// some chars for the record data
private static char[] chars;
private static Random charRand;
@BeforeClass
public static void onlyOnce() {
try {
LOG = LogFactory.getLog(TestFixedLengthInputFormat.class.getName());
defaultConf = new Configuration();
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
// our set of chars
chars = ("abcdefghijklmnopqrstuvABCDEFGHIJKLMN OPQRSTUVWXYZ1234567890)"
+ "(*&^%$#@!-=><?:\"{}][';/.,']").toCharArray();
workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestKeyValueFixedLengthInputFormat");
charRand = new Random();
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
/**
* 20 random tests of various record, file, and split sizes. All tests have
* uncompressed file as input.
*/
@Test (timeout=500000)
public void testFormat() throws Exception {
runRandomTests(null);
}
/**
* 20 random tests of various record, file, and split sizes. All tests have
* compressed file as input.
*/
@Test (timeout=500000)
public void testFormatCompressedIn() throws Exception {
runRandomTests(new GzipCodec());
}
/**
* Test with no record length set.
*/
@Test (timeout=5000)
public void testNoRecordLength() throws Exception {
localFs.delete(workDir, true);
Path file = new Path(workDir, new String("testFormat.txt"));
createFile(file, null, 10, 10);
// Create the job and do not set fixed record length
Job job = Job.getInstance(defaultConf);
FileInputFormat.setInputPaths(job, workDir);
FixedLengthInputFormat format = new FixedLengthInputFormat();
List<InputSplit> splits = format.getSplits(job);
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<LongWritable, BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<LongWritable, BytesWritable, LongWritable, BytesWritable>
mcontext =
new MapContextImpl<LongWritable, BytesWritable, LongWritable,
BytesWritable>(job.getConfiguration(), context.getTaskAttemptID(),
reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for not setting record length:", exceptionThrown);
}
/**
* Test with record length set to 0
*/
@Test (timeout=5000)
public void testZeroRecordLength() throws Exception {
localFs.delete(workDir, true);
Path file = new Path(workDir, new String("testFormat.txt"));
createFile(file, null, 10, 10);
Job job = Job.getInstance(defaultConf);
// Set the fixed length record length config property
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.setRecordLength(job.getConfiguration(), 0);
FileInputFormat.setInputPaths(job, workDir);
List<InputSplit> splits = format.getSplits(job);
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
TaskAttemptContext context =
MapReduceTestUtil.createDummyMapTaskAttemptContext(
job.getConfiguration());
RecordReader<LongWritable, BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<LongWritable, BytesWritable, LongWritable, BytesWritable>
mcontext =
new MapContextImpl<LongWritable, BytesWritable, LongWritable,
BytesWritable>(job.getConfiguration(), context.getTaskAttemptID(),
reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for zero record length:", exceptionThrown);
}
/**
* Test with record length set to a negative value
*/
@Test (timeout=5000)
public void testNegativeRecordLength() throws Exception {
localFs.delete(workDir, true);
Path file = new Path(workDir, new String("testFormat.txt"));
createFile(file, null, 10, 10);
// Set the fixed length record length config property
Job job = Job.getInstance(defaultConf);
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.setRecordLength(job.getConfiguration(), -10);
FileInputFormat.setInputPaths(job, workDir);
List<InputSplit> splits = format.getSplits(job);
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<LongWritable, BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<LongWritable, BytesWritable, LongWritable, BytesWritable>
mcontext =
new MapContextImpl<LongWritable, BytesWritable, LongWritable,
BytesWritable>(job.getConfiguration(), context.getTaskAttemptID(),
reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for negative record length:", exceptionThrown);
}
/**
* Test with partial record at the end of a compressed input file.
*/
@Test (timeout=5000)
public void testPartialRecordCompressedIn() throws Exception {
CompressionCodec gzip = new GzipCodec();
runPartialRecordTest(gzip);
}
/**
* Test with partial record at the end of an uncompressed input file.
*/
@Test (timeout=5000)
public void testPartialRecordUncompressedIn() throws Exception {
runPartialRecordTest(null);
}
/**
* Test using the gzip codec with two input files.
*/
@Test (timeout=5000)
public void testGzipWithTwoInputs() throws Exception {
CompressionCodec gzip = new GzipCodec();
localFs.delete(workDir, true);
Job job = Job.getInstance(defaultConf);
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.setRecordLength(job.getConfiguration(), 5);
ReflectionUtils.setConf(gzip, job.getConfiguration());
FileInputFormat.setInputPaths(job, workDir);
// Create files with fixed length records with 5 byte long records.
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"one two threefour five six seveneightnine ten ");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"ten nine eightsevensix five four threetwo one ");
List<InputSplit> splits = format.getSplits(job);
assertEquals("compressed splits == 2", 2, splits.size());
FileSplit tmp = (FileSplit) splits.get(0);
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits.set(0, splits.get(1));
splits.set(1, tmp);
}
List<String> results = readSplit(format, splits.get(0), job);
assertEquals("splits[0] length", 10, results.size());
assertEquals("splits[0][5]", "six ", results.get(5));
results = readSplit(format, splits.get(1), job);
assertEquals("splits[1] length", 10, results.size());
assertEquals("splits[1][0]", "ten ", results.get(0));
assertEquals("splits[1][1]", "nine ", results.get(1));
}
// Create a file containing fixed length records with random data
private ArrayList<String> createFile(Path targetFile, CompressionCodec codec,
int recordLen,
int numRecords) throws IOException {
ArrayList<String> recordList = new ArrayList<String>(numRecords);
OutputStream ostream = localFs.create(targetFile);
if (codec != null) {
ostream = codec.createOutputStream(ostream);
}
Writer writer = new OutputStreamWriter(ostream);
try {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < recordLen; j++) {
sb.append(chars[charRand.nextInt(chars.length)]);
}
String recordData = sb.toString();
recordList.add(recordData);
writer.write(recordData);
sb.setLength(0);
}
} finally {
writer.close();
}
return recordList;
}
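  // Runs MAX_TESTS randomized iterations: each one creates a file with a
  // random record length and record count (optionally compressed), splits it
  // using a randomly derived max split size, and verifies that every record
  // is read back exactly once with the expected key offset and content.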
private void runRandomTests(CompressionCodec codec) throws Exception {
StringBuilder fileName = new StringBuilder("testFormat.txt");
if (codec != null) {
fileName.append(".gz");
}
localFs.delete(workDir, true);
Path file = new Path(workDir, fileName.toString());
int seed = new Random().nextInt();
LOG.info("Seed = " + seed);
Random random = new Random(seed);
int MAX_TESTS = 20;
LongWritable key;
BytesWritable value;
for (int i = 0; i < MAX_TESTS; i++) {
LOG.info("----------------------------------------------------------");
// Maximum total records of 999
int totalRecords = random.nextInt(999)+1;
// Test an empty file
if (i == 8) {
totalRecords = 0;
}
// Maximum bytes in a record of 100K
int recordLength = random.nextInt(1024*100)+1;
// For the 11th test, force a record length of 1
if (i == 10) {
recordLength = 1;
}
// The total bytes in the test file
int fileSize = (totalRecords * recordLength);
LOG.info("totalRecords=" + totalRecords + " recordLength="
+ recordLength);
// Create the job
Job job = Job.getInstance(defaultConf);
if (codec != null) {
ReflectionUtils.setConf(codec, job.getConfiguration());
}
// Create the test file
ArrayList<String> recordList =
createFile(file, codec, recordLength, totalRecords);
assertTrue(localFs.exists(file));
//set the fixed length record length config property for the job
FixedLengthInputFormat.setRecordLength(job.getConfiguration(),
recordLength);
int numSplits = 1;
// Arbitrarily set number of splits.
if (i > 0) {
if (i == (MAX_TESTS-1)) {
// Test a split size that is less than record len
numSplits = (int)(fileSize/Math.floor(recordLength/2));
} else {
if (MAX_TESTS % i == 0) {
            // Let us create a split size that is forced to be
            // smaller than the file itself (ensures 1+ splits)
numSplits = fileSize/(fileSize - random.nextInt(fileSize));
} else {
// Just pick a random split size with no upper bound
numSplits = Math.max(1, fileSize/random.nextInt(Integer.MAX_VALUE));
}
}
LOG.info("Number of splits set to: " + numSplits);
}
job.getConfiguration().setLong(
"mapreduce.input.fileinputformat.split.maxsize",
(long)(fileSize/numSplits));
// setup the input path
FileInputFormat.setInputPaths(job, workDir);
// Try splitting the file in a variety of sizes
FixedLengthInputFormat format = new FixedLengthInputFormat();
List<InputSplit> splits = format.getSplits(job);
LOG.info("Actual number of splits = " + splits.size());
// Test combined split lengths = total file size
long recordOffset = 0;
int recordNumber = 0;
for (InputSplit split : splits) {
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<LongWritable, BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<LongWritable, BytesWritable, LongWritable, BytesWritable>
mcontext =
new MapContextImpl<LongWritable, BytesWritable, LongWritable,
BytesWritable>(job.getConfiguration(), context.getTaskAttemptID(),
reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
Class<?> clazz = reader.getClass();
assertEquals("RecordReader class should be FixedLengthRecordReader:",
FixedLengthRecordReader.class, clazz);
// Plow through the records in this split
while (reader.nextKeyValue()) {
key = reader.getCurrentKey();
value = reader.getCurrentValue();
assertEquals("Checking key", (long)(recordNumber*recordLength),
key.get());
String valueString = new String(value.getBytes(), 0,
value.getLength());
assertEquals("Checking record length:", recordLength,
value.getLength());
assertTrue("Checking for more records than expected:",
recordNumber < totalRecords);
String origRecord = recordList.get(recordNumber);
assertEquals("Checking record content:", origRecord, valueString);
recordNumber++;
}
reader.close();
}
assertEquals("Total original records should be total read records:",
recordList.size(), recordNumber);
}
}
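  // Writes the given string to a new file, optionally compressing it with
  // the supplied codec.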
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
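  // Reads every record in the given split and returns the record contents
  // as strings.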
private static List<String> readSplit(FixedLengthInputFormat format,
InputSplit split,
Job job) throws Exception {
List<String> result = new ArrayList<String>();
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<LongWritable, BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<LongWritable, BytesWritable, LongWritable, BytesWritable>
mcontext =
new MapContextImpl<LongWritable, BytesWritable, LongWritable,
BytesWritable>(job.getConfiguration(), context.getTaskAttemptID(),
reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
LongWritable key;
BytesWritable value;
try {
reader.initialize(split, mcontext);
while (reader.nextKeyValue()) {
key = reader.getCurrentKey();
value = reader.getCurrentValue();
result.add(new String(value.getBytes(), 0, value.getLength()));
}
} finally {
reader.close();
}
return result;
}
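  // Writes a file that ends with an incomplete 5-byte record and verifies
  // that reading it fails with an IOException for the trailing partial
  // record.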
private void runPartialRecordTest(CompressionCodec codec) throws Exception {
localFs.delete(workDir, true);
Job job = Job.getInstance(defaultConf);
// Create a file with fixed length records with 5 byte long
// records with a partial record at the end.
StringBuilder fileName = new StringBuilder("testFormat.txt");
if (codec != null) {
fileName.append(".gz");
ReflectionUtils.setConf(codec, job.getConfiguration());
}
writeFile(localFs, new Path(workDir, fileName.toString()), codec,
"one two threefour five six seveneightnine ten");
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.setRecordLength(job.getConfiguration(), 5);
FileInputFormat.setInputPaths(job, workDir);
List<InputSplit> splits = format.getSplits(job);
if (codec != null) {
assertEquals("compressed splits == 1", 1, splits.size());
}
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
List<String> results = readSplit(format, split, job);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for partial record:", exceptionThrown);
}
}
| 18,356 | 38.308351 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.conf.*;
public class TestMRSequenceFileAsTextInputFormat extends TestCase {
private static int MAX_LENGTH = 10000;
private static Configuration conf = new Configuration();
public void testFormat() throws Exception {
Job job = Job.getInstance(conf);
FileSystem fs = FileSystem.getLocal(conf);
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path file = new Path(dir, "test.seq");
int seed = new Random().nextInt();
Random random = new Random(seed);
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 10) + 1) {
// create a file with length entries
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, file,
IntWritable.class, LongWritable.class);
try {
for (int i = 0; i < length; i++) {
IntWritable key = new IntWritable(i);
LongWritable value = new LongWritable(10 * i);
writer.append(key, value);
}
} finally {
writer.close();
}
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
// try splitting the file in a variety of sizes
InputFormat<Text, Text> format =
new SequenceFileAsTextInputFormat();
for (int i = 0; i < 3; i++) {
// check each split
BitSet bits = new BitSet(length);
int numSplits =
random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
FileInputFormat.setMaxInputSplitSize(job,
fs.getFileStatus(file).getLen() / numSplits);
for (InputSplit split : format.getSplits(job)) {
RecordReader<Text, Text> reader =
format.createRecordReader(split, context);
MapContext<Text, Text, Text, Text> mcontext =
new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(),
split);
reader.initialize(split, mcontext);
Class<?> readerClass = reader.getClass();
assertEquals("reader class is SequenceFileAsTextRecordReader.",
SequenceFileAsTextRecordReader.class, readerClass);
Text key;
try {
int count = 0;
while (reader.nextKeyValue()) {
key = reader.getCurrentKey();
int keyInt = Integer.parseInt(key.toString());
assertFalse("Key in multiple partitions.", bits.get(keyInt));
bits.set(keyInt);
count++;
}
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
public static void main(String[] args) throws Exception {
new TestMRSequenceFileAsTextInputFormat().testFormat();
}
}
| 4,460 | 36.175 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileInputFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.conf.*;
public class TestMRSequenceFileInputFilter extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestMRSequenceFileInputFilter.class.getName());
private static final int MAX_LENGTH = 15000;
private static final Configuration conf = new Configuration();
private static final Job job;
private static final FileSystem fs;
private static final Path inDir =
new Path(System.getProperty("test.build.data",".") + "/mapred");
private static final Path inFile = new Path(inDir, "test.seq");
private static final Random random = new Random(1);
static {
try {
job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, inDir);
fs = FileSystem.getLocal(conf);
} catch (IOException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private static void createSequenceFile(int numRecords) throws Exception {
    // create a file with numRecords entries
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, inFile,
Text.class, BytesWritable.class);
try {
for (int i = 1; i <= numRecords; i++) {
Text key = new Text(Integer.toString(i));
byte[] data = new byte[random.nextInt(10)];
random.nextBytes(data);
BytesWritable value = new BytesWritable(data);
writer.append(key, value);
}
} finally {
writer.close();
}
}
private int countRecords(int numSplits)
throws IOException, InterruptedException {
InputFormat<Text, BytesWritable> format =
new SequenceFileInputFilter<Text, BytesWritable>();
if (numSplits == 0) {
numSplits =
random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
}
FileInputFormat.setMaxInputSplitSize(job,
fs.getFileStatus(inFile).getLen() / numSplits);
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
// check each split
int count = 0;
for (InputSplit split : format.getSplits(job)) {
RecordReader<Text, BytesWritable> reader =
format.createRecordReader(split, context);
MapContext<Text, BytesWritable, Text, BytesWritable> mcontext =
new MapContextImpl<Text, BytesWritable, Text, BytesWritable>(
job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
try {
while (reader.nextKeyValue()) {
LOG.info("Accept record " + reader.getCurrentKey().toString());
count++;
}
} finally {
reader.close();
}
}
return count;
}
public void testRegexFilter() throws Exception {
// set the filter class
LOG.info("Testing Regex Filter with patter: \\A10*");
SequenceFileInputFilter.setFilterClass(job,
SequenceFileInputFilter.RegexFilter.class);
SequenceFileInputFilter.RegexFilter.setPattern(
job.getConfiguration(), "\\A10*");
// clean input dir
fs.delete(inDir, true);
// for a variety of lengths
for (int length = 1; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.info("******Number of records: " + length);
createSequenceFile(length);
int count = countRecords(0);
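      // Keys are the decimal strings "1".."length"; the regex \A10* keeps
      // only "1", "10", "100", ..., so floor(log10(length)) + 1 records
      // should be accepted.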
assertEquals(count, length==0 ? 0 : (int)Math.log10(length) + 1);
}
// clean up
fs.delete(inDir, true);
}
public void testPercentFilter() throws Exception {
LOG.info("Testing Percent Filter with frequency: 1000");
// set the filter class
SequenceFileInputFilter.setFilterClass(job,
SequenceFileInputFilter.PercentFilter.class);
SequenceFileInputFilter.PercentFilter.setFrequency(
job.getConfiguration(), 1000);
// clean input dir
fs.delete(inDir, true);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.info("******Number of records: "+length);
createSequenceFile(length);
int count = countRecords(1);
LOG.info("Accepted " + count + " records");
int expectedCount = length / 1000;
if (expectedCount * 1000 != length)
expectedCount++;
assertEquals(count, expectedCount);
}
// clean up
fs.delete(inDir, true);
}
public void testMD5Filter() throws Exception {
// set the filter class
LOG.info("Testing MD5 Filter with frequency: 1000");
SequenceFileInputFilter.setFilterClass(job,
SequenceFileInputFilter.MD5Filter.class);
SequenceFileInputFilter.MD5Filter.setFrequency(
job.getConfiguration(), 1000);
// clean input dir
fs.delete(inDir, true);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.info("******Number of records: " + length);
createSequenceFile(length);
LOG.info("Accepted " + countRecords(0) + " records");
}
// clean up
fs.delete(inDir, true);
}
public static void main(String[] args) throws Exception {
TestMRSequenceFileInputFilter filter = new TestMRSequenceFileInputFilter();
filter.testRegexFilter();
}
}
| 6,786 | 33.627551 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
public class TestDelegatingInputFormat extends TestCase {
@SuppressWarnings("unchecked")
public void testSplitting() throws Exception {
Job job = Job.getInstance();
MiniDFSCluster dfs = null;
try {
dfs = new MiniDFSCluster.Builder(job.getConfiguration()).numDataNodes(4)
.racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
.hosts(new String[] { "host0", "host1", "host2", "host3" })
.build();
FileSystem fs = dfs.getFileSystem();
Path path = getPath("/foo/bar", fs);
Path path2 = getPath("/foo/baz", fs);
Path path3 = getPath("/bar/bar", fs);
Path path4 = getPath("/bar/baz", fs);
final int numSplits = 100;
FileInputFormat.setMaxInputSplitSize(job,
fs.getFileStatus(path).getLen() / numSplits);
MultipleInputs.addInputPath(job, path, TextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(job, path2, TextInputFormat.class,
MapClass2.class);
MultipleInputs.addInputPath(job, path3, KeyValueTextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(job, path4, TextInputFormat.class,
MapClass2.class);
DelegatingInputFormat inFormat = new DelegatingInputFormat();
int[] bins = new int[3];
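      // bins[0]: splits read with KeyValueTextInputFormat (path3)
      // bins[1]: TextInputFormat splits mapped by MapClass (path)
      // bins[2]: TextInputFormat splits mapped by MapClass2 (path2, path4)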
for (InputSplit split : (List<InputSplit>)inFormat.getSplits(job)) {
assertTrue(split instanceof TaggedInputSplit);
final TaggedInputSplit tis = (TaggedInputSplit) split;
int index = -1;
if (tis.getInputFormatClass().equals(KeyValueTextInputFormat.class)) {
// path3
index = 0;
} else if (tis.getMapperClass().equals(MapClass.class)) {
// path
index = 1;
} else {
// path2 and path4
index = 2;
}
bins[index]++;
}
assertEquals("count is not equal to num splits", numSplits, bins[0]);
assertEquals("count is not equal to num splits", numSplits, bins[1]);
assertEquals("count is not equal to 2 * num splits",
numSplits * 2, bins[2]);
} finally {
if (dfs != null) {
dfs.shutdown();
}
}
}
static Path getPath(final String location, final FileSystem fs)
throws IOException {
Path path = new Path(location);
// create a multi-block file on hdfs
DataOutputStream out = fs.create(path, true, 4096, (short) 2, 512, null);
for (int i = 0; i < 1000; ++i) {
out.writeChars("Hello\n");
}
out.close();
return path;
}
static class MapClass extends Mapper<String, String, String, String> {
}
static class MapClass2 extends MapClass {
}
}
| 3,894 | 32.290598 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReaderJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.UtilsForTests;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.Test;
public class TestLineRecordReaderJobs {
private static Path workDir = new Path(new Path(System.getProperty(
"test.build.data", "."), "data"), "TestTextInputFormat");
private static Path inputDir = new Path(workDir, "input");
private static Path outputDir = new Path(workDir, "output");
  /**
   * Writes the input test file
   *
   * @param conf the configuration used to obtain the local file system
   * @throws IOException if the input file cannot be written
   */
public void createInputFile(Configuration conf) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path file = new Path(inputDir, "test.txt");
Writer writer = new OutputStreamWriter(localFs.create(file));
writer.write("abc\ndef\t\nghi\njkl");
writer.close();
}
  /**
   * Reads the output file into a string
   *
   * @param conf the configuration used to obtain the local file system
   * @return the contents of the job output file
   * @throws IOException if the output file cannot be read
   */
public String readOutputFile(Configuration conf) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path file = new Path(outputDir, "part-r-00000");
return UtilsForTests.slurpHadoop(file, localFs);
}
/**
* Creates and runs an MR job
*
* @param conf
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
public void createAndRunJob(Configuration conf) throws IOException,
InterruptedException, ClassNotFoundException {
Job job = Job.getInstance(conf);
job.setJarByClass(TestLineRecordReaderJobs.class);
job.setMapperClass(Mapper.class);
job.setReducerClass(Reducer.class);
FileInputFormat.addInputPath(job, inputDir);
FileOutputFormat.setOutputPath(job, outputDir);
job.waitForCompletion(true);
}
/**
* Test the case when a custom record delimiter is specified using the
* textinputformat.record.delimiter configuration property
*
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
@Test
public void testCustomRecordDelimiters() throws IOException,
InterruptedException, ClassNotFoundException {
Configuration conf = new Configuration();
conf.set("textinputformat.record.delimiter", "\t\n");
FileSystem localFs = FileSystem.getLocal(conf);
// cleanup
localFs.delete(workDir, true);
// creating input test file
createInputFile(conf);
createAndRunJob(conf);
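    // With "\t\n" as the delimiter, the input "abc\ndef\t\nghi\njkl" yields
    // two records: "abc\ndef" at byte offset 0 and "ghi\njkl" at offset 9,
    // emitted by the identity mapper/reducer as "<offset>\t<record>" lines.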
String expected = "0\tabc\ndef\n9\tghi\njkl\n";
assertEquals(expected, readOutputFile(conf));
}
/**
* Test the default behavior when the textinputformat.record.delimiter
* configuration property is not specified
*
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
@Test
public void testDefaultRecordDelimiters() throws IOException,
InterruptedException, ClassNotFoundException {
Configuration conf = new Configuration();
FileSystem localFs = FileSystem.getLocal(conf);
// cleanup
localFs.delete(workDir, true);
// creating input test file
createInputFile(conf);
createAndRunJob(conf);
String expected = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
assertEquals(expected, readOutputFile(conf));
}
}
| 4,482 | 31.963235 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRKeyValueTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.BitSet;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class TestMRKeyValueTextInputFormat {
private static final Log LOG =
LogFactory.getLog(TestMRKeyValueTextInputFormat.class.getName());
private static Configuration defaultConf = new Configuration();
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestKeyValueTextInputFormat");
@Test
public void testFormat() throws Exception {
Job job = Job.getInstance(new Configuration(defaultConf));
Path file = new Path(workDir, "test.txt");
int seed = new Random().nextInt();
LOG.info("seed = " + seed);
Random random = new Random(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int MAX_LENGTH = 10000;
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.debug("creating; entries = " + length);
// create a file with length entries
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i * 2));
writer.write("\t");
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
KeyValueTextInputFormat format = new KeyValueTextInputFormat();
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
LOG.debug("splitting: requesting = " + numSplits);
List<InputSplit> splits = format.getSplits(job);
LOG.debug("splitting: got = " + splits.size());
// check each split
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.size(); j++) {
LOG.debug("split["+j+"]= " + splits.get(j));
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<Text, Text> reader = format.createRecordReader(
splits.get(j), context);
Class<?> clazz = reader.getClass();
assertEquals("reader class is KeyValueLineRecordReader.",
KeyValueLineRecordReader.class, clazz);
MapContext<Text, Text, Text, Text> mcontext =
new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), splits.get(j));
reader.initialize(splits.get(j), mcontext);
Text key = null;
Text value = null;
try {
int count = 0;
while (reader.nextKeyValue()) {
key = reader.getCurrentKey();
clazz = key.getClass();
assertEquals("Key class is Text.", Text.class, clazz);
value = reader.getCurrentValue();
clazz = value.getClass();
assertEquals("Value class is Text.", Text.class, clazz);
final int k = Integer.parseInt(key.toString());
final int v = Integer.parseInt(value.toString());
assertEquals("Bad key", 0, k % 2);
assertEquals("Mismatched key/value", k / 2, v);
LOG.debug("read " + v);
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits[" + j + "]=" + splits.get(j) +" count=" + count);
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
@Test
public void testSplitableCodecs() throws Exception {
final Job job = Job.getInstance(defaultConf);
final Configuration conf = job.getConfiguration();
// Create the codec
CompressionCodec codec = null;
try {
codec = (CompressionCodec)
ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
} catch (ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Path file = new Path(workDir, "test"+codec.getDefaultExtension());
int seed = new Random().nextInt();
LOG.info("seed = " + seed);
Random random = new Random(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int MAX_LENGTH = 500000;
FileInputFormat.setMaxInputSplitSize(job, MAX_LENGTH / 20);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 4) + 1) {
LOG.info("creating; entries = " + length);
// create a file with length entries
Writer writer =
new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i * 2));
writer.write("\t");
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
KeyValueTextInputFormat format = new KeyValueTextInputFormat();
assertTrue("KVTIF claims not splittable", format.isSplitable(job, file));
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(MAX_LENGTH / 2000) + 1;
LOG.info("splitting: requesting = " + numSplits);
List<InputSplit> splits = format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
// check each split
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.size(); j++) {
LOG.debug("split["+j+"]= " + splits.get(j));
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<Text, Text> reader = format.createRecordReader(
splits.get(j), context);
Class<?> clazz = reader.getClass();
MapContext<Text, Text, Text, Text> mcontext =
new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), splits.get(j));
reader.initialize(splits.get(j), mcontext);
Text key = null;
Text value = null;
try {
int count = 0;
while (reader.nextKeyValue()) {
key = reader.getCurrentKey();
value = reader.getCurrentValue();
final int k = Integer.parseInt(key.toString());
final int v = Integer.parseInt(value.toString());
assertEquals("Bad key", 0, k % 2);
assertEquals("Mismatched key/value", k / 2, v);
LOG.debug("read " + k + "," + v);
assertFalse(k + "," + v + " in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
if (count > 0) {
LOG.info("splits["+j+"]="+splits.get(j)+" count=" + count);
} else {
LOG.debug("splits["+j+"]="+splits.get(j)+" count=" + count);
}
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
private LineReader makeStream(String str) throws IOException {
return new LineReader(new ByteArrayInputStream
(str.getBytes("UTF-8")),
defaultConf);
}
@Test
public void testUTF8() throws Exception {
LineReader in = makeStream("abcd\u20acbdcd\u20ac");
Text line = new Text();
in.readLine(line);
assertEquals("readLine changed utf8 characters",
"abcd\u20acbdcd\u20ac", line.toString());
in = makeStream("abc\u200axyz");
in.readLine(line);
assertEquals("split on fake newline", "abc\u200axyz", line.toString());
}
@Test
public void testNewLines() throws Exception {
LineReader in = makeStream("a\nbb\n\nccc\rdddd\r\neeeee");
Text out = new Text();
in.readLine(out);
assertEquals("line1 length", 1, out.getLength());
in.readLine(out);
assertEquals("line2 length", 2, out.getLength());
in.readLine(out);
assertEquals("line3 length", 0, out.getLength());
in.readLine(out);
assertEquals("line4 length", 3, out.getLength());
in.readLine(out);
assertEquals("line5 length", 4, out.getLength());
in.readLine(out);
assertEquals("line5 length", 5, out.getLength());
assertEquals("end of file", 0, in.readLine(out));
}
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static List<Text> readSplit(KeyValueTextInputFormat format,
InputSplit split, Job job) throws IOException, InterruptedException {
List<Text> result = new ArrayList<Text>();
Configuration conf = job.getConfiguration();
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(conf);
RecordReader<Text, Text> reader = format.createRecordReader(split,
MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
MapContext<Text, Text, Text, Text> mcontext =
new MapContextImpl<Text, Text, Text, Text>(conf,
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(),
split);
reader.initialize(split, mcontext);
while (reader.nextKeyValue()) {
result.add(new Text(reader.getCurrentValue()));
}
reader.close();
return result;
}
/**
* Test using the gzip codec for reading
*/
@Test
public void testGzip() throws IOException, InterruptedException {
Configuration conf = new Configuration(defaultConf);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, conf);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"line-1\tthe quick\nline-2\tbrown\nline-3\t" +
"fox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"line-1\tthis is a test\nline-1\tof gzip\n");
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, workDir);
KeyValueTextInputFormat format = new KeyValueTextInputFormat();
List<InputSplit> splits = format.getSplits(job);
assertEquals("compressed splits == 2", 2, splits.size());
FileSplit tmp = (FileSplit) splits.get(0);
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits.set(0, splits.get(1));
splits.set(1, tmp);
}
List<Text> results = readSplit(format, splits.get(0), job);
assertEquals("splits[0] length", 6, results.size());
assertEquals("splits[0][0]", "the quick", results.get(0).toString());
assertEquals("splits[0][1]", "brown", results.get(1).toString());
assertEquals("splits[0][2]", "fox jumped", results.get(2).toString());
assertEquals("splits[0][3]", "over", results.get(3).toString());
assertEquals("splits[0][4]", " the lazy", results.get(4).toString());
assertEquals("splits[0][5]", " dog", results.get(5).toString());
results = readSplit(format, splits.get(1), job);
assertEquals("splits[1] length", 2, results.size());
assertEquals("splits[1][0]", "this is a test",
results.get(0).toString());
assertEquals("splits[1][1]", "of gzip",
results.get(1).toString());
}
public static void main(String[] args) throws Exception {
new TestMRKeyValueTextInputFormat().testFormat();
}
}
| 14,300 | 37.443548 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRSequenceFileAsBinaryInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import junit.framework.TestCase;
public class TestMRSequenceFileAsBinaryInputFormat extends TestCase {
private static final int RECORDS = 10000;
public void testBinary() throws IOException, InterruptedException {
Job job = Job.getInstance();
FileSystem fs = FileSystem.getLocal(job.getConfiguration());
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path file = new Path(dir, "testbinary.seq");
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
Text tkey = new Text();
Text tval = new Text();
SequenceFile.Writer writer = new SequenceFile.Writer(fs,
job.getConfiguration(), file, Text.class, Text.class);
try {
for (int i = 0; i < RECORDS; ++i) {
tkey.set(Integer.toString(r.nextInt(), 36));
tval.set(Long.toString(r.nextLong(), 36));
writer.append(tkey, tval);
}
} finally {
writer.close();
}
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
InputFormat<BytesWritable,BytesWritable> bformat =
new SequenceFileAsBinaryInputFormat();
int count = 0;
r.setSeed(seed);
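    // Re-seed the RNG so the same key/value sequence can be regenerated and
    // compared against the raw bytes returned by the binary reader.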
BytesWritable bkey = new BytesWritable();
BytesWritable bval = new BytesWritable();
Text cmpkey = new Text();
Text cmpval = new Text();
DataInputBuffer buf = new DataInputBuffer();
FileInputFormat.setInputPaths(job, file);
for (InputSplit split : bformat.getSplits(job)) {
RecordReader<BytesWritable, BytesWritable> reader =
bformat.createRecordReader(split, context);
MapContext<BytesWritable, BytesWritable, BytesWritable, BytesWritable>
mcontext = new MapContextImpl<BytesWritable, BytesWritable,
BytesWritable, BytesWritable>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(),
split);
reader.initialize(split, mcontext);
try {
while (reader.nextKeyValue()) {
bkey = reader.getCurrentKey();
bval = reader.getCurrentValue();
tkey.set(Integer.toString(r.nextInt(), 36));
tval.set(Long.toString(r.nextLong(), 36));
buf.reset(bkey.getBytes(), bkey.getLength());
cmpkey.readFields(buf);
buf.reset(bval.getBytes(), bval.getLength());
cmpval.readFields(buf);
assertTrue(
"Keys don't match: " + "*" + cmpkey.toString() + ":" +
tkey.toString() + "*",
cmpkey.toString().equals(tkey.toString()));
assertTrue(
"Vals don't match: " + "*" + cmpval.toString() + ":" +
tval.toString() + "*",
cmpval.toString().equals(tval.toString()));
++count;
}
} finally {
reader.close();
}
}
assertEquals("Some records not found", RECORDS, count);
}
}
| 4,380 | 36.444444 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
public class TestCombineTextInputFormat {
private static final Log LOG =
LogFactory.getLog(TestCombineTextInputFormat.class);
private static Configuration defaultConf = new Configuration();
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestCombineTextInputFormat");
@Test(timeout=10000)
public void testFormat() throws Exception {
Job job = Job.getInstance(new Configuration(defaultConf));
Random random = new Random();
long seed = random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int length = 10000;
final int numFiles = 10;
// create files with various lengths
createFiles(length, numFiles, random);
// create a combined split for the files
CombineTextInputFormat format = new CombineTextInputFormat();
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(length/20) + 1;
LOG.info("splitting: requesting = " + numSplits);
List<InputSplit> splits = format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
// we should have a single split as the length is comfortably smaller than
// the block size
assertEquals("We got more than one splits!", 1, splits.size());
InputSplit split = splits.get(0);
assertEquals("It should be CombineFileSplit",
CombineFileSplit.class, split.getClass());
// check the split
BitSet bits = new BitSet(length);
LOG.debug("split= " + split);
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader<LongWritable, Text> reader =
format.createRecordReader(split, context);
assertEquals("reader class is CombineFileRecordReader.",
CombineFileRecordReader.class, reader.getClass());
MapContext<LongWritable,Text,LongWritable,Text> mcontext =
new MapContextImpl<LongWritable,Text,LongWritable,Text>(job.getConfiguration(),
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(), split);
reader.initialize(split, mcontext);
try {
int count = 0;
while (reader.nextKeyValue()) {
LongWritable key = reader.getCurrentKey();
assertNotNull("Key should not be null.", key);
Text value = reader.getCurrentValue();
final int v = Integer.parseInt(value.toString());
LOG.debug("read " + v);
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
count++;
}
LOG.debug("split=" + split + " count=" + count);
} finally {
reader.close();
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
private static class Range {
private final int start;
private final int end;
Range(int start, int end) {
this.start = start;
this.end = end;
}
@Override
public String toString() {
return "(" + start + ", " + end + ")";
}
}
private static Range[] createRanges(int length, int numFiles, Random random) {
// generate a number of files with various lengths
Range[] ranges = new Range[numFiles];
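    // Ranges are contiguous: each file starts where the previous one ended.
    // The last range ends exactly at 'length'; the others end near the
    // midpoint of their chunk plus a random offset, giving varied sizes.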
for (int i = 0; i < numFiles; i++) {
int start = i == 0 ? 0 : ranges[i-1].end;
int end = i == numFiles - 1 ?
length :
(length/numFiles)*(2*i + 1)/2 + random.nextInt(length/numFiles) + 1;
ranges[i] = new Range(start, end);
}
return ranges;
}
private static void createFiles(int length, int numFiles, Random random)
throws IOException {
Range[] ranges = createRanges(length, numFiles, random);
for (int i = 0; i < numFiles; i++) {
Path file = new Path(workDir, "test_" + i + ".txt");
Writer writer = new OutputStreamWriter(localFs.create(file));
Range range = ranges[i];
try {
for (int j = range.start; j < range.end; j++) {
writer.write(Integer.toString(j));
writer.write("\n");
}
} finally {
writer.close();
}
}
}
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static List<Text> readSplit(InputFormat<LongWritable,Text> format,
InputSplit split, Job job) throws IOException, InterruptedException {
List<Text> result = new ArrayList<Text>();
Configuration conf = job.getConfiguration();
TaskAttemptContext context = MapReduceTestUtil.
createDummyMapTaskAttemptContext(conf);
RecordReader<LongWritable, Text> reader = format.createRecordReader(split,
MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
MapContext<LongWritable,Text,LongWritable,Text> mcontext =
new MapContextImpl<LongWritable,Text,LongWritable,Text>(conf,
context.getTaskAttemptID(), reader, null, null,
MapReduceTestUtil.createDummyReporter(),
split);
reader.initialize(split, mcontext);
while (reader.nextKeyValue()) {
result.add(new Text(reader.getCurrentValue()));
}
return result;
}
/**
* Test using the gzip codec for reading
*/
@Test(timeout=10000)
public void testGzip() throws IOException, InterruptedException {
Configuration conf = new Configuration(defaultConf);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, conf);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"this is a test\nof gzip\n");
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, workDir);
CombineTextInputFormat format = new CombineTextInputFormat();
List<InputSplit> splits = format.getSplits(job);
assertEquals("compressed splits == 1", 1, splits.size());
List<Text> results = readSplit(format, splits.get(0), job);
assertEquals("splits[0] length", 8, results.size());
final String[] firstList =
{"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
final String[] secondList = {"this is a test", "of gzip"};
String first = results.get(0).toString();
if (first.equals(firstList[0])) {
testResults(results, firstList, secondList);
} else if (first.equals(secondList[0])) {
testResults(results, secondList, firstList);
} else {
fail("unexpected first token!");
}
}
private static void testResults(List<Text> results, String[] first,
String[] second) {
for (int i = 0; i < first.length; i++) {
assertEquals("splits[0]["+i+"]", first[i], results.get(i).toString());
}
for (int i = 0; i < second.length; i++) {
int j = i + first.length;
assertEquals("splits[0]["+j+"]", second[i], results.get(j).toString());
}
}
}
| 9,696 | 35.182836 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.junit.Before;
import org.junit.Test;
/**
* @see TestDelegatingInputFormat
*/
public class TestMultipleInputs extends HadoopTestCase {
public TestMultipleInputs() throws IOException {
super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
}
private static final Path ROOT_DIR = new Path("testing/mo");
private static final Path IN1_DIR = new Path(ROOT_DIR, "input1");
private static final Path IN2_DIR = new Path(ROOT_DIR, "input2");
private static final Path OUT_DIR = new Path(ROOT_DIR, "output");
private Path getDir(Path dir) {
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data", "/tmp")
.replace(' ', '+');
dir = new Path(localPathRoot, dir);
}
return dir;
}
@Before
public void setUp() throws Exception {
super.setUp();
Path rootDir = getDir(ROOT_DIR);
Path in1Dir = getDir(IN1_DIR);
Path in2Dir = getDir(IN2_DIR);
Configuration conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(rootDir, true);
if (!fs.mkdirs(in1Dir)) {
throw new IOException("Mkdirs failed to create " + in1Dir.toString());
}
if (!fs.mkdirs(in2Dir)) {
throw new IOException("Mkdirs failed to create " + in2Dir.toString());
}
}
@Test
public void testDoMultipleInputs() throws IOException {
Path in1Dir = getDir(IN1_DIR);
Path in2Dir = getDir(IN2_DIR);
Path outDir = getDir(OUT_DIR);
Configuration conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
DataOutputStream file1 = fs.create(new Path(in1Dir, "part-0"));
file1.writeBytes("a\nb\nc\nd\ne");
file1.close();
    // write tab-delimited records to the second file because it is read
    // with KeyValueTextInputFormat
DataOutputStream file2 = fs.create(new Path(in2Dir, "part-0"));
file2.writeBytes("a\tblah\nb\tblah\nc\tblah\nd\tblah\ne\tblah");
file2.close();
Job job = Job.getInstance(conf);
job.setJobName("mi");
MultipleInputs.addInputPath(job, in1Dir, TextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(job, in2Dir, KeyValueTextInputFormat.class,
KeyValueMapClass.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(Text.class);
job.setReducerClass(ReducerClass.class);
FileOutputFormat.setOutputPath(job, outDir);
boolean success = false;
try {
success = job.waitForCompletion(true);
} catch (InterruptedException ie) {
throw new RuntimeException(ie);
} catch (ClassNotFoundException instante) {
throw new RuntimeException(instante);
}
if (!success)
throw new RuntimeException("Job failed!");
    // read the reducer output back line by line
BufferedReader output = new BufferedReader(new InputStreamReader(fs
.open(new Path(outDir, "part-r-00000"))));
// reducer should have counted one key from each file
assertTrue(output.readLine().equals("a 2"));
assertTrue(output.readLine().equals("b 2"));
assertTrue(output.readLine().equals("c 2"));
assertTrue(output.readLine().equals("d 2"));
assertTrue(output.readLine().equals("e 2"));
}
@SuppressWarnings("unchecked")
public void testAddInputPathWithFormat() throws IOException {
final Job conf = Job.getInstance();
MultipleInputs.addInputPath(conf, new Path("/foo"), TextInputFormat.class);
MultipleInputs.addInputPath(conf, new Path("/bar"),
KeyValueTextInputFormat.class);
final Map<Path, InputFormat> inputs = MultipleInputs
.getInputFormatMap(conf);
assertEquals(TextInputFormat.class, inputs.get(new Path("/foo")).getClass());
assertEquals(KeyValueTextInputFormat.class, inputs.get(new Path("/bar"))
.getClass());
}
@SuppressWarnings("unchecked")
public void testAddInputPathWithMapper() throws IOException {
final Job conf = Job.getInstance();
MultipleInputs.addInputPath(conf, new Path("/foo"), TextInputFormat.class,
MapClass.class);
MultipleInputs.addInputPath(conf, new Path("/bar"),
KeyValueTextInputFormat.class, KeyValueMapClass.class);
final Map<Path, InputFormat> inputs = MultipleInputs
.getInputFormatMap(conf);
final Map<Path, Class<? extends Mapper>> maps = MultipleInputs
.getMapperTypeMap(conf);
assertEquals(TextInputFormat.class, inputs.get(new Path("/foo")).getClass());
assertEquals(KeyValueTextInputFormat.class, inputs.get(new Path("/bar"))
.getClass());
assertEquals(MapClass.class, maps.get(new Path("/foo")));
assertEquals(KeyValueMapClass.class, maps.get(new Path("/bar")));
}
static final Text blah = new Text("blah");
// these 3 classes do a reduce side join with 2 different mappers
static class MapClass extends Mapper<LongWritable, Text, Text, Text> {
// receives "a", "b", "c" as values
@Override
public void map(LongWritable key, Text value, Context ctx)
throws IOException, InterruptedException {
ctx.write(value, blah);
}
}
static class KeyValueMapClass extends Mapper<Text, Text, Text, Text> {
// receives "a", "b", "c" as keys
@Override
public void map(Text key, Text value, Context ctx) throws IOException,
InterruptedException {
ctx.write(key, blah);
}
}
static class ReducerClass extends Reducer<Text, Text, NullWritable, Text> {
// should receive 2 rows for each key
int count = 0;
@Override
public void reduce(Text key, Iterable<Text> values, Context ctx)
throws IOException, InterruptedException {
count = 0;
for (Text value : values)
count++;
ctx.write(NullWritable.get(), new Text(key.toString() + " " + count));
}
}
}
| 7,549 | 34.952381 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/aggregate/AggregatorTests.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import org.apache.hadoop.io.Text;
import java.util.ArrayList;
import java.util.Map.Entry;
public class AggregatorTests extends ValueAggregatorBaseDescriptor {
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
String [] words = val.toString().split(" ");
String countType;
String id;
Entry<Text, Text> e;
for (String word: words) {
long numVal = Long.parseLong(word);
countType = LONG_VALUE_SUM;
id = "count_" + word;
e = generateEntry(countType, id, ONE);
if (e != null) {
retv.add(e);
}
countType = LONG_VALUE_MAX;
id = "max";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = LONG_VALUE_MIN;
id = "min";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = STRING_VALUE_MAX;
id = "value_as_string_max";
e = generateEntry(countType, id, new Text(""+numVal));
if (e != null) {
retv.add(e);
}
countType = STRING_VALUE_MIN;
id = "value_as_string_min";
e = generateEntry(countType, id, new Text(""+numVal));
if (e != null) {
retv.add(e);
}
countType = UNIQ_VALUE_COUNT;
id = "uniq_count";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
countType = VALUE_HISTOGRAM;
id = "histogram";
e = generateEntry(countType, id, new Text(word));
if (e != null) {
retv.add(e);
}
}
return retv;
}
}
| 2,623 | 28.483146 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/aggregate/TestMapReduceAggregates.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import junit.framework.TestCase;
import java.io.*;
import java.text.NumberFormat;
public class TestMapReduceAggregates extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
public void testAggregates() throws Exception {
launch();
}
public static void launch() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
int numOfInputLines = 20;
Path OUTPUT_DIR = new Path("build/test/output_for_aggregates_test");
Path INPUT_DIR = new Path("build/test/input_for_aggregates_test");
String inputFile = "input.txt";
fs.delete(INPUT_DIR, true);
fs.mkdirs(INPUT_DIR);
fs.delete(OUTPUT_DIR, true);
StringBuffer inputData = new StringBuffer();
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append("max\t19\n");
expectedOutput.append("min\t1\n");
FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
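    // Line i consists of the token idFormat(i) repeated i times, so the
    // aggregator should report count_<idFormat(i)> equal to i.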
for (int i = 1; i < numOfInputLines; i++) {
expectedOutput.append("count_").append(idFormat.format(i));
expectedOutput.append("\t").append(i).append("\n");
inputData.append(idFormat.format(i));
for (int j = 1; j < i; j++) {
inputData.append(" ").append(idFormat.format(i));
}
inputData.append("\n");
}
expectedOutput.append("value_as_string_max\t9\n");
expectedOutput.append("value_as_string_min\t1\n");
expectedOutput.append("uniq_count\t15\n");
fileOut.write(inputData.toString().getBytes("utf-8"));
fileOut.close();
System.out.println("inputData:");
System.out.println(inputData.toString());
conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, 1);
conf.set(ValueAggregatorJobBase.DESCRIPTOR + ".0",
"UserDefined,org.apache.hadoop.mapreduce.lib.aggregate.AggregatorTests");
conf.setLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, 14);
Job job = Job.getInstance(conf);
FileInputFormat.setInputPaths(job, INPUT_DIR);
job.setInputFormatClass(TextInputFormat.class);
FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
job.setOutputFormatClass(TextOutputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(1);
job.setMapperClass(ValueAggregatorMapper.class);
job.setReducerClass(ValueAggregatorReducer.class);
job.setCombinerClass(ValueAggregatorCombiner.class);
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
    //
    // Finally, compare the job output with the expected output. Only the
    // leading portion covered by expectedOutput is verified.
    //
String outdata = MapReduceTestUtil.readOutput(OUTPUT_DIR, conf);
System.out.println("full out data:");
System.out.println(outdata.toString());
outdata = outdata.substring(0, expectedOutput.toString().length());
assertEquals(expectedOutput.toString(),outdata);
fs.delete(OUTPUT_DIR, true);
fs.delete(INPUT_DIR, true);
}
  /**
   * Command-line entry point; runs the aggregation job.
   */
public static void main(String[] argv) throws Exception {
launch();
}
}
| 4,748 | 34.706767 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/util/TestMRCJCReflectionUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
/**
* Test for the JobConf-related parts of common's ReflectionUtils
* class.
*/
public class TestMRCJCReflectionUtils {
@Before
public void setUp() {
ReflectionUtils.clearCache();
}
/**
   * Tests backward compatibility of ReflectionUtils for JobConfigurable
   * objects. This should be deprecated along with the mapred package
   * (HADOOP-1230) and removed when the mapred package is removed.
*/
@Test
public void testSetConf() {
JobConfigurableOb ob = new JobConfigurableOb();
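    // setConf() should call JobConfigurable.configure() only when the conf
    // passed in is a JobConf, not a plain Configuration (asserted below).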
ReflectionUtils.setConf(ob, new Configuration());
assertFalse(ob.configured);
ReflectionUtils.setConf(ob, new JobConf());
assertTrue(ob.configured);
}
private static class JobConfigurableOb implements JobConfigurable {
boolean configured;
public void configure(JobConf job) {
configured = true;
}
}
}
| 1,937 | 30.770492 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/util/TestMRCJCRunJar.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;
/**
 * A test for the RunJar class.
*/
public class TestMRCJCRunJar {
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString();
private static final String TEST_JAR_NAME = "testjar.jar";
private static final String CLASS_NAME = "Hello.class";
@Test
public void testRunjar() throws Throwable {
File outFile = new File(TEST_ROOT_DIR, "out");
// delete if output file already exists.
if (outFile.exists()) {
outFile.delete();
}
File makeTestJar = makeTestJar();
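    // RunJar arguments: the jar to execute, the main class inside it, and a
    // single program argument -- the file Hello creates, which the assertion
    // below checks for.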
String[] args = new String[3];
args[0] = makeTestJar.getAbsolutePath();
args[1] = "org.apache.hadoop.util.Hello";
args[2] = outFile.toString();
RunJar.main(args);
Assert.assertTrue("RunJar failed", outFile.exists());
}
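  /**
   * Packages the pre-compiled Hello.class from the test classpath into a
   * small jar so that RunJar has a main class to execute.
   */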
private File makeTestJar() throws IOException {
File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
JarOutputStream jstream = new JarOutputStream(new FileOutputStream(jarFile));
InputStream entryInputStream = this.getClass().getResourceAsStream(
CLASS_NAME);
ZipEntry entry = new ZipEntry("org/apache/hadoop/util/" + CLASS_NAME);
jstream.putNextEntry(entry);
BufferedInputStream bufInputStream = new BufferedInputStream(
entryInputStream, 2048);
int count;
byte[] data = new byte[2048];
while ((count = bufInputStream.read(data, 0, 2048)) != -1) {
jstream.write(data, 0, count);
}
jstream.closeEntry();
jstream.close();
return jarFile;
}
}
| 2,666 | 32.3375 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/util/Hello.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.FileOutputStream;
import java.io.IOException;
/**
 * A simple Hello class that is run by TestMRCJCRunJar.
*
*/
public class Hello {
public static void main(String[] args) {
try {
System.out.println("Creating file" + args[0]);
FileOutputStream fstream = new FileOutputStream(args[0]);
fstream.write("Hello Hadoopers".getBytes());
fstream.close();
} catch (IOException e) {
// do nothing
}
}
}
| 1,297 | 31.45 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import org.apache.hadoop.io.TestSequenceFile;
import org.apache.hadoop.mapred.BigMapOutput;
import org.apache.hadoop.mapred.GenericMRLoadGenerator;
import org.apache.hadoop.mapred.MRBench;
import org.apache.hadoop.mapred.ReliabilityTest;
import org.apache.hadoop.mapred.SortValidator;
import org.apache.hadoop.mapred.TestMapRed;
import org.apache.hadoop.mapred.TestSequenceFileInputFormat;
import org.apache.hadoop.mapred.TestTextInputFormat;
import org.apache.hadoop.mapred.ThreadedMapBenchmark;
import org.apache.hadoop.mapreduce.FailJob;
import org.apache.hadoop.mapreduce.LargeSorter;
import org.apache.hadoop.mapreduce.MiniHadoopClusterManager;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.util.ProgramDriver;
import org.apache.hadoop.hdfs.NNBench;
import org.apache.hadoop.hdfs.NNBenchWithoutMR;
import org.apache.hadoop.fs.TestFileSystem;
import org.apache.hadoop.fs.TestDFSIO;
import org.apache.hadoop.fs.DFSCIOTest;
import org.apache.hadoop.fs.DistributedFSCheck;
import org.apache.hadoop.io.FileBench;
import org.apache.hadoop.fs.JHLogAnalyzer;
import org.apache.hadoop.fs.loadGenerator.DataGenerator;
import org.apache.hadoop.fs.loadGenerator.LoadGenerator;
import org.apache.hadoop.fs.loadGenerator.LoadGeneratorMR;
import org.apache.hadoop.fs.loadGenerator.StructureGenerator;
import org.apache.hadoop.fs.slive.SliveTest;
/**
* Driver for Map-reduce tests.
*
*/
public class MapredTestDriver {
private ProgramDriver pgd;
public MapredTestDriver() {
this(new ProgramDriver());
}
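  /*
   * Sketch of a typical command-line invocation (jar name and program
   * arguments are illustrative, not exact):
   *
   *   hadoop jar hadoop-mapreduce-client-jobclient-tests.jar sleep -m 10 -r 2
   *
   * where "sleep" is one of the program names registered in the constructor
   * below.
   */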
public MapredTestDriver(ProgramDriver pgd) {
this.pgd = pgd;
try {
pgd.addClass("testsequencefile", TestSequenceFile.class,
"A test for flat files of binary key value pairs.");
pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class,
"A map/reduce benchmark that compares the performance " +
"of maps with multiple spills over maps with 1 spill");
pgd.addClass("mrbench", MRBench.class,
"A map/reduce benchmark that can create many small jobs");
pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
pgd.addClass("testsequencefileinputformat",
TestSequenceFileInputFormat.class,
"A test for sequence file input format.");
pgd.addClass("testtextinputformat", TestTextInputFormat.class,
"A test for text input format.");
pgd.addClass("testmapredsort", SortValidator.class,
"A map/reduce program that validates the " +
"map-reduce framework's sort.");
pgd.addClass("testbigmapoutput", BigMapOutput.class,
"A map/reduce program that works on a very big " +
"non-splittable file and does identity map/reduce");
pgd.addClass("loadgen", GenericMRLoadGenerator.class,
"Generic map/reduce load generator");
pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
"A program that tests the reliability of the MR framework by " +
"injecting faults/failures");
pgd.addClass("fail", FailJob.class, "a job that always fails");
pgd.addClass("sleep", SleepJob.class,
"A job that sleeps at each map and reduce task.");
pgd.addClass("nnbench", NNBench.class,
"A benchmark that stresses the namenode w/ MR.");
pgd.addClass("nnbenchWithoutMR", NNBenchWithoutMR.class,
"A benchmark that stresses the namenode w/o MR.");
pgd.addClass("testfilesystem", TestFileSystem.class,
"A test for FileSystem read/write.");
pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class,
"Distributed i/o benchmark.");
pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
"Distributed i/o benchmark of libhdfs.");
pgd.addClass("DistributedFSCheck", DistributedFSCheck.class,
"Distributed checkup of the file system consistency.");
pgd.addClass("filebench", FileBench.class,
"Benchmark SequenceFile(Input|Output)Format " +
"(block,record compressed and uncompressed), " +
"Text(Input|Output)Format (compressed and uncompressed)");
pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class,
"Job History Log analyzer.");
pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class,
"HDFS Stress Test and Live Data Verification.");
pgd.addClass("minicluster", MiniHadoopClusterManager.class,
"Single process HDFS and MR cluster.");
pgd.addClass("largesorter", LargeSorter.class,
"Large-Sort tester");
pgd.addClass("NNloadGenerator", LoadGenerator.class,
"Generate load on Namenode using NN loadgenerator run WITHOUT MR");
pgd.addClass("NNloadGeneratorMR", LoadGeneratorMR.class,
"Generate load on Namenode using NN loadgenerator run as MR job");
pgd.addClass("NNstructureGenerator", StructureGenerator.class,
"Generate the structure to be used by NNdataGenerator");
pgd.addClass("NNdataGenerator", DataGenerator.class,
"Generate the data to be used by NNloadGenerator");
} catch(Throwable e) {
e.printStackTrace();
}
}
public void run(String argv[]) {
int exitCode = -1;
try {
exitCode = pgd.run(argv);
} catch(Throwable e) {
e.printStackTrace();
}
System.exit(exitCode);
}
public static void main(String argv[]){
new MapredTestDriver().run(argv);
}
}
| 6,307 | 42.503448 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRClientCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Basic testing for the MiniMRClientCluster. This test shows an example class
* that can be used in MR1 or MR2, without any change to the test. The test will
* use MiniMRYarnCluster in MR2, and MiniMRCluster in MR1.
*/
public class TestMiniMRClientCluster {
private static Path inDir = null;
private static Path outDir = null;
private static Path testdir = null;
private static Path[] inFiles = new Path[5];
private static MiniMRClientCluster mrCluster;
private class InternalClass {
}
@BeforeClass
public static void setup() throws IOException {
final Configuration conf = new Configuration();
final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data",
"/tmp"));
testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
inDir = new Path(testdir, "in");
outDir = new Path(testdir, "out");
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(testdir) && !fs.delete(testdir, true)) {
throw new IOException("Could not delete " + testdir);
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir);
}
for (int i = 0; i < inFiles.length; i++) {
inFiles[i] = new Path(inDir, "part_" + i);
createFile(inFiles[i], conf);
}
// create the mini cluster to be used for the tests
mrCluster = MiniMRClientClusterFactory.create(
InternalClass.class, 1, new Configuration());
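    // The factory returns a MiniMRYarnCluster-backed implementation in MR2
    // (MiniMRCluster in MR1); InternalClass identifies the calling test and
    // "1" is the requested cluster size.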
}
@AfterClass
public static void cleanup() throws IOException {
// clean up the input and output files
final Configuration conf = new Configuration();
final FileSystem fs = testdir.getFileSystem(conf);
if (fs.exists(testdir)) {
fs.delete(testdir, true);
}
// stopping the mini cluster
mrCluster.stop();
}
@Test
public void testRestart() throws Exception {
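    // Record the RM and job-history addresses, restart the mini cluster, and
    // verify below that every address survived the restart unchanged.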
String rmAddress1 = mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
String rmAdminAddress1 = mrCluster.getConfig().get(
YarnConfiguration.RM_ADMIN_ADDRESS);
String rmSchedAddress1 = mrCluster.getConfig().get(
YarnConfiguration.RM_SCHEDULER_ADDRESS);
String rmRstrackerAddress1 = mrCluster.getConfig().get(
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS);
String rmWebAppAddress1 = mrCluster.getConfig().get(
YarnConfiguration.RM_WEBAPP_ADDRESS);
String mrHistAddress1 = mrCluster.getConfig().get(
JHAdminConfig.MR_HISTORY_ADDRESS);
String mrHistWebAppAddress1 = mrCluster.getConfig().get(
JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS);
mrCluster.restart();
String rmAddress2 = mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
String rmAdminAddress2 = mrCluster.getConfig().get(
YarnConfiguration.RM_ADMIN_ADDRESS);
String rmSchedAddress2 = mrCluster.getConfig().get(
YarnConfiguration.RM_SCHEDULER_ADDRESS);
String rmRstrackerAddress2 = mrCluster.getConfig().get(
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS);
String rmWebAppAddress2 = mrCluster.getConfig().get(
YarnConfiguration.RM_WEBAPP_ADDRESS);
String mrHistAddress2 = mrCluster.getConfig().get(
JHAdminConfig.MR_HISTORY_ADDRESS);
String mrHistWebAppAddress2 = mrCluster.getConfig().get(
JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS);
assertEquals("Address before restart: " + rmAddress1
+ " is different from new address: " + rmAddress2, rmAddress1,
rmAddress2);
assertEquals("Address before restart: " + rmAdminAddress1
+ " is different from new address: " + rmAdminAddress2,
rmAdminAddress1, rmAdminAddress2);
assertEquals("Address before restart: " + rmSchedAddress1
+ " is different from new address: " + rmSchedAddress2,
rmSchedAddress1, rmSchedAddress2);
assertEquals("Address before restart: " + rmRstrackerAddress1
+ " is different from new address: " + rmRstrackerAddress2,
rmRstrackerAddress1, rmRstrackerAddress2);
assertEquals("Address before restart: " + rmWebAppAddress1
+ " is different from new address: " + rmWebAppAddress2,
rmWebAppAddress1, rmWebAppAddress2);
assertEquals("Address before restart: " + mrHistAddress1
+ " is different from new address: " + mrHistAddress2, mrHistAddress1,
mrHistAddress2);
assertEquals("Address before restart: " + mrHistWebAppAddress1
+ " is different from new address: " + mrHistWebAppAddress2,
mrHistWebAppAddress1, mrHistWebAppAddress2);
}
@Test
public void testJob() throws Exception {
final Job job = createJob();
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,
inDir);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,
new Path(outDir, "testJob"));
assertTrue(job.waitForCompletion(true));
validateCounters(job.getCounters(), 5, 25, 5, 5);
}
private void validateCounters(Counters counters, long mapInputRecords,
long mapOutputRecords, long reduceInputGroups, long reduceOutputRecords) {
assertEquals("MapInputRecords", mapInputRecords, counters.findCounter(
"MyCounterGroup", "MAP_INPUT_RECORDS").getValue());
assertEquals("MapOutputRecords", mapOutputRecords, counters.findCounter(
"MyCounterGroup", "MAP_OUTPUT_RECORDS").getValue());
assertEquals("ReduceInputGroups", reduceInputGroups, counters.findCounter(
"MyCounterGroup", "REDUCE_INPUT_GROUPS").getValue());
assertEquals("ReduceOutputRecords", reduceOutputRecords, counters
.findCounter("MyCounterGroup", "REDUCE_OUTPUT_RECORDS").getValue());
}
private static void createFile(Path inFile, Configuration conf)
throws IOException {
final FileSystem fs = inFile.getFileSystem(conf);
if (fs.exists(inFile)) {
return;
}
FSDataOutputStream out = fs.create(inFile);
out.writeBytes("This is a test file");
out.close();
}
public static Job createJob() throws IOException {
final Job baseJob = Job.getInstance(mrCluster.getConfig());
baseJob.setOutputKeyClass(Text.class);
baseJob.setOutputValueClass(IntWritable.class);
baseJob.setMapperClass(MyMapper.class);
baseJob.setReducerClass(MyReducer.class);
baseJob.setNumReduceTasks(1);
return baseJob;
}
public static class MyMapper extends
org.apache.hadoop.mapreduce.Mapper<Object, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
context.getCounter("MyCounterGroup", "MAP_INPUT_RECORDS").increment(1);
StringTokenizer iter = new StringTokenizer(value.toString());
while (iter.hasMoreTokens()) {
word.set(iter.nextToken());
context.write(word, one);
context.getCounter("MyCounterGroup", "MAP_OUTPUT_RECORDS").increment(1);
}
}
}
public static class MyReducer extends
org.apache.hadoop.mapreduce.Reducer<Text, IntWritable, Text, IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
context.getCounter("MyCounterGroup", "REDUCE_INPUT_GROUPS").increment(1);
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
context.getCounter("MyCounterGroup", "REDUCE_OUTPUT_RECORDS")
.increment(1);
}
}
}
| 9,142 | 37.906383 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* A JUnit test to test Mini Map-Reduce Cluster with Mini-DFS.
*/
public class TestMiniMRWithDFSWithDistinctUsers {
static final UserGroupInformation DFS_UGI = createUGI("dfs", true);
static final UserGroupInformation ALICE_UGI = createUGI("alice", false);
static final UserGroupInformation BOB_UGI = createUGI("bob", false);
MiniMRCluster mr = null;
MiniDFSCluster dfs = null;
FileSystem fs = null;
Configuration conf = new Configuration();
static UserGroupInformation createUGI(String name, boolean issuper) {
String group = issuper? "supergroup": name;
return UserGroupInformation.createUserForTesting(name, new String[]{group});
}
static void mkdir(FileSystem fs, String dir,
String user, String group, short mode) throws IOException {
Path p = new Path(dir);
fs.mkdirs(p);
fs.setPermission(p, new FsPermission(mode));
fs.setOwner(p, user, group);
}
// runs a sample job as a user (ugi)
void runJobAsUser(final JobConf job, UserGroupInformation ugi)
throws Exception {
RunningJob rj = ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
public RunningJob run() throws IOException {
return JobClient.runJob(job);
}
});
rj.waitForCompletion();
Assert.assertEquals("SUCCEEDED", JobStatus.getJobRunState(rj.getJobState()));
}
@Before
public void setUp() throws Exception {
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException {
return dfs.getFileSystem();
}
});
// Home directories for users
mkdir(fs, "/user", "nobody", "nogroup", (short)01777);
mkdir(fs, "/user/alice", "alice", "nogroup", (short)0755);
mkdir(fs, "/user/bob", "bob", "nogroup", (short)0755);
// staging directory root with sticky bit
UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
mkdir(fs, "/staging", MR_UGI.getShortUserName(), "nogroup", (short)01777);
JobConf mrConf = new JobConf();
mrConf.set(JTConfig.JT_STAGING_AREA_ROOT, "/staging");
mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(),
1, null, null, MR_UGI, mrConf);
}
@After
public void tearDown() throws Exception {
if (mr != null) { mr.shutdown();}
if (dfs != null) { dfs.shutdown(); }
}
@Test
public void testDistinctUsers() throws Exception {
JobConf job1 = mr.createJobConf();
String input = "The quick brown fox\nhas many silly\n"
+ "red fox sox\n";
Path inDir = new Path("/testing/distinct/input");
Path outDir = new Path("/user/alice/output");
TestMiniMRClasspath
.configureWordCount(fs, job1, input, 2, 1, inDir, outDir);
runJobAsUser(job1, ALICE_UGI);
JobConf job2 = mr.createJobConf();
Path inDir2 = new Path("/testing/distinct/input2");
Path outDir2 = new Path("/user/bob/output2");
TestMiniMRClasspath.configureWordCount(fs, job2, input, 2, 1, inDir2,
outDir2);
runJobAsUser(job2, BOB_UGI);
}
/**
   * Regression test for MAPREDUCE-2327. Verifies that a map task succeeds
   * even when it makes lots of spills (more than fit in the spill index
   * cache).
*/
@Test
public void testMultipleSpills() throws Exception {
JobConf job1 = mr.createJobConf();
// Make sure it spills twice
job1.setFloat(MRJobConfig.MAP_SORT_SPILL_PERCENT, 0.0001f);
job1.setInt(MRJobConfig.IO_SORT_MB, 1);
// Make sure the spill records don't fit in index cache
job1.setInt(MRJobConfig.INDEX_CACHE_MEMORY_LIMIT, 0);
String input = "The quick brown fox\nhas many silly\n"
+ "red fox sox\n";
Path inDir = new Path("/testing/distinct/input");
Path outDir = new Path("/user/alice/output");
TestMiniMRClasspath
.configureWordCount(fs, job1, input, 2, 1, inDir, outDir);
runJobAsUser(job1, ALICE_UGI);
}
}
| 5,416 | 34.874172 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMRNotification.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
/**
* Tests Job end notification in cluster mode.
*/
public class TestClusterMRNotification extends NotificationTestCase {
public TestClusterMRNotification() throws IOException {
super(HadoopTestCase.CLUSTER_MR);
}
}
| 1,101 | 32.393939 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Properties;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
public class TestClusterMapReduceTestCase extends ClusterMapReduceTestCase {
public void _testMapReduce(boolean restart) throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
Writer wr = new OutputStreamWriter(os);
wr.write("hello1\n");
wr.write("hello2\n");
wr.write("hello3\n");
wr.write("hello4\n");
wr.close();
if (restart) {
stopCluster();
startCluster(false, null);
}
JobConf conf = createJobConf();
conf.setJobName("mr");
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);
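    // Identity map/reduce passes the (offset, line) records straight through,
    // so the job output should contain the same four "hello" lines written
    // above; the reader below verifies that.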
FileInputFormat.setInputPaths(conf, getInputDir());
FileOutputFormat.setOutputPath(conf, getOutputDir());
JobClient.runJob(conf);
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(getOutputDir(),
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
int counter = 0;
while (line != null) {
counter++;
assertTrue(line.contains("hello"));
line = reader.readLine();
}
reader.close();
assertEquals(4, counter);
}
}
public void testMapReduce() throws Exception {
_testMapReduce(false);
}
public void testMapReduceRestarting() throws Exception {
_testMapReduce(true);
}
public void testDFSRestart() throws Exception {
Path file = new Path(getInputDir(), "text.txt");
OutputStream os = getFileSystem().create(file);
Writer wr = new OutputStreamWriter(os);
wr.close();
stopCluster();
startCluster(false, null);
assertTrue(getFileSystem().exists(file));
stopCluster();
startCluster(true, null);
assertFalse(getFileSystem().exists(file));
}
public void testMRConfig() throws Exception {
JobConf conf = createJobConf();
assertNull(conf.get("xyz"));
Properties config = new Properties();
config.setProperty("xyz", "XYZ");
stopCluster();
startCluster(false, config);
conf = createJobConf();
assertEquals("XYZ", conf.get("xyz"));
}
}
| 3,870 | 29.722222 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.Text;
@SuppressWarnings("deprecation")
public class TestMRCJCFileInputFormat extends TestCase {
Configuration conf = new Configuration();
MiniDFSCluster dfs = null;
private MiniDFSCluster newDFSCluster(JobConf conf) throws Exception {
return new MiniDFSCluster.Builder(conf).numDataNodes(4)
.racks(new String[]{"/rack0", "/rack0", "/rack1", "/rack1"})
.hosts(new String[]{"host0", "host1", "host2", "host3"})
.build();
}
public void testLocality() throws Exception {
JobConf job = new JobConf(conf);
dfs = newDFSCluster(job);
FileSystem fs = dfs.getFileSystem();
System.out.println("FileSystem " + fs.getUri());
Path inputDir = new Path("/foo/");
String fileName = "part-0000";
createInputs(fs, inputDir, fileName);
// split it using a file input format
TextInputFormat.addInputPath(job, inputDir);
TextInputFormat inFormat = new TextInputFormat();
inFormat.configure(job);
InputSplit[] splits = inFormat.getSplits(job, 1);
FileStatus fileStatus = fs.getFileStatus(new Path(inputDir, fileName));
BlockLocation[] locations =
fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
System.out.println("Made splits");
// make sure that each split is a block and the locations match
for(int i=0; i < splits.length; ++i) {
FileSplit fileSplit = (FileSplit) splits[i];
System.out.println("File split: " + fileSplit);
for (String h: fileSplit.getLocations()) {
System.out.println("Location: " + h);
}
System.out.println("Block: " + locations[i]);
assertEquals(locations[i].getOffset(), fileSplit.getStart());
assertEquals(locations[i].getLength(), fileSplit.getLength());
String[] blockLocs = locations[i].getHosts();
String[] splitLocs = fileSplit.getLocations();
assertEquals(2, blockLocs.length);
assertEquals(2, splitLocs.length);
assertTrue((blockLocs[0].equals(splitLocs[0]) &&
blockLocs[1].equals(splitLocs[1])) ||
(blockLocs[1].equals(splitLocs[0]) &&
blockLocs[0].equals(splitLocs[1])));
}
assertEquals("Expected value of " + FileInputFormat.NUM_INPUT_FILES,
1, job.getLong(FileInputFormat.NUM_INPUT_FILES, 0));
}
private void createInputs(FileSystem fs, Path inDir, String fileName)
throws IOException, TimeoutException, InterruptedException {
// create a multi-block file on hdfs
Path path = new Path(inDir, fileName);
final short replication = 2;
DataOutputStream out = fs.create(path, true, 4096,
replication, 512, null);
for(int i=0; i < 1000; ++i) {
out.writeChars("Hello\n");
}
out.close();
System.out.println("Wrote file");
DFSTestUtil.waitReplication(fs, path, replication);
}
public void testNumInputs() throws Exception {
JobConf job = new JobConf(conf);
dfs = newDFSCluster(job);
FileSystem fs = dfs.getFileSystem();
System.out.println("FileSystem " + fs.getUri());
Path inputDir = new Path("/foo/");
final int numFiles = 10;
String fileNameBase = "part-0000";
for (int i=0; i < numFiles; ++i) {
createInputs(fs, inputDir, fileNameBase + String.valueOf(i));
}
createInputs(fs, inputDir, "_meta");
createInputs(fs, inputDir, "_temp");
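    // Files whose names start with "_" are treated as hidden by the default
    // input filter, so only the ten part-0000* files should count as inputs.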
// split it using a file input format
TextInputFormat.addInputPath(job, inputDir);
TextInputFormat inFormat = new TextInputFormat();
inFormat.configure(job);
InputSplit[] splits = inFormat.getSplits(job, 1);
assertEquals("Expected value of " + FileInputFormat.NUM_INPUT_FILES,
numFiles, job.getLong(FileInputFormat.NUM_INPUT_FILES, 0));
}
final Path root = new Path("/TestFileInputFormat");
final Path file1 = new Path(root, "file1");
final Path dir1 = new Path(root, "dir1");
final Path file2 = new Path(dir1, "file2");
static final int BLOCKSIZE = 1024;
static final byte[] databuf = new byte[BLOCKSIZE];
private static final String rack1[] = new String[] {
"/r1"
};
private static final String hosts1[] = new String[] {
"host1.rack1.com"
};
private class DummyFileInputFormat extends FileInputFormat<Text, Text> {
@Override
public RecordReader<Text, Text> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
return null;
}
}
public void testMultiLevelInput() throws Exception {
JobConf job = new JobConf(conf);
job.setBoolean("dfs.replication.considerLoad", false);
dfs = new MiniDFSCluster.Builder(job).racks(rack1).hosts(hosts1).build();
dfs.waitActive();
String namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +
(dfs.getFileSystem()).getUri().getPort();
FileSystem fileSys = dfs.getFileSystem();
if (!fileSys.mkdirs(dir1)) {
      throw new IOException("Mkdirs failed to create " + dir1.toString());
}
writeFile(job, file1, (short)1, 1);
writeFile(job, file2, (short)1, 1);
// split it using a CombinedFile input format
DummyFileInputFormat inFormat = new DummyFileInputFormat();
inFormat.setInputPaths(job, root);
// By default, we don't allow multi-level/recursive inputs
boolean exceptionThrown = false;
try {
InputSplit[] splits = inFormat.getSplits(job, 1);
} catch (Exception e) {
exceptionThrown = true;
}
assertTrue("Exception should be thrown by default for scanning a "
+ "directory with directories inside.", exceptionThrown);
// Enable multi-level/recursive inputs
job.setBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, true);
InputSplit[] splits = inFormat.getSplits(job, 1);
assertEquals(splits.length, 2);
}
@SuppressWarnings("rawtypes")
public void testLastInputSplitAtSplitBoundary() throws Exception {
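    // A 1 GB file with a 128 MB split size: the last split ends exactly on a
    // split boundary, so exactly 8 splits are expected.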
FileInputFormat fif = new FileInputFormatForTest(1024l * 1024 * 1024,
128l * 1024 * 1024);
JobConf job = new JobConf();
InputSplit[] splits = fif.getSplits(job, 8);
assertEquals(8, splits.length);
for (int i = 0; i < splits.length; i++) {
InputSplit split = splits[i];
assertEquals(("host" + i), split.getLocations()[0]);
}
}
@SuppressWarnings("rawtypes")
public void testLastInputSplitExceedingSplitBoundary() throws Exception {
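    // A 1027 MB file with a 128 MB split size: the 3 MB remainder is small
    // enough to be folded into the final split, so 8 splits are still
    // expected rather than 9.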
FileInputFormat fif = new FileInputFormatForTest(1027l * 1024 * 1024,
128l * 1024 * 1024);
JobConf job = new JobConf();
InputSplit[] splits = fif.getSplits(job, 8);
assertEquals(8, splits.length);
for (int i = 0; i < splits.length; i++) {
InputSplit split = splits[i];
assertEquals(("host" + i), split.getLocations()[0]);
}
}
@SuppressWarnings("rawtypes")
public void testLastInputSplitSingleSplit() throws Exception {
FileInputFormat fif = new FileInputFormatForTest(100l * 1024 * 1024,
128l * 1024 * 1024);
JobConf job = new JobConf();
InputSplit[] splits = fif.getSplits(job, 1);
assertEquals(1, splits.length);
for (int i = 0; i < splits.length; i++) {
InputSplit split = splits[i];
assertEquals(("host" + i), split.getLocations()[0]);
}
}
private class FileInputFormatForTest<K, V> extends FileInputFormat<K, V> {
long splitSize;
long length;
FileInputFormatForTest(long length, long splitSize) {
this.length = length;
this.splitSize = splitSize;
}
@Override
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job,
Reporter reporter) throws IOException {
return null;
}
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
FileStatus mockFileStatus = mock(FileStatus.class);
when(mockFileStatus.getBlockSize()).thenReturn(splitSize);
when(mockFileStatus.isDirectory()).thenReturn(false);
Path mockPath = mock(Path.class);
FileSystem mockFs = mock(FileSystem.class);
BlockLocation[] blockLocations = mockBlockLocations(length, splitSize);
when(mockFs.getFileBlockLocations(mockFileStatus, 0, length)).thenReturn(
blockLocations);
when(mockPath.getFileSystem(any(Configuration.class))).thenReturn(mockFs);
when(mockFileStatus.getPath()).thenReturn(mockPath);
when(mockFileStatus.getLen()).thenReturn(length);
FileStatus[] fs = new FileStatus[1];
fs[0] = mockFileStatus;
return fs;
}
@Override
protected long computeSplitSize(long blockSize, long minSize, long maxSize) {
return splitSize;
}
private BlockLocation[] mockBlockLocations(long size, long splitSize) {
int numLocations = (int) (size / splitSize);
if (size % splitSize != 0)
numLocations++;
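      // One location per full split, plus one more for the short trailing
      // block when the file size is not an exact multiple of the split size.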
BlockLocation[] blockLocations = new BlockLocation[numLocations];
for (int i = 0; i < numLocations; i++) {
String[] names = new String[] { "b" + i };
String[] hosts = new String[] { "host" + i };
blockLocations[i] = new BlockLocation(names, hosts, i * splitSize,
Math.min(splitSize, size - (splitSize * i)));
}
return blockLocations;
}
}
static void writeFile(Configuration conf, Path name,
short replication, int numBlocks)
throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
FSDataOutputStream stm = fileSys.create(name, true,
conf.getInt("io.file.buffer.size", 4096),
replication, (long)BLOCKSIZE);
for (int i = 0; i < numBlocks; i++) {
stm.write(databuf);
}
stm.close();
DFSTestUtil.waitReplication(fileSys, name, replication);
}
@Override
public void tearDown() throws Exception {
if (dfs != null) {
dfs.shutdown();
dfs = null;
}
}
}
| 11,381 | 35.018987 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCleanup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* A JUnit test to test Map-Reduce job cleanup.
*/
@SuppressWarnings("deprecation")
public class TestJobCleanup {
private static String TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp") + "/" + "test-job-cleanup").toString();
private static final String CUSTOM_CLEANUP_FILE_NAME = "_custom_cleanup";
private static final String ABORT_KILLED_FILE_NAME = "_custom_abort_killed";
private static final String ABORT_FAILED_FILE_NAME = "_custom_abort_failed";
private static FileSystem fileSys = null;
private static MiniMRCluster mr = null;
private static Path inDir = null;
private static Path emptyInDir = null;
private static int outDirs = 0;
private static Log LOG = LogFactory.getLog(TestJobCleanup.class);
@BeforeClass
public static void setUp() throws IOException {
JobConf conf = new JobConf();
fileSys = FileSystem.get(conf);
fileSys.delete(new Path(TEST_ROOT_DIR), true);
conf.set("mapred.job.tracker.handler.count", "1");
conf.set("mapred.job.tracker", "127.0.0.1:0");
conf.set("mapred.job.tracker.http.address", "127.0.0.1:0");
conf.set("mapred.task.tracker.http.address", "127.0.0.1:0");
conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, TEST_ROOT_DIR +
"/intermediate");
conf.set(org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, "true");
mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);
inDir = new Path(TEST_ROOT_DIR, "test-input");
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
DataOutputStream file = fileSys.create(new Path(inDir, "part-" + 0));
file.writeBytes(input);
file.close();
emptyInDir = new Path(TEST_ROOT_DIR, "empty-input");
fileSys.mkdirs(emptyInDir);
}
@AfterClass
public static void tearDown() throws Exception {
if (fileSys != null) {
// fileSys.delete(new Path(TEST_ROOT_DIR), true);
fileSys.close();
}
if (mr != null) {
mr.shutdown();
}
}
/**
   * Committer whose deprecated
   * {@link FileOutputCommitter#cleanupJob(JobContext)} creates a
   * _custom_cleanup marker in the output folder
*/
static class CommitterWithCustomDeprecatedCleanup extends FileOutputCommitter {
@Override
public void cleanupJob(JobContext context) throws IOException {
System.err.println("---- HERE ----");
JobConf conf = context.getJobConf();
Path outputPath = FileOutputFormat.getOutputPath(conf);
FileSystem fs = outputPath.getFileSystem(conf);
fs.create(new Path(outputPath, CUSTOM_CLEANUP_FILE_NAME)).close();
}
@Override
public void commitJob(JobContext context) throws IOException {
cleanupJob(context);
}
@Override
public void abortJob(JobContext context, int i) throws IOException {
cleanupJob(context);
}
}
/**
* Committer with abort making a _failed/_killed in the output folder
*/
static class CommitterWithCustomAbort extends FileOutputCommitter {
@Override
public void abortJob(JobContext context, int state) throws IOException {
JobConf conf = context.getJobConf();
Path outputPath = FileOutputFormat.getOutputPath(conf);
FileSystem fs = outputPath.getFileSystem(conf);
String fileName = (state == JobStatus.FAILED) ? TestJobCleanup.ABORT_FAILED_FILE_NAME
: TestJobCleanup.ABORT_KILLED_FILE_NAME;
fs.create(new Path(outputPath, fileName)).close();
}
}
private Path getNewOutputDir() {
return new Path(TEST_ROOT_DIR, "output-" + outDirs++);
}
private void configureJob(JobConf jc, String jobName, int maps, int reds,
Path outDir) {
jc.setJobName(jobName);
jc.setInputFormat(TextInputFormat.class);
jc.setOutputKeyClass(LongWritable.class);
jc.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(jc, inDir);
FileOutputFormat.setOutputPath(jc, outDir);
jc.setMapperClass(IdentityMapper.class);
jc.setReducerClass(IdentityReducer.class);
jc.setNumMapTasks(maps);
jc.setNumReduceTasks(reds);
}
// run a job with 1 map and let it run to completion
private void testSuccessfulJob(String filename,
Class<? extends OutputCommitter> committer, String[] exclude)
throws IOException {
JobConf jc = mr.createJobConf();
Path outDir = getNewOutputDir();
configureJob(jc, "job with cleanup()", 1, 0, outDir);
jc.setOutputCommitter(committer);
JobClient jobClient = new JobClient(jc);
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
job.waitForCompletion();
LOG.info("Job finished : " + job.isComplete());
Path testFile = new Path(outDir, filename);
assertTrue("Done file \"" + testFile + "\" missing for job " + id,
fileSys.exists(testFile));
    // check that files from the exclude set are not present
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse("File " + file + " should not be present for successful job "
+ id, fileSys.exists(file));
}
}
// run a job for which all the attempts simply fail.
private void testFailedJob(String fileName,
Class<? extends OutputCommitter> committer, String[] exclude)
throws IOException {
JobConf jc = mr.createJobConf();
Path outDir = getNewOutputDir();
configureJob(jc, "fail job with abort()", 1, 0, outDir);
jc.setMaxMapAttempts(1);
// set the job to fail
jc.setMapperClass(UtilsForTests.FailMapper.class);
jc.setOutputCommitter(committer);
JobClient jobClient = new JobClient(jc);
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
job.waitForCompletion();
assertEquals("Job did not fail", JobStatus.FAILED, job.getJobState());
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
assertTrue("File " + testFile + " missing for failed job " + id,
fileSys.exists(testFile));
}
    // check that files from the exclude set are not present
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse("File " + file + " should not be present for failed job "
+ id, fileSys.exists(file));
}
}
  // run a job which gets stuck in the mapper, then kill it.
private void testKilledJob(String fileName,
Class<? extends OutputCommitter> committer, String[] exclude)
throws IOException {
JobConf jc = mr.createJobConf();
Path outDir = getNewOutputDir();
configureJob(jc, "kill job with abort()", 1, 0, outDir);
// set the job to wait for long
jc.setMapperClass(UtilsForTests.KillMapper.class);
jc.setOutputCommitter(committer);
JobClient jobClient = new JobClient(jc);
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
Counters counters = job.getCounters();
// wait for the map to be launched
while (true) {
if (counters.getCounter(JobCounter.TOTAL_LAUNCHED_MAPS) == 1) {
break;
}
LOG.info("Waiting for a map task to be launched");
UtilsForTests.waitFor(100);
counters = job.getCounters();
}
job.killJob(); // kill the job
job.waitForCompletion(); // wait for the job to complete
assertEquals("Job was not killed", JobStatus.KILLED, job.getJobState());
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
assertTrue("File " + testFile + " missing for job " + id,
fileSys.exists(testFile));
}
    // check that files from the exclude set are not present
for (String ex : exclude) {
Path file = new Path(outDir, ex);
assertFalse("File " + file + " should not be present for killed job "
+ id, fileSys.exists(file));
}
}
/**
* Test default cleanup/abort behavior
*
* @throws IOException
*/
@Test
public void testDefaultCleanupAndAbort() throws IOException {
// check with a successful job
testSuccessfulJob(FileOutputCommitter.SUCCEEDED_FILE_NAME,
FileOutputCommitter.class, new String[] {});
// check with a failed job
testFailedJob(null, FileOutputCommitter.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME });
// check default abort job kill
testKilledJob(null, FileOutputCommitter.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME });
}
/**
* Test if a failed job with custom committer runs the abort code.
*
* @throws IOException
*/
@Test
public void testCustomAbort() throws IOException {
// check with a successful job
testSuccessfulJob(FileOutputCommitter.SUCCEEDED_FILE_NAME,
CommitterWithCustomAbort.class, new String[] { ABORT_FAILED_FILE_NAME,
ABORT_KILLED_FILE_NAME });
// check with a failed job
testFailedJob(ABORT_FAILED_FILE_NAME, CommitterWithCustomAbort.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME,
ABORT_KILLED_FILE_NAME });
// check with a killed job
testKilledJob(ABORT_KILLED_FILE_NAME, CommitterWithCustomAbort.class,
new String[] { FileOutputCommitter.SUCCEEDED_FILE_NAME,
ABORT_FAILED_FILE_NAME });
}
/**
* Test if a failed job with custom committer runs the deprecated
* {@link FileOutputCommitter#cleanupJob(JobContext)} code for api
* compatibility testing.
*/
@Test
public void testCustomCleanup() throws IOException {
// check with a successful job
testSuccessfulJob(CUSTOM_CLEANUP_FILE_NAME,
CommitterWithCustomDeprecatedCleanup.class,
new String[] {});
// check with a failed job
testFailedJob(CUSTOM_CLEANUP_FILE_NAME,
CommitterWithCustomDeprecatedCleanup.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME});
// check with a killed job
testKilledJob(TestJobCleanup.CUSTOM_CLEANUP_FILE_NAME,
CommitterWithCustomDeprecatedCleanup.class,
new String[] {FileOutputCommitter.SUCCEEDED_FILE_NAME});
}
}
| 11,507 | 34.300613 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
public class TestTextOutputFormat {
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
// A random task attempt id for testing.
private static String attempt = "attempt_200707121733_0001_m_000000_0";
private static Path workDir =
new Path(new Path(
new Path(System.getProperty("test.build.data", "."),
"data"),
FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt);
@Test
public void testFormat() throws Exception {
JobConf job = new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job, workDir);
FileSystem fs = workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file = "test_format.txt";
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
RecordWriter<Object,Object> theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter);
Text key1 = new Text("key1");
Text key2 = new Text("key2");
Text val1 = new Text("val1");
Text val2 = new Text("val2");
NullWritable nullWritable = NullWritable.get();
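    // TextOutputFormat drops a null/NullWritable key or value together with
    // the separator, and writes no line at all when both are null/NullWritable;
    // the expected output below encodes exactly that behaviour.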
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(reporter);
}
File expectedFile = new File(new Path(workDir, file).toString());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append('\t').append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append('\t').append(val2).append("\n");
String output = UtilsForTests.slurp(expectedFile);
assertEquals(expectedOutput.toString(), output);
}
@Test
public void testFormatWithCustomSeparator() throws Exception {
JobConf job = new JobConf();
String separator = "\u0001";
job.set("mapreduce.output.textoutputformat.separator", separator);
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job, workDir);
FileSystem fs = workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file = "test_custom.txt";
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
RecordWriter<Object,Object> theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter);
Text key1 = new Text("key1");
Text key2 = new Text("key2");
Text val1 = new Text("val1");
Text val2 = new Text("val2");
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(reporter);
}
File expectedFile = new File(new Path(workDir, file).toString());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append(separator).append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append(separator).append(val2).append("\n");
String output = UtilsForTests.slurp(expectedFile);
assertEquals(expectedOutput.toString(), output);
}
/**
* test compressed file
* @throws IOException
*/
@Test
public void testCompress() throws IOException {
JobConf job = new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
job.set(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS,"true");
FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job, workDir);
FileSystem fs = workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file = "test_compress.txt";
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
RecordWriter<Object,Object> theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter);
Text key1 = new Text("key1");
Text key2 = new Text("key2");
Text val1 = new Text("val1");
Text val2 = new Text("val2");
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(reporter);
}
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append("\t").append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append("\t").append(val2).append("\n");
DefaultCodec codec = new DefaultCodec();
codec.setConf(job);
Path expectedFile = new Path(workDir, file + codec.getDefaultExtension());
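    // Read the compressed file back through the same codec and rebuild its
    // contents line by line so they can be compared with the expected output.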
final FileInputStream istream = new FileInputStream(expectedFile.toString());
CompressionInputStream cistream = codec.createInputStream(istream);
LineReader reader = new LineReader(cistream);
String output = "";
Text out = new Text();
while (reader.readLine(out) > 0) {
output += out;
output += "\n";
}
reader.close();
assertEquals(expectedOutput.toString(), output);
}
public static void main(String[] args) throws Exception {
new TestTextOutputFormat().testFormat();
}
}
| 8,446 | 35.098291 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTaskStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestTaskStatus {
private static final Log LOG = LogFactory.getLog(TestTaskStatus.class);
@Test
public void testMapTaskStatusStartAndFinishTimes() {
    checkTaskStatuses(true);
}
@Test
public void testReduceTaskStatusStartAndFinishTimes() {
    checkTaskStatuses(false);
}
/**
* Private utility method which ensures uniform testing of newly created
* TaskStatus object.
*
* @param isMap
* true to test map task status, false for reduce.
*/
  private void checkTaskStatuses(boolean isMap) {
TaskStatus status = null;
if (isMap) {
status = new MapTaskStatus();
} else {
status = new ReduceTaskStatus();
}
long currentTime = System.currentTimeMillis();
// first try to set the finish time before
// start time is set.
status.setFinishTime(currentTime);
assertEquals("Finish time of the task status set without start time", 0,
status.getFinishTime());
// Now set the start time to right time.
status.setStartTime(currentTime);
assertEquals("Start time of the task status not set correctly.",
currentTime, status.getStartTime());
// try setting wrong start time to task status.
long wrongTime = -1;
status.setStartTime(wrongTime);
assertEquals(
"Start time of the task status is set to wrong negative value",
currentTime, status.getStartTime());
// finally try setting wrong finish time i.e. negative value.
status.setFinishTime(wrongTime);
assertEquals("Finish time of task status is set to wrong negative value",
0, status.getFinishTime());
status.setFinishTime(currentTime);
assertEquals("Finish time of the task status not set correctly.",
currentTime, status.getFinishTime());
// test with null task-diagnostics
TaskStatus ts = ((TaskStatus)status.clone());
ts.setDiagnosticInfo(null);
ts.setDiagnosticInfo("");
ts.setStateString(null);
ts.setStateString("");
((TaskStatus)status.clone()).statusUpdate(ts);
// test with null state-string
((TaskStatus)status.clone()).statusUpdate(0, null, null);
((TaskStatus)status.clone()).statusUpdate(0, "", null);
((TaskStatus)status.clone()).statusUpdate(null, 0, "", null, 1);
}
/**
* Test the {@link TaskStatus} against large sized task-diagnostic-info and
* state-string. Does the following
* - create Map/Reduce TaskStatus such that the task-diagnostic-info and
* state-string are small strings and check their contents
* - append them with small string and check their contents
* - append them with large string and check their size
* - update the status using statusUpdate() calls and check the size/contents
* - create Map/Reduce TaskStatus with large string and check their size
*/
@Test
public void testTaskDiagnosticsAndStateString() {
// check the default case
String test = "hi";
final int maxSize = 16;
TaskStatus status = new TaskStatus(null, 0, 0, null, test, test, null, null,
null) {
@Override
protected int getMaxStringSize() {
return maxSize;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
}
@Override
public boolean getIsMap() {
return false;
}
};
assertEquals("Small diagnostic info test failed",
status.getDiagnosticInfo(), test);
assertEquals("Small state string test failed", status.getStateString(),
test);
// now append some small string and check
String newDInfo = test.concat(test);
status.setDiagnosticInfo(test);
status.setStateString(newDInfo);
assertEquals("Small diagnostic info append failed",
newDInfo, status.getDiagnosticInfo());
assertEquals("Small state-string append failed",
newDInfo, status.getStateString());
// update the status with small state strings
TaskStatus newStatus = (TaskStatus)status.clone();
String newSInfo = "hi1";
newStatus.setStateString(newSInfo);
status.statusUpdate(newStatus);
newDInfo = newDInfo.concat(newStatus.getDiagnosticInfo());
assertEquals("Status-update on diagnostic-info failed",
newDInfo, status.getDiagnosticInfo());
assertEquals("Status-update on state-string failed",
newSInfo, status.getStateString());
newSInfo = "hi2";
status.statusUpdate(0, newSInfo, null);
assertEquals("Status-update on state-string failed",
newSInfo, status.getStateString());
newSInfo = "hi3";
status.statusUpdate(null, 0, newSInfo, null, 0);
assertEquals("Status-update on state-string failed",
newSInfo, status.getStateString());
// now append each with large string
String large = "hihihihihihihihihihi"; // 20 chars
status.setDiagnosticInfo(large);
status.setStateString(large);
assertEquals("Large diagnostic info append test failed",
maxSize, status.getDiagnosticInfo().length());
assertEquals("Large state-string append test failed",
maxSize, status.getStateString().length());
// update a large status with large strings
newStatus.setDiagnosticInfo(large + "0");
newStatus.setStateString(large + "1");
status.statusUpdate(newStatus);
assertEquals("Status-update on diagnostic info failed",
maxSize, status.getDiagnosticInfo().length());
assertEquals("Status-update on state-string failed",
maxSize, status.getStateString().length());
status.statusUpdate(0, large + "2", null);
assertEquals("Status-update on state-string failed",
maxSize, status.getStateString().length());
status.statusUpdate(null, 0, large + "3", null, 0);
assertEquals("Status-update on state-string failed",
maxSize, status.getStateString().length());
// test passing large string in constructor
status = new TaskStatus(null, 0, 0, null, large, large, null, null,
null) {
@Override
protected int getMaxStringSize() {
return maxSize;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
}
@Override
public boolean getIsMap() {
return false;
}
};
assertEquals("Large diagnostic info test failed",
maxSize, status.getDiagnosticInfo().length());
assertEquals("Large state-string test failed",
maxSize, status.getStateString().length());
}
}
| 7,665 | 35.855769 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapred.Task.TaskReporter;
import junit.framework.TestCase;
@SuppressWarnings(value={"unchecked", "deprecation"})
/**
 * This test verifies the support for a merge operation in Hadoop. The input
 * files are already sorted on the key. The test provides an external
 * MapOutputCollector implementation that simply copies the records to
 * different partitions while maintaining the sort order in each partition.
 * The Hadoop framework's merge on the reduce side then merges the created
 * partitions to generate the final output, which is sorted on the key.
*/
public class TestMerge extends TestCase {
private static final int NUM_HADOOP_DATA_NODES = 2;
// Number of input files is same as the number of mappers.
private static final int NUM_MAPPERS = 10;
// Number of reducers.
private static final int NUM_REDUCERS = 4;
// Number of lines per input file.
private static final int NUM_LINES = 1000;
// Where MR job's input will reside.
private static final Path INPUT_DIR = new Path("/testplugin/input");
// Where output goes.
private static final Path OUTPUT = new Path("/testplugin/output");
public void testMerge() throws Exception {
MiniDFSCluster dfsCluster = null;
MiniMRClientCluster mrCluster = null;
FileSystem fileSystem = null;
try {
Configuration conf = new Configuration();
// Start the mini-MR and mini-DFS clusters
dfsCluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_HADOOP_DATA_NODES).build();
fileSystem = dfsCluster.getFileSystem();
mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
NUM_HADOOP_DATA_NODES, conf);
// Generate input.
createInput(fileSystem);
// Run the test.
runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem);
} finally {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
if (mrCluster != null) {
mrCluster.stop();
}
}
}
private void createInput(FileSystem fs) throws Exception {
fs.delete(INPUT_DIR, true);
for (int i = 0; i < NUM_MAPPERS; i++) {
OutputStream os = fs.create(new Path(INPUT_DIR, "input_" + i + ".txt"));
Writer writer = new OutputStreamWriter(os);
for (int j = 0; j < NUM_LINES; j++) {
// Create sorted key, value pairs.
int k = j + 1;
String formattedNumber = String.format("%09d", k);
writer.write(formattedNumber + " " + formattedNumber + "\n");
}
writer.close();
}
}
private void runMergeTest(JobConf job, FileSystem fileSystem)
throws Exception {
// Delete any existing output.
fileSystem.delete(OUTPUT, true);
job.setJobName("MergeTest");
JobClient client = new JobClient(job);
RunningJob submittedJob = null;
FileInputFormat.setInputPaths(job, INPUT_DIR);
FileOutputFormat.setOutputPath(job, OUTPUT);
job.set("mapreduce.output.textoutputformat.separator", " ");
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setMapperClass(MyMapper.class);
job.setPartitionerClass(MyPartitioner.class);
job.setOutputFormat(TextOutputFormat.class);
job.setNumReduceTasks(NUM_REDUCERS);
job.set(JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR,
MapOutputCopier.class.getName());
try {
submittedJob = client.submitJob(job);
try {
if (! client.monitorAndPrintJob(job, submittedJob)) {
throw new IOException("Job failed!");
}
} catch(InterruptedException ie) {
Thread.currentThread().interrupt();
}
} catch(IOException ioe) {
System.err.println("Job failed with: " + ioe);
} finally {
verifyOutput(submittedJob, fileSystem);
}
}
private void verifyOutput(RunningJob submittedJob, FileSystem fileSystem)
throws Exception {
FSDataInputStream dis = null;
long numValidRecords = 0;
long numInvalidRecords = 0;
long numMappersLaunched = NUM_MAPPERS;
String prevKeyValue = "000000000";
Path[] fileList =
FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT,
new Utils.OutputFileUtils.OutputFilesFilter()));
for (Path outFile : fileList) {
try {
dis = fileSystem.open(outFile);
String record;
while((record = dis.readLine()) != null) {
// Split the line into key and value.
int blankPos = record.indexOf(" ");
String keyString = record.substring(0, blankPos);
String valueString = record.substring(blankPos+1);
// Check for sorted output and correctness of record.
if (keyString.compareTo(prevKeyValue) >= 0
&& keyString.equals(valueString)) {
prevKeyValue = keyString;
numValidRecords++;
} else {
numInvalidRecords++;
}
}
} finally {
if (dis != null) {
dis.close();
dis = null;
}
}
}
// Make sure we got all input records in the output in sorted order.
assertEquals((long)(NUM_MAPPERS*NUM_LINES), numValidRecords);
// Make sure there is no extraneous invalid record.
assertEquals(0, numInvalidRecords);
}
/**
* A mapper implementation that assumes that key text contains valid integers
* in displayable form.
*/
public static class MyMapper extends MapReduceBase
implements Mapper<LongWritable, Text, Text, Text> {
private Text keyText;
private Text valueText;
public MyMapper() {
keyText = new Text();
valueText = new Text();
}
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
String record = value.toString();
int blankPos = record.indexOf(" ");
keyText.set(record.substring(0, blankPos));
valueText.set(record.substring(blankPos+1));
output.collect(keyText, valueText);
}
public void close() throws IOException {
}
}
/**
* Partitioner implementation to make sure that output is in total sorted
* order. We basically route key ranges to different reducers such that
* key values monotonically increase with the partition number. For example,
* in this test, the keys are numbers from 1 to 1000 in the form "000000001"
* to "000001000" in each input file. The keys "000000001" to "000000250" are
* routed to partition 0, "000000251" to "000000500" are routed to partition 1
* and so on since we have 4 reducers.
*/
static class MyPartitioner implements Partitioner<Text, Text> {
public MyPartitioner() {
}
public void configure(JobConf job) {
}
public int getPartition(Text key, Text value, int numPartitions) {
int keyValue = 0;
try {
keyValue = Integer.parseInt(key.toString());
} catch(NumberFormatException nfe) {
keyValue = 0;
}
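      // Worked example with the values used in this test (NUM_LINES = 1000,
      // numPartitions = NUM_REDUCERS = 4): key 250 -> (4 * 249) / 1000 = 0,
      // key 251 -> (4 * 250) / 1000 = 1, and key 1000 -> (4 * 999) / 1000 = 3,
      // matching the ranges described in the class comment above.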
int partitionNumber = (numPartitions*(Math.max(0, keyValue-1)))/NUM_LINES;
return partitionNumber;
}
}
/**
   * Implementation of a map output copier (one that avoids sorting) on the
   * map side. It maintains keys in the input order within each partition
   * created for the reducers.
*/
static class MapOutputCopier<K, V>
implements MapOutputCollector<K, V> {
private static final int BUF_SIZE = 128*1024;
private MapTask mapTask;
private JobConf jobConf;
private TaskReporter reporter;
private int numberOfPartitions;
private Class<K> keyClass;
private Class<V> valueClass;
private KeyValueWriter<K, V> recordWriters[];
private ByteArrayOutputStream outStreams[];
public MapOutputCopier() {
}
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context)
throws IOException, ClassNotFoundException {
this.mapTask = context.getMapTask();
this.jobConf = context.getJobConf();
this.reporter = context.getReporter();
numberOfPartitions = jobConf.getNumReduceTasks();
keyClass = (Class<K>)jobConf.getMapOutputKeyClass();
valueClass = (Class<V>)jobConf.getMapOutputValueClass();
recordWriters = new KeyValueWriter[numberOfPartitions];
outStreams = new ByteArrayOutputStream[numberOfPartitions];
// Create output streams for partitions.
for (int i = 0; i < numberOfPartitions; i++) {
outStreams[i] = new ByteArrayOutputStream();
recordWriters[i] = new KeyValueWriter<K, V>(jobConf, outStreams[i],
keyClass, valueClass);
}
}
public synchronized void collect(K key, V value, int partitionNumber
) throws IOException, InterruptedException {
if (partitionNumber >= 0 && partitionNumber < numberOfPartitions) {
recordWriters[partitionNumber].write(key, value);
} else {
throw new IOException("Invalid partition number: " + partitionNumber);
}
reporter.progress();
}
public void close() throws IOException, InterruptedException {
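      // Flush the per-partition buffers into the single map output file and
      // build the matching spill index, mimicking the on-disk layout the
      // default sorting collector would have produced for the reducers.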
long totalSize = 0;
for (int i = 0; i < numberOfPartitions; i++) {
recordWriters[i].close();
outStreams[i].close();
totalSize += outStreams[i].size();
}
MapOutputFile mapOutputFile = mapTask.getMapOutputFile();
Path finalOutput = mapOutputFile.getOutputFileForWrite(totalSize);
Path indexPath = mapOutputFile.getOutputIndexFileForWrite(
numberOfPartitions*mapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH);
// Copy partitions to final map output.
copyPartitions(finalOutput, indexPath);
}
public void flush() throws IOException, InterruptedException,
ClassNotFoundException {
}
private void copyPartitions(Path mapOutputPath, Path indexPath)
throws IOException {
FileSystem localFs = FileSystem.getLocal(jobConf);
FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
IndexRecord indexRecord = new IndexRecord();
for (int i = 0; i < numberOfPartitions; i++) {
indexRecord.startOffset = rawOutput.getPos();
byte buffer[] = outStreams[i].toByteArray();
IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
checksumOutput.write(buffer);
// Write checksum.
checksumOutput.finish();
// Write index record
indexRecord.rawLength = (long)buffer.length;
indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
spillRecord.putIndex(indexRecord, i);
reporter.progress();
}
rawOutput.close();
spillRecord.writeToFile(indexPath, jobConf);
}
}
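  /**
   * Minimal record writer used by {@link MapOutputCopier}. Each record is
   * written as a vint key length, a vint value length and then the serialized
   * key/value bytes; close() appends two IFile.EOF_MARKER vints so the stream
   * matches the IFile record layout the framework's shuffle expects to read.
   */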
static class KeyValueWriter<K, V> {
private Class<K> keyClass;
private Class<V> valueClass;
private DataOutputBuffer dataBuffer;
private Serializer<K> keySerializer;
private Serializer<V> valueSerializer;
private DataOutputStream outputStream;
public KeyValueWriter(Configuration conf, OutputStream output,
Class<K> kyClass, Class<V> valClass
) throws IOException {
keyClass = kyClass;
valueClass = valClass;
dataBuffer = new DataOutputBuffer();
SerializationFactory serializationFactory
= new SerializationFactory(conf);
keySerializer
= (Serializer<K>)serializationFactory.getSerializer(keyClass);
keySerializer.open(dataBuffer);
valueSerializer
= (Serializer<V>)serializationFactory.getSerializer(valueClass);
valueSerializer.open(dataBuffer);
outputStream = new DataOutputStream(output);
}
public void write(K key, V value) throws IOException {
if (key.getClass() != keyClass) {
throw new IOException("wrong key class: "+ key.getClass()
+" is not "+ keyClass);
}
if (value.getClass() != valueClass) {
throw new IOException("wrong value class: "+ value.getClass()
+" is not "+ valueClass);
}
// Append the 'key'
keySerializer.serialize(key);
int keyLength = dataBuffer.getLength();
if (keyLength < 0) {
throw new IOException("Negative key-length not allowed: " + keyLength +
" for " + key);
}
// Append the 'value'
valueSerializer.serialize(value);
int valueLength = dataBuffer.getLength() - keyLength;
if (valueLength < 0) {
throw new IOException("Negative value-length not allowed: " +
valueLength + " for " + value);
}
// Write the record out
WritableUtils.writeVInt(outputStream, keyLength);
WritableUtils.writeVInt(outputStream, valueLength);
outputStream.write(dataBuffer.getData(), 0, dataBuffer.getLength());
// Reset
dataBuffer.reset();
}
public void close() throws IOException {
keySerializer.close();
valueSerializer.close();
WritableUtils.writeVInt(outputStream, IFile.EOF_MARKER);
WritableUtils.writeVInt(outputStream, IFile.EOF_MARKER);
outputStream.close();
}
}
}
| 15,300 | 36.594595 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestInputPath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.StringUtils;
public class TestInputPath extends TestCase {
public void testInputPath() throws Exception {
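    // Input paths are stored in the configuration as a single comma-separated
    // string, so commas (and the escape character itself) inside a path have
    // to be escaped by FileInputFormat and restored on the way back out. Each
    // block below round-trips paths through setInputPaths()/addInputPath()
    // and getInputPaths() and checks that they come back unchanged.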
JobConf jobConf = new JobConf();
Path workingDir = jobConf.getWorkingDirectory();
Path path = new Path(workingDir,
"xx{y"+StringUtils.COMMA_STR+"z}");
FileInputFormat.setInputPaths(jobConf, path);
Path[] paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(1, paths.length);
assertEquals(path.toString(), paths[0].toString());
StringBuilder pathStr = new StringBuilder();
pathStr.append(StringUtils.ESCAPE_CHAR);
pathStr.append(StringUtils.ESCAPE_CHAR);
pathStr.append(StringUtils.COMMA);
pathStr.append(StringUtils.COMMA);
pathStr.append('a');
path = new Path(workingDir, pathStr.toString());
FileInputFormat.setInputPaths(jobConf, path);
paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(1, paths.length);
assertEquals(path.toString(), paths[0].toString());
pathStr.setLength(0);
pathStr.append(StringUtils.ESCAPE_CHAR);
pathStr.append("xx");
pathStr.append(StringUtils.ESCAPE_CHAR);
path = new Path(workingDir, pathStr.toString());
Path path1 = new Path(workingDir,
"yy"+StringUtils.COMMA_STR+"zz");
FileInputFormat.setInputPaths(jobConf, path);
FileInputFormat.addInputPath(jobConf, path1);
paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(2, paths.length);
assertEquals(path.toString(), paths[0].toString());
assertEquals(path1.toString(), paths[1].toString());
FileInputFormat.setInputPaths(jobConf, path, path1);
paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(2, paths.length);
assertEquals(path.toString(), paths[0].toString());
assertEquals(path1.toString(), paths[1].toString());
Path[] input = new Path[] {path, path1};
FileInputFormat.setInputPaths(jobConf, input);
paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(2, paths.length);
assertEquals(path.toString(), paths[0].toString());
assertEquals(path1.toString(), paths[1].toString());
pathStr.setLength(0);
String str1 = "{a{b,c},de}";
String str2 = "xyz";
String str3 = "x{y,z}";
pathStr.append(str1);
pathStr.append(StringUtils.COMMA);
pathStr.append(str2);
pathStr.append(StringUtils.COMMA);
pathStr.append(str3);
FileInputFormat.setInputPaths(jobConf, pathStr.toString());
paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(3, paths.length);
assertEquals(new Path(workingDir, str1).toString(), paths[0].toString());
assertEquals(new Path(workingDir, str2).toString(), paths[1].toString());
assertEquals(new Path(workingDir, str3).toString(), paths[2].toString());
pathStr.setLength(0);
String str4 = "abc";
String str5 = "pq{r,s}";
pathStr.append(str4);
pathStr.append(StringUtils.COMMA);
pathStr.append(str5);
FileInputFormat.addInputPaths(jobConf, pathStr.toString());
paths = FileInputFormat.getInputPaths(jobConf);
assertEquals(5, paths.length);
assertEquals(new Path(workingDir, str1).toString(), paths[0].toString());
assertEquals(new Path(workingDir, str2).toString(), paths[1].toString());
assertEquals(new Path(workingDir, str3).toString(), paths[2].toString());
assertEquals(new Path(workingDir, str4).toString(), paths[3].toString());
assertEquals(new Path(workingDir, str5).toString(), paths[4].toString());
}
}
| 4,524 | 40.136364 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineSequenceFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.IOException;
import java.util.BitSet;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.lib.CombineFileSplit;
import org.apache.hadoop.mapred.lib.CombineSequenceFileInputFormat;
import org.junit.Test;
public class TestCombineSequenceFileInputFormat {
private static final Log LOG =
LogFactory.getLog(TestCombineSequenceFileInputFormat.class);
private static Configuration conf = new Configuration();
private static FileSystem localFs = null;
static {
try {
conf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(conf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
@SuppressWarnings("deprecation")
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "/tmp")),
"TestCombineSequenceFileInputFormat").makeQualified(localFs);
@Test(timeout=10000)
public void testFormat() throws Exception {
JobConf job = new JobConf(conf);
Reporter reporter = Reporter.NULL;
Random random = new Random();
long seed = random.nextLong();
LOG.info("seed = "+seed);
random.setSeed(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
final int length = 10000;
final int numFiles = 10;
    // create files with various lengths
createFiles(length, numFiles, random);
// create a combine split for the files
InputFormat<IntWritable, BytesWritable> format =
new CombineSequenceFileInputFormat<IntWritable, BytesWritable>();
IntWritable key = new IntWritable();
BytesWritable value = new BytesWritable();
for (int i = 0; i < 3; i++) {
int numSplits =
random.nextInt(length/(SequenceFile.SYNC_INTERVAL/20))+1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(job, numSplits);
LOG.info("splitting: got = " + splits.length);
// we should have a single split as the length is comfortably smaller than
// the block size
assertEquals("We got more than one splits!", 1, splits.length);
InputSplit split = splits[0];
assertEquals("It should be CombineFileSplit",
CombineFileSplit.class, split.getClass());
// check each split
BitSet bits = new BitSet(length);
RecordReader<IntWritable, BytesWritable> reader =
format.getRecordReader(split, job, reporter);
try {
while (reader.next(key, value)) {
assertFalse("Key in multiple partitions.", bits.get(key.get()));
bits.set(key.get());
}
} finally {
reader.close();
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
private static class Range {
private final int start;
private final int end;
Range(int start, int end) {
this.start = start;
this.end = end;
}
@Override
public String toString() {
return "(" + start + ", " + end + ")";
}
}
private static Range[] createRanges(int length, int numFiles, Random random) {
// generate a number of files with various lengths
Range[] ranges = new Range[numFiles];
for (int i = 0; i < numFiles; i++) {
int start = i == 0 ? 0 : ranges[i-1].end;
int end = i == numFiles - 1 ?
length :
(length/numFiles)*(2*i + 1)/2 + random.nextInt(length/numFiles) + 1;
ranges[i] = new Range(start, end);
}
return ranges;
}
private static void createFiles(int length, int numFiles, Random random)
throws IOException {
Range[] ranges = createRanges(length, numFiles, random);
for (int i = 0; i < numFiles; i++) {
Path file = new Path(workDir, "test_" + i + ".seq");
      // create a file holding the keys in this range
@SuppressWarnings("deprecation")
SequenceFile.Writer writer =
SequenceFile.createWriter(localFs, conf, file,
IntWritable.class, BytesWritable.class);
Range range = ranges[i];
try {
for (int j = range.start; j < range.end; j++) {
IntWritable key = new IntWritable(j);
byte[] data = new byte[random.nextInt(10)];
random.nextBytes(data);
BytesWritable value = new BytesWritable(data);
writer.append(key, value);
}
} finally {
writer.close();
}
}
}
}
| 5,673 | 32.181287 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Assert;
import org.junit.Test;
@SuppressWarnings("deprecation")
public class JobClientUnitTest {
public class TestJobClient extends JobClient {
TestJobClient(JobConf jobConf) throws IOException {
super(jobConf);
}
void setCluster(Cluster cluster) {
this.cluster = cluster;
}
}
public class TestJobClientGetJob extends TestJobClient {
int lastGetJobRetriesCounter = 0;
int getJobRetriesCounter = 0;
int getJobRetries = 0;
RunningJob runningJob;
TestJobClientGetJob(JobConf jobConf) throws IOException {
super(jobConf);
}
public int getLastGetJobRetriesCounter() {
return lastGetJobRetriesCounter;
}
public void setGetJobRetries(int getJobRetries) {
this.getJobRetries = getJobRetries;
}
public void setRunningJob(RunningJob runningJob) {
this.runningJob = runningJob;
}
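    // Simulates a job that is not yet visible to the cluster: return null for
    // the first getJobRetries calls and only then hand back the RunningJob,
    // recording how many retries the calling getJob() needed.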
protected RunningJob getJobInner(final JobID jobid) throws IOException {
if (getJobRetriesCounter >= getJobRetries) {
lastGetJobRetriesCounter = getJobRetriesCounter;
getJobRetriesCounter = 0;
return runningJob;
}
getJobRetriesCounter++;
return null;
}
}
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
TestJobClient client = new TestJobClient(new JobConf());
Cluster mockCluster = mock(Cluster.class);
client.setCluster(mockCluster);
JobID id = new JobID("test",0);
when(mockCluster.getJob(id)).thenReturn(null);
TaskReport[] result = client.getMapTaskReports(id);
assertEquals(0, result.length);
verify(mockCluster).getJob(id);
}
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
TestJobClient client = new TestJobClient(new JobConf());
Cluster mockCluster = mock(Cluster.class);
client.setCluster(mockCluster);
JobID id = new JobID("test",0);
when(mockCluster.getJob(id)).thenReturn(null);
TaskReport[] result = client.getReduceTaskReports(id);
assertEquals(0, result.length);
verify(mockCluster).getJob(id);
}
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
TestJobClient client = new TestJobClient(new JobConf());
Cluster mockCluster = mock(Cluster.class);
client.setCluster(mockCluster);
JobID id = new JobID("test",0);
when(mockCluster.getJob(id)).thenReturn(null);
TaskReport[] result = client.getSetupTaskReports(id);
assertEquals(0, result.length);
verify(mockCluster).getJob(id);
}
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
TestJobClient client = new TestJobClient(new JobConf());
Cluster mockCluster = mock(Cluster.class);
client.setCluster(mockCluster);
JobID id = new JobID("test",0);
when(mockCluster.getJob(id)).thenReturn(null);
TaskReport[] result = client.getCleanupTaskReports(id);
assertEquals(0, result.length);
verify(mockCluster).getJob(id);
}
@Test
public void testShowJob() throws Exception {
TestJobClient client = new TestJobClient(new JobConf());
long startTime = System.currentTimeMillis();
JobID jobID = new JobID(String.valueOf(startTime), 12345);
JobStatus mockJobStatus = mock(JobStatus.class);
when(mockJobStatus.getJobID()).thenReturn(jobID);
when(mockJobStatus.getJobName()).thenReturn(jobID.toString());
when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
when(mockJobStatus.getStartTime()).thenReturn(startTime);
when(mockJobStatus.getUsername()).thenReturn("mockuser");
when(mockJobStatus.getQueue()).thenReturn("mockqueue");
when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
when(mockJobStatus.getUsedMem()).thenReturn(1024);
when(mockJobStatus.getReservedMem()).thenReturn(512);
when(mockJobStatus.getNeededMem()).thenReturn(2048);
when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");
Job mockJob = mock(Job.class);
when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(
new TaskReport[5]);
Cluster mockCluster = mock(Cluster.class);
when(mockCluster.getJob(jobID)).thenReturn(mockJob);
client.setCluster(mockCluster);
ByteArrayOutputStream out = new ByteArrayOutputStream();
client.displayJobList(new JobStatus[] {mockJobStatus}, new PrintWriter(out));
String commandLineOutput = out.toString();
System.out.println(commandLineOutput);
Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));
verify(mockJobStatus, atLeastOnce()).getJobID();
verify(mockJobStatus).getState();
verify(mockJobStatus).getStartTime();
verify(mockJobStatus).getUsername();
verify(mockJobStatus).getQueue();
verify(mockJobStatus).getPriority();
verify(mockJobStatus).getNumUsedSlots();
verify(mockJobStatus).getNumReservedSlots();
verify(mockJobStatus).getUsedMem();
verify(mockJobStatus).getReservedMem();
verify(mockJobStatus).getNeededMem();
verify(mockJobStatus).getSchedulingInfo();
// This call should not go to each AM.
verify(mockCluster, never()).getJob(jobID);
verify(mockJob, never()).getTaskReports(isA(TaskType.class));
}
@Test
public void testGetJobWithUnknownJob() throws Exception {
TestJobClient client = new TestJobClient(new JobConf());
Cluster mockCluster = mock(Cluster.class);
client.setCluster(mockCluster);
JobID id = new JobID("unknown",0);
when(mockCluster.getJob(id)).thenReturn(null);
assertNull(client.getJob(id));
}
@Test
public void testGetJobRetry() throws Exception {
//To prevent the test from running for a very long time, lower the retry
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, "3");
TestJobClientGetJob client = new TestJobClientGetJob(conf);
JobID id = new JobID("ajob",1);
RunningJob rj = mock(RunningJob.class);
client.setRunningJob(rj);
//no retry
assertNotNull(client.getJob(id));
assertEquals(client.getLastGetJobRetriesCounter(), 0);
//3 retry
client.setGetJobRetries(3);
assertNotNull(client.getJob(id));
assertEquals(client.getLastGetJobRetriesCounter(), 3);
    // beyond MR_CLIENT_JOB_MAX_RETRIES, getJob() will return null
client.setGetJobRetries(5);
assertNull(client.getJob(id));
}
}
| 8,183 | 31.736 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * This is a wordcount application that tests the count of records that get
 * spilled to disk. It generates simple text input files, then runs the
 * wordcount map/reduce application on (1) 3 input files (with 3 maps and
 * 1 reduce) and verifies the counters, and (2) 4 input files (with 4 maps
 * and 1 reduce) and verifies the counters. The wordcount application reads
 * the text input files, breaks each line into words and counts them. The
 * output is a locally sorted list of words and the count of how often they
 * occurred.
*
*/
public class TestJobCounters {
private void validateFileCounters(Counters counter, long fileBytesRead,
long fileBytesWritten, long mapOutputBytes,
long mapOutputMaterializedBytes) {
assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
.getValue() != 0);
assertEquals(fileBytesRead,
counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());
assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
.getValue() != 0);
if (mapOutputBytes >= 0) {
assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
}
if (mapOutputMaterializedBytes >= 0) {
assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
.getValue() != 0);
}
}
@SuppressWarnings("deprecation")
private void validateOldFileCounters(Counters counter, long fileBytesRead,
long fileBytesWritten, long mapOutputBytes,
long mapOutputMaterializedBytes) {
assertEquals(fileBytesRead,
counter.findCounter(FileInputFormat.Counter.BYTES_READ).getValue());
assertEquals(
fileBytesRead,
counter
.findCounter(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.BYTES_READ)
.getValue());
assertEquals(fileBytesWritten,
counter.findCounter(FileOutputFormat.Counter.BYTES_WRITTEN).getValue());
assertEquals(
fileBytesWritten,
counter
.findCounter(
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.BYTES_WRITTEN)
.getValue());
if (mapOutputBytes >= 0) {
assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
}
if (mapOutputMaterializedBytes >= 0) {
assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
.getValue() != 0);
}
}
private void validateCounters(Counters counter, long spillRecCnt,
long mapInputRecords, long mapOutputRecords) {
    // Check if the number of spilled records is the same as expected
assertEquals(spillRecCnt,
counter.findCounter(TaskCounter.SPILLED_RECORDS).getCounter());
assertEquals(mapInputRecords,
counter.findCounter(TaskCounter.MAP_INPUT_RECORDS).getCounter());
assertEquals(mapOutputRecords,
counter.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getCounter());
}
private void removeWordsFile(Path inpFile, Configuration conf)
throws IOException {
final FileSystem fs = inpFile.getFileSystem(conf);
if (fs.exists(inpFile) && !fs.delete(inpFile, false)) {
throw new IOException("Failed to delete " + inpFile);
}
}
private static void createWordsFile(Path inpFile, Configuration conf)
throws IOException {
final FileSystem fs = inpFile.getFileSystem(conf);
if (fs.exists(inpFile)) {
return;
}
FSDataOutputStream out = fs.create(inpFile);
try {
      // 1024*4 = 4096 unique words --- each repeated 5 times => 20480 words per file
int REPLICAS=5, NUMLINES=1024, NUMWORDSPERLINE=4;
final String WORD = "zymurgy"; // 7 bytes + 4 id bytes
final Formatter fmt = new Formatter(new StringBuilder());
for (int i = 0; i < REPLICAS; i++) {
for (int j = 1; j <= NUMLINES*NUMWORDSPERLINE; j+=NUMWORDSPERLINE) {
((StringBuilder)fmt.out()).setLength(0);
for (int k = 0; k < NUMWORDSPERLINE; ++k) {
fmt.format("%s%04d ", WORD, j + k);
}
((StringBuilder)fmt.out()).append("\n");
out.writeBytes(fmt.toString());
}
}
} finally {
out.close();
}
}
private static Path IN_DIR = null;
private static Path OUT_DIR = null;
private static Path testdir = null;
private static Path[] inFiles = new Path[5];
private static long getFileSize(Path path) throws IOException {
FileSystem fs = FileSystem.getLocal(new Configuration());
long len = 0;
len += fs.getFileStatus(path).getLen();
Path crcPath = new Path(path.getParent(), "." + path.getName() + ".crc");
if (fs.exists(crcPath)) {
len += fs.getFileStatus(crcPath).getLen();
}
return len;
}
@BeforeClass
public static void initPaths() throws IOException {
final Configuration conf = new Configuration();
final Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data", "/tmp"));
testdir = new Path(TEST_ROOT_DIR, "spilledRecords.countertest");
IN_DIR = new Path(testdir, "in");
OUT_DIR = new Path(testdir, "out");
FileSystem fs = FileSystem.getLocal(conf);
testdir = new Path(TEST_ROOT_DIR, "spilledRecords.countertest");
if (fs.exists(testdir) && !fs.delete(testdir, true)) {
throw new IOException("Could not delete " + testdir);
}
if (!fs.mkdirs(IN_DIR)) {
throw new IOException("Mkdirs failed to create " + IN_DIR);
}
for (int i = 0; i < inFiles.length; i++) {
inFiles[i] = new Path(IN_DIR, "input5_2k_" + i);
}
    // create 3 input files, each with 5120 lines (20480 words)
createWordsFile(inFiles[0], conf);
createWordsFile(inFiles[1], conf);
createWordsFile(inFiles[2], conf);
}
@AfterClass
public static void cleanup() throws IOException {
//clean up the input and output files
final Configuration conf = new Configuration();
final FileSystem fs = testdir.getFileSystem(conf);
if (fs.exists(testdir)) {
fs.delete(testdir, true);
}
}
public static JobConf createConfiguration() throws IOException {
JobConf baseConf = new JobConf(TestJobCounters.class);
baseConf.setOutputKeyClass(Text.class);
baseConf.setOutputValueClass(IntWritable.class);
baseConf.setMapperClass(WordCount.MapClass.class);
baseConf.setCombinerClass(WordCount.Reduce.class);
baseConf.setReducerClass(WordCount.Reduce.class);
baseConf.setNumReduceTasks(1);
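    // Use a tiny 1MB sort buffer that spills at 50% so the maps spill at
    // predictable points, and require at least 3 spills before the combiner
    // runs during the merge; the expected counter values in the tests below
    // are derived from these settings.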
baseConf.setInt(JobContext.IO_SORT_MB, 1);
baseConf.set(JobContext.MAP_SORT_SPILL_PERCENT, "0.50");
baseConf.setInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
return baseConf;
}
public static Job createJob() throws IOException {
final Configuration conf = new Configuration();
final Job baseJob = Job.getInstance(conf);
baseJob.setOutputKeyClass(Text.class);
baseJob.setOutputValueClass(IntWritable.class);
baseJob.setMapperClass(NewMapTokenizer.class);
baseJob.setCombinerClass(NewSummer.class);
baseJob.setReducerClass(NewSummer.class);
baseJob.setNumReduceTasks(1);
baseJob.getConfiguration().setInt(JobContext.IO_SORT_MB, 1);
baseJob.getConfiguration().set(JobContext.MAP_SORT_SPILL_PERCENT, "0.50");
baseJob.getConfiguration().setInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setMinInputSplitSize(
baseJob, Long.MAX_VALUE);
return baseJob;
}
@Test
public void testOldCounterA() throws Exception {
JobConf conf = createConfiguration();
conf.setNumMapTasks(3);
conf.setInt(JobContext.IO_SORT_FACTOR, 2);
removeWordsFile(inFiles[3], conf);
removeWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
FileInputFormat.setInputPaths(conf, IN_DIR);
FileOutputFormat.setOutputPath(conf, new Path(OUT_DIR, "outputO0"));
RunningJob myJob = JobClient.runJob(conf);
Counters c1 = myJob.getCounters();
// Each record requires 16 bytes of metadata, 16 bytes per serialized rec
// (vint word len + word + IntWritable) = (1 + 11 + 4)
// (2^20 buf * .5 spill pcnt) / 32 bytes/record = 2^14 recs per spill
// Each file contains 5 replicas of 4096 words, so the first spill will
// contain 4 (2^14 rec / 2^12 rec/replica) replicas, the second just one.
    // Each map spills twice, emitting 4096 records per spill from the
    // combiner. The merge adds an additional 8192 records, as
// there are too few spills to combine (2 < 3)
// Each map spills 2^14 records, so maps spill 49152 records, combined.
// The combiner has emitted 24576 records to the reducer; these are all
// fetched straight to memory from the map side. The intermediate merge
// adds 8192 records per segment read; again, there are too few spills to
    // combine, so all records are spilled again. Total spilled records in the
    // reduce is 8192 records / map * 3 maps = 24576.
// Total: map + reduce = 49152 + 24576 = 73728
// 3 files, 5120 = 5 * 1024 rec/file = 15360 input records
// 4 records/line = 61440 output records
validateCounters(c1, 73728, 15360, 61440);
validateFileCounters(c1, inputSize, 0, 0, 0);
validateOldFileCounters(c1, inputSize, 61928, 0, 0);
}
@Test
public void testOldCounterB() throws Exception {
JobConf conf = createConfiguration();
createWordsFile(inFiles[3], conf);
removeWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
inputSize += getFileSize(inFiles[3]);
conf.setNumMapTasks(4);
conf.setInt(JobContext.IO_SORT_FACTOR, 2);
FileInputFormat.setInputPaths(conf, IN_DIR);
FileOutputFormat.setOutputPath(conf, new Path(OUT_DIR, "outputO1"));
RunningJob myJob = JobClient.runJob(conf);
Counters c1 = myJob.getCounters();
// As above, each map spills 2^14 records, so 4 maps spill 2^16 records
// In the reduce, there are two intermediate merges before the reduce.
// 1st merge: read + write = 8192 * 4
// 2nd merge: read + write = 8192 * 4
// final merge: 0
// Total reduce: 32768
// Total: map + reduce = 2^16 + 2^15 = 98304
    // 4 files, 5120 = 5 * 1024 rec/file = 20480 input records
// 4 records/line = 81920 output records
validateCounters(c1, 98304, 20480, 81920);
validateFileCounters(c1, inputSize, 0, 0, 0);
}
@Test
public void testOldCounterC() throws Exception {
JobConf conf = createConfiguration();
createWordsFile(inFiles[3], conf);
createWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
inputSize += getFileSize(inFiles[3]);
inputSize += getFileSize(inFiles[4]);
conf.setNumMapTasks(4);
conf.setInt(JobContext.IO_SORT_FACTOR, 3);
FileInputFormat.setInputPaths(conf, IN_DIR);
FileOutputFormat.setOutputPath(conf, new Path(OUT_DIR, "outputO2"));
RunningJob myJob = JobClient.runJob(conf);
Counters c1 = myJob.getCounters();
// As above, each map spills 2^14 records, so 5 maps spill 81920
// 1st merge: read + write = 6 * 8192
// final merge: unmerged = 2 * 8192
// Total reduce: 45056
    // 5 files, 5120 = 5 * 1024 rec/file = 25600 input records
// 4 records/line = 102400 output records
validateCounters(c1, 122880, 25600, 102400);
validateFileCounters(c1, inputSize, 0, 0, 0);
}
@Test
public void testOldCounterD() throws Exception {
JobConf conf = createConfiguration();
conf.setNumMapTasks(3);
conf.setInt(JobContext.IO_SORT_FACTOR, 2);
conf.setNumReduceTasks(0);
removeWordsFile(inFiles[3], conf);
removeWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
FileInputFormat.setInputPaths(conf, IN_DIR);
FileOutputFormat.setOutputPath(conf, new Path(OUT_DIR, "outputO3"));
RunningJob myJob = JobClient.runJob(conf);
Counters c1 = myJob.getCounters();
// No Reduces. Will go through the direct output collector. Spills=0
validateCounters(c1, 0, 15360, 61440);
validateFileCounters(c1, inputSize, 0, -1, -1);
}
@Test
public void testNewCounterA() throws Exception {
final Job job = createJob();
final Configuration conf = job.getConfiguration();
conf.setInt(JobContext.IO_SORT_FACTOR, 2);
removeWordsFile(inFiles[3], conf);
removeWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(
job, IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(
job, new Path(OUT_DIR, "outputN0"));
assertTrue(job.waitForCompletion(true));
final Counters c1 = Counters.downgrade(job.getCounters());
validateCounters(c1, 73728, 15360, 61440);
validateFileCounters(c1, inputSize, 0, 0, 0);
}
@Test
public void testNewCounterB() throws Exception {
final Job job = createJob();
final Configuration conf = job.getConfiguration();
conf.setInt(JobContext.IO_SORT_FACTOR, 2);
createWordsFile(inFiles[3], conf);
removeWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
inputSize += getFileSize(inFiles[3]);
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(
job, IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(
job, new Path(OUT_DIR, "outputN1"));
assertTrue(job.waitForCompletion(true));
final Counters c1 = Counters.downgrade(job.getCounters());
validateCounters(c1, 98304, 20480, 81920);
validateFileCounters(c1, inputSize, 0, 0, 0);
}
@Test
public void testNewCounterC() throws Exception {
final Job job = createJob();
final Configuration conf = job.getConfiguration();
conf.setInt(JobContext.IO_SORT_FACTOR, 3);
createWordsFile(inFiles[3], conf);
createWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
inputSize += getFileSize(inFiles[3]);
inputSize += getFileSize(inFiles[4]);
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(
job, IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(
job, new Path(OUT_DIR, "outputN2"));
assertTrue(job.waitForCompletion(true));
final Counters c1 = Counters.downgrade(job.getCounters());
validateCounters(c1, 122880, 25600, 102400);
validateFileCounters(c1, inputSize, 0, 0, 0);
}
@Test
public void testNewCounterD() throws Exception {
final Job job = createJob();
final Configuration conf = job.getConfiguration();
conf.setInt(JobContext.IO_SORT_FACTOR, 2);
job.setNumReduceTasks(0);
removeWordsFile(inFiles[3], conf);
removeWordsFile(inFiles[4], conf);
long inputSize = 0;
inputSize += getFileSize(inFiles[0]);
inputSize += getFileSize(inFiles[1]);
inputSize += getFileSize(inFiles[2]);
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,
IN_DIR);
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,
new Path(OUT_DIR, "outputN3"));
assertTrue(job.waitForCompletion(true));
final Counters c1 = Counters.downgrade(job.getCounters());
validateCounters(c1, 0, 15360, 61440);
validateFileCounters(c1, inputSize, 0, -1, -1);
}
@SuppressWarnings("deprecation")
@Test
public void testOldCounters() throws Exception {
Counters c1 = new Counters();
c1.incrCounter(FileInputFormat.Counter.BYTES_READ, 100);
c1.incrCounter(FileOutputFormat.Counter.BYTES_WRITTEN, 200);
c1.incrCounter(TaskCounter.MAP_OUTPUT_BYTES, 100);
c1.incrCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES, 100);
validateFileCounters(c1, 100, 200, 100, 100);
validateOldFileCounters(c1, 100, 200, 100, 100);
}
/**
* Increases the JVM's heap usage to the specified target value.
*/
static class MemoryLoader {
private static final int DEFAULT_UNIT_LOAD_SIZE = 10 * 1024 * 1024; // 10mb
// the target value to reach
private long targetValue;
// a list to hold the load objects
private List<String> loadObjects = new ArrayList<String>();
MemoryLoader(long targetValue) {
this.targetValue = targetValue;
}
/**
* Loads the memory to the target value.
*/
void load() {
while (Runtime.getRuntime().totalMemory() < targetValue) {
System.out.println("Loading memory with " + DEFAULT_UNIT_LOAD_SIZE
+ " characters. Current usage : "
+ Runtime.getRuntime().totalMemory());
// load some objects in the memory
loadObjects.add(RandomStringUtils.random(DEFAULT_UNIT_LOAD_SIZE));
// sleep for 100ms
try {
Thread.sleep(100);
} catch (InterruptedException ie) {}
}
}
}
/**
* A mapper that increases the JVM's heap usage to a target value configured
* via {@link MemoryLoaderMapper#TARGET_VALUE} using a {@link MemoryLoader}.
*/
@SuppressWarnings({"deprecation", "unchecked"})
static class MemoryLoaderMapper
extends MapReduceBase
implements Mapper<WritableComparable, Writable,
WritableComparable, Writable> {
static final String TARGET_VALUE = "map.memory-loader.target-value";
private static MemoryLoader loader = null;
public void map(WritableComparable key, Writable val,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
assertNotNull("Mapper not configured!", loader);
// load the memory
loader.load();
// work as identity mapper
output.collect(key, val);
}
public void configure(JobConf conf) {
loader = new MemoryLoader(conf.getLong(TARGET_VALUE, -1));
}
}
/**
* A reducer that increases the JVM's heap usage to a target value configured
* via {@link MemoryLoaderReducer#TARGET_VALUE} using a {@link MemoryLoader}.
*/
@SuppressWarnings({"deprecation", "unchecked"})
static class MemoryLoaderReducer extends MapReduceBase
implements Reducer<WritableComparable, Writable,
WritableComparable, Writable> {
static final String TARGET_VALUE = "reduce.memory-loader.target-value";
private static MemoryLoader loader = null;
public void reduce(WritableComparable key, Iterator<Writable> val,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
assertNotNull("Reducer not configured!", loader);
// load the memory
loader.load();
// work as identity reducer
output.collect(key, key);
}
public void configure(JobConf conf) {
loader = new MemoryLoader(conf.getLong(TARGET_VALUE, -1));
}
}
@SuppressWarnings("deprecation")
private long getTaskCounterUsage (JobClient client, JobID id, int numReports,
int taskId, TaskType type)
throws Exception {
TaskReport[] reports = null;
if (TaskType.MAP.equals(type)) {
reports = client.getMapTaskReports(id);
} else if (TaskType.REDUCE.equals(type)) {
reports = client.getReduceTaskReports(id);
}
assertNotNull("No reports found for task type '" + type.name()
+ "' in job " + id, reports);
// make sure that the total number of reports match the expected
assertEquals("Mismatch in task id", numReports, reports.length);
Counters counters = reports[taskId].getCounters();
return counters.getCounter(TaskCounter.COMMITTED_HEAP_BYTES);
}
// set up heap options, target value for memory loader and the output
// directory before running the job
@SuppressWarnings("deprecation")
private static RunningJob runHeapUsageTestJob(JobConf conf, Path testRootDir,
String heapOptions, long targetMapValue,
long targetReduceValue, FileSystem fs,
JobClient client, Path inDir)
throws IOException {
// define a job
JobConf jobConf = new JobConf(conf);
// configure the jobs
jobConf.setNumMapTasks(1);
jobConf.setNumReduceTasks(1);
jobConf.setMapperClass(MemoryLoaderMapper.class);
jobConf.setReducerClass(MemoryLoaderReducer.class);
jobConf.setInputFormat(TextInputFormat.class);
jobConf.setOutputKeyClass(LongWritable.class);
jobConf.setOutputValueClass(Text.class);
jobConf.setMaxMapAttempts(1);
jobConf.setMaxReduceAttempts(1);
jobConf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, heapOptions);
jobConf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, heapOptions);
// set the targets
jobConf.setLong(MemoryLoaderMapper.TARGET_VALUE, targetMapValue);
jobConf.setLong(MemoryLoaderReducer.TARGET_VALUE, targetReduceValue);
// set the input directory for the job
FileInputFormat.setInputPaths(jobConf, inDir);
// define job output folder
Path outDir = new Path(testRootDir, "out");
fs.delete(outDir, true);
FileOutputFormat.setOutputPath(jobConf, outDir);
// run the job
RunningJob job = client.submitJob(jobConf);
job.waitForCompletion();
JobID jobID = job.getID();
assertTrue("Job " + jobID + " failed!", job.isSuccessful());
return job;
}
/**
   * Tests {@link TaskCounter}'s {@link TaskCounter#COMMITTED_HEAP_BYTES}.
* The test consists of running a low-memory job which consumes less heap
* memory and then running a high-memory job which consumes more heap memory,
* and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller
* than that of the high-memory job.
* @throws IOException
*/
@Test
@SuppressWarnings("deprecation")
public void testHeapUsageCounter() throws Exception {
JobConf conf = new JobConf();
// create a local filesystem handle
FileSystem fileSystem = FileSystem.getLocal(conf);
// define test root directories
Path rootDir =
new Path(System.getProperty("test.build.data", "/tmp"));
Path testRootDir = new Path(rootDir, "testHeapUsageCounter");
// cleanup the test root directory
fileSystem.delete(testRootDir, true);
// set the current working directory
fileSystem.setWorkingDirectory(testRootDir);
fileSystem.deleteOnExit(testRootDir);
// create a mini cluster using the local file system
MiniMRCluster mrCluster =
new MiniMRCluster(1, fileSystem.getUri().toString(), 1);
try {
conf = mrCluster.createJobConf();
JobClient jobClient = new JobClient(conf);
// define job input
Path inDir = new Path(testRootDir, "in");
// create input data
createWordsFile(inDir, conf);
// configure and run a low memory job which will run without loading the
// jvm's heap
RunningJob lowMemJob =
runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G",
0, 0, fileSystem, jobClient, inDir);
JobID lowMemJobID = lowMemJob.getID();
long lowMemJobMapHeapUsage = getTaskCounterUsage(jobClient, lowMemJobID,
1, 0, TaskType.MAP);
System.out.println("Job1 (low memory job) map task heap usage: "
+ lowMemJobMapHeapUsage);
long lowMemJobReduceHeapUsage =
getTaskCounterUsage(jobClient, lowMemJobID, 1, 0, TaskType.REDUCE);
System.out.println("Job1 (low memory job) reduce task heap usage: "
+ lowMemJobReduceHeapUsage);
// configure and run a high memory job which will load the jvm's heap
RunningJob highMemJob =
runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G",
lowMemJobMapHeapUsage + 256*1024*1024,
lowMemJobReduceHeapUsage + 256*1024*1024,
fileSystem, jobClient, inDir);
JobID highMemJobID = highMemJob.getID();
long highMemJobMapHeapUsage = getTaskCounterUsage(jobClient, highMemJobID,
1, 0, TaskType.MAP);
System.out.println("Job2 (high memory job) map task heap usage: "
+ highMemJobMapHeapUsage);
long highMemJobReduceHeapUsage =
getTaskCounterUsage(jobClient, highMemJobID, 1, 0, TaskType.REDUCE);
System.out.println("Job2 (high memory job) reduce task heap usage: "
+ highMemJobReduceHeapUsage);
assertTrue("Incorrect map heap usage reported by the map task",
lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
assertTrue("Incorrect reduce heap usage reported by the reduce task",
lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
} finally {
// shutdown the mr cluster
mrCluster.shutdown();
try {
fileSystem.delete(testRootDir, true);
} catch (IOException ioe) {}
}
}
public static class NewMapTokenizer
extends org.apache.hadoop.mapreduce.Mapper<Object,Text,Text,IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
public static class NewSummer
extends org.apache.hadoop.mapreduce.Reducer<Text,IntWritable,
Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
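  // Illustrative sketch (not part of the original test): how the new-API
  // NewMapTokenizer / NewSummer classes above could be wired into a
  // word-count style job. The job name and the output sub-directory name are
  // hypothetical; input/output locations reuse the test's IN_DIR / OUT_DIR.
  private Job createSketchWordCountJob(Configuration conf) throws IOException {
    Job job = Job.getInstance(conf, "sketch-wordcount");
    job.setMapperClass(NewMapTokenizer.class);
    job.setCombinerClass(NewSummer.class);
    job.setReducerClass(NewSummer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(
        job, IN_DIR);
    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(
        job, new Path(OUT_DIR, "outputSketch"));
    return job;
  }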
}
| 28,727 | 37.050331 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestWritableJobConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.util.GenericsUtil;
public class TestWritableJobConf extends TestCase {
private static final Configuration CONF = new Configuration();
private <K> K serDeser(K conf) throws Exception {
SerializationFactory factory = new SerializationFactory(CONF);
Serializer<K> serializer =
factory.getSerializer(GenericsUtil.getClass(conf));
Deserializer<K> deserializer =
factory.getDeserializer(GenericsUtil.getClass(conf));
DataOutputBuffer out = new DataOutputBuffer();
serializer.open(out);
serializer.serialize(conf);
serializer.close();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
deserializer.open(in);
K after = deserializer.deserialize(null);
deserializer.close();
return after;
}
private void assertEquals(Configuration conf1, Configuration conf2) {
// We ignore deprecated keys because after deserializing, both the
// deprecated and the non-deprecated versions of a config are set.
// This is consistent with both the set and the get methods.
Iterator<Map.Entry<String, String>> iterator1 = conf1.iterator();
Map<String, String> map1 = new HashMap<String,String>();
while (iterator1.hasNext()) {
Map.Entry<String, String> entry = iterator1.next();
if (!Configuration.isDeprecated(entry.getKey())) {
map1.put(entry.getKey(), entry.getValue());
}
}
Iterator<Map.Entry<String, String>> iterator2 = conf2.iterator();
Map<String, String> map2 = new HashMap<String,String>();
while (iterator2.hasNext()) {
Map.Entry<String, String> entry = iterator2.next();
if (!Configuration.isDeprecated(entry.getKey())) {
map2.put(entry.getKey(), entry.getValue());
}
}
assertEquals(map1, map2);
}
public void testEmptyConfiguration() throws Exception {
JobConf conf = new JobConf();
Configuration deser = serDeser(conf);
assertEquals(conf, deser);
}
public void testNonEmptyConfiguration() throws Exception {
JobConf conf = new JobConf();
conf.set("a", "A");
conf.set("b", "B");
Configuration deser = serDeser(conf);
assertEquals(conf, deser);
}
public void testConfigurationWithDefaults() throws Exception {
JobConf conf = new JobConf(false);
conf.set("a", "A");
conf.set("b", "B");
Configuration deser = serDeser(conf);
assertEquals(conf, deser);
}
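  // Illustrative sketch (not part of the original tests): JobConf is itself a
  // Writable, so the same round trip can also be exercised directly through
  // write()/readFields() without going through a SerializationFactory.
  public void testWritableRoundTripSketch() throws Exception {
    JobConf conf = new JobConf(false);
    conf.set("a", "A");
    DataOutputBuffer out = new DataOutputBuffer();
    conf.write(out);                          // serialize the configuration
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    JobConf deser = new JobConf(false);
    deser.readFields(in);                     // rebuild it from the bytes
    assertEquals(conf, deser);                // reuse the comparison helper above
  }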
}
| 3,705 | 33.635514 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.Progressable;
/**
* This test exercises the ValueIterator.
*/
public class TestReduceTask extends TestCase {
static class NullProgress implements Progressable {
public void progress() { }
}
private static class Pair {
String key;
String value;
Pair(String k, String v) {
key = k;
value = v;
}
}
private static Pair[][] testCases =
new Pair[][]{
new Pair[]{
new Pair("k1", "v1"),
new Pair("k2", "v2"),
new Pair("k3", "v3"),
new Pair("k3", "v4"),
new Pair("k4", "v5"),
new Pair("k5", "v6"),
},
new Pair[]{
new Pair("", "v1"),
new Pair("k1", "v2"),
new Pair("k2", "v3"),
new Pair("k2", "v4"),
},
new Pair[] {},
new Pair[]{
new Pair("k1", "v1"),
new Pair("k1", "v2"),
new Pair("k1", "v3"),
new Pair("k1", "v4"),
}
};
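  // For example, with testCases[0] the ValuesIterator exercised below is
  // expected to group the (already sorted) records as:
  //   k1 -> [v1], k2 -> [v2], k3 -> [v3, v4], k4 -> [v5], k5 -> [v6].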
public void runValueIterator(Path tmpDir, Pair[] vals,
Configuration conf,
CompressionCodec codec) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
Path path = new Path(tmpDir, "data.in");
IFile.Writer<Text, Text> writer =
new IFile.Writer<Text, Text>(conf, rfs.create(path), Text.class, Text.class,
codec, null);
for(Pair p: vals) {
writer.append(new Text(p.key), new Text(p.value));
}
writer.close();
@SuppressWarnings("unchecked")
RawKeyValueIterator rawItr =
Merger.merge(conf, rfs, Text.class, Text.class, codec, new Path[]{path},
false, conf.getInt(JobContext.IO_SORT_FACTOR, 100), tmpDir,
new Text.Comparator(), new NullProgress(), null, null, null);
@SuppressWarnings("unchecked") // WritableComparators are not generic
ReduceTask.ValuesIterator valItr =
new ReduceTask.ValuesIterator<Text,Text>(rawItr,
WritableComparator.get(Text.class), Text.class, Text.class,
conf, new NullProgress());
int i = 0;
while (valItr.more()) {
Object key = valItr.getKey();
String keyString = key.toString();
// make sure it matches!
assertEquals(vals[i].key, keyString);
// must have at least 1 value!
assertTrue(valItr.hasNext());
while (valItr.hasNext()) {
String valueString = valItr.next().toString();
// make sure the values match
assertEquals(vals[i].value, valueString);
// make sure the keys match
assertEquals(vals[i].key, valItr.getKey().toString());
i += 1;
}
// make sure the key hasn't changed under the hood
assertEquals(keyString, valItr.getKey().toString());
valItr.nextKey();
}
assertEquals(vals.length, i);
// make sure we have progress equal to 1.0
assertEquals(1.0f, rawItr.getProgress().get());
}
public void testValueIterator() throws Exception {
Path tmpDir = new Path("build/test/test.reduce.task");
Configuration conf = new Configuration();
for (Pair[] testCase: testCases) {
runValueIterator(tmpDir, testCase, conf, null);
}
}
public void testValueIteratorWithCompression() throws Exception {
Path tmpDir = new Path("build/test/test.reduce.task.compression");
Configuration conf = new Configuration();
DefaultCodec codec = new DefaultCodec();
codec.setConf(conf);
for (Pair[] testCase: testCases) {
runValueIterator(tmpDir, testCase, conf, codec);
}
}
}
| 5,024 | 34.13986 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestUserDefinedCounters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
public class TestUserDefinedCounters extends TestCase {
private static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data", "/tmp")).toURI()
.toString().replace(' ', '+')
+ "/" + TestUserDefinedCounters.class.getName();
private final Path INPUT_DIR = new Path(TEST_ROOT_DIR + "/input");
private final Path OUTPUT_DIR = new Path(TEST_ROOT_DIR + "/out");
private final Path INPUT_FILE = new Path(INPUT_DIR , "inp");
enum EnumCounter { MAP_RECORDS }
static class CountingMapper<K, V> extends IdentityMapper<K, V> {
public void map(K key, V value,
OutputCollector<K, V> output, Reporter reporter)
throws IOException {
output.collect(key, value);
reporter.incrCounter(EnumCounter.MAP_RECORDS, 1);
reporter.incrCounter("StringCounter", "MapRecords", 1);
}
}
private void cleanAndCreateInput(FileSystem fs) throws IOException {
fs.delete(INPUT_DIR, true);
fs.delete(OUTPUT_DIR, true);
OutputStream os = fs.create(INPUT_FILE);
Writer wr = new OutputStreamWriter(os);
wr.write("hello1\n");
wr.write("hello2\n");
wr.write("hello3\n");
wr.write("hello4\n");
wr.close();
}
public void testMapReduceJob() throws Exception {
JobConf conf = new JobConf(TestUserDefinedCounters.class);
conf.setJobName("UserDefinedCounters");
FileSystem fs = FileSystem.get(conf);
cleanAndCreateInput(fs);
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(CountingMapper.class);
conf.setReducerClass(IdentityReducer.class);
FileInputFormat.setInputPaths(conf, INPUT_DIR);
FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
RunningJob runningJob = JobClient.runJob(conf);
Path[] outputFiles = FileUtil.stat2Paths(
fs.listStatus(OUTPUT_DIR,
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = fs.open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
int counter = 0;
while (line != null) {
counter++;
assertTrue(line.contains("hello"));
line = reader.readLine();
}
reader.close();
assertEquals(4, counter);
}
verifyCounters(runningJob, 4);
}
public static void verifyCounters(RunningJob runningJob, int expected)
throws IOException {
assertEquals(expected,
runningJob.getCounters().getCounter(EnumCounter.MAP_RECORDS));
assertEquals(expected,
runningJob.getCounters().getGroup("StringCounter")
.getCounter("MapRecords"));
}
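  // Illustrative sketch (not part of the original test): a new-API equivalent
  // of CountingMapper above, incrementing the same counters through the
  // mapper Context instead of the old-API Reporter.
  static class NewApiCountingMapper
      extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text,
                                                 LongWritable, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
      context.getCounter(EnumCounter.MAP_RECORDS).increment(1);
      context.getCounter("StringCounter", "MapRecords").increment(1);
    }
  }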
}
| 4,320 | 31.488722 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileInputStream;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.Log;
/**
* Utilities used in unit test.
*
*/
public class UtilsForTests {
static final Log LOG = LogFactory.getLog(UtilsForTests.class);
final static long KB = 1024L * 1;
final static long MB = 1024L * KB;
final static long GB = 1024L * MB;
final static long TB = 1024L * GB;
final static long PB = 1024L * TB;
final static Object waitLock = new Object();
static DecimalFormat dfm = new DecimalFormat("####.000");
static DecimalFormat ifm = new DecimalFormat("###,###,###,###,###");
public static String dfmt(double d) {
return dfm.format(d);
}
public static String ifmt(double d) {
return ifm.format(d);
}
public static String formatBytes(long numBytes) {
StringBuffer buf = new StringBuffer();
boolean bDetails = true;
double num = numBytes;
if (numBytes < KB) {
buf.append(numBytes + " B");
bDetails = false;
} else if (numBytes < MB) {
buf.append(dfmt(num / KB) + " KB");
} else if (numBytes < GB) {
buf.append(dfmt(num / MB) + " MB");
} else if (numBytes < TB) {
buf.append(dfmt(num / GB) + " GB");
} else if (numBytes < PB) {
buf.append(dfmt(num / TB) + " TB");
} else {
buf.append(dfmt(num / PB) + " PB");
}
if (bDetails) {
buf.append(" (" + ifmt(numBytes) + " bytes)");
}
return buf.toString();
}
public static String formatBytes2(long numBytes) {
StringBuffer buf = new StringBuffer();
long u = 0;
if (numBytes >= TB) {
u = numBytes / TB;
numBytes -= u * TB;
buf.append(u + " TB ");
}
if (numBytes >= GB) {
u = numBytes / GB;
numBytes -= u * GB;
buf.append(u + " GB ");
}
if (numBytes >= MB) {
u = numBytes / MB;
numBytes -= u * MB;
buf.append(u + " MB ");
}
if (numBytes >= KB) {
u = numBytes / KB;
numBytes -= u * KB;
buf.append(u + " KB ");
}
    buf.append(numBytes + " B"); // remaining bytes, even if zero
return buf.toString();
}
static final String regexpSpecials = "[]()?*+|.!^-\\~@";
public static String regexpEscape(String plain) {
StringBuffer buf = new StringBuffer();
char[] ch = plain.toCharArray();
int csup = ch.length;
for (int c = 0; c < csup; c++) {
if (regexpSpecials.indexOf(ch[c]) != -1) {
buf.append("\\");
}
buf.append(ch[c]);
}
return buf.toString();
}
public static String safeGetCanonicalPath(File f) {
try {
String s = f.getCanonicalPath();
return (s == null) ? f.toString() : s;
} catch (IOException io) {
return f.toString();
}
}
public static String slurp(File f) throws IOException {
int len = (int) f.length();
byte[] buf = new byte[len];
FileInputStream in = new FileInputStream(f);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
public static String slurpHadoop(Path p, FileSystem fs) throws IOException {
int len = (int) fs.getFileStatus(p).getLen();
byte[] buf = new byte[len];
InputStream in = fs.open(p);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
public static String rjustify(String s, int width) {
if (s == null) s = "null";
if (width > s.length()) {
s = getSpace(width - s.length()) + s;
}
return s;
}
public static String ljustify(String s, int width) {
if (s == null) s = "null";
if (width > s.length()) {
s = s + getSpace(width - s.length());
}
return s;
}
static char[] space;
static {
space = new char[300];
Arrays.fill(space, '\u0020');
}
public static String getSpace(int len) {
if (len > space.length) {
space = new char[Math.max(len, 2 * space.length)];
Arrays.fill(space, '\u0020');
}
return new String(space, 0, len);
}
/**
* Gets job status from the jobtracker given the jobclient and the job id
*/
static JobStatus getJobStatus(JobClient jc, JobID id) throws IOException {
JobStatus[] statuses = jc.getAllJobs();
for (JobStatus jobStatus : statuses) {
if (jobStatus.getJobID().equals(id)) {
return jobStatus;
}
}
return null;
}
/**
* A utility that waits for specified amount of time
*/
public static void waitFor(long duration) {
try {
synchronized (waitLock) {
waitLock.wait(duration);
}
} catch (InterruptedException ie) {}
}
/**
* Wait for the jobtracker to be RUNNING.
*/
static void waitForJobTracker(JobClient jobClient) {
while (true) {
try {
ClusterStatus status = jobClient.getClusterStatus();
while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
waitFor(100);
status = jobClient.getClusterStatus();
}
break; // means that the jt is ready
} catch (IOException ioe) {}
}
}
/**
* Waits until all the jobs at the jobtracker complete.
*/
static void waitTillDone(JobClient jobClient) throws IOException {
// Wait for the last job to complete
while (true) {
boolean shouldWait = false;
for (JobStatus jobStatuses : jobClient.getAllJobs()) {
if (jobStatuses.getRunState() != JobStatus.SUCCEEDED
&& jobStatuses.getRunState() != JobStatus.FAILED
&& jobStatuses.getRunState() != JobStatus.KILLED) {
shouldWait = true;
break;
}
}
if (shouldWait) {
waitFor(100);
} else {
break;
}
}
}
/**
* Configure a waiting job
*/
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
Path outputPath, int numMaps, int numRed,
String jobName, String mapSignalFilename,
String redSignalFilename)
throws IOException {
jobConf.setJobName(jobName);
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
FileInputFormat.setInputPaths(jobConf, inDir);
FileOutputFormat.setOutputPath(jobConf, outputPath);
jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
jobConf.setReducerClass(IdentityReducer.class);
jobConf.setOutputKeyClass(BytesWritable.class);
jobConf.setOutputValueClass(BytesWritable.class);
jobConf.setInputFormat(RandomInputFormat.class);
jobConf.setNumMapTasks(numMaps);
jobConf.setNumReduceTasks(numRed);
jobConf.setJar("build/test/mapred/testjar/testjob.jar");
jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
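  /**
   * Illustrative sketch (not part of the original utilities): the usual way
   * the waiting-job helpers are combined. The caller is assumed to provide a
   * running MiniDFSCluster, a FileSystem and suitable input/output/signal
   * directories.
   */
  static RunningJob runWaitingJobSketch(MiniDFSCluster dfs, FileSystem fileSys,
                                        JobConf conf, Path inDir, Path outDir,
                                        Path signalDir)
      throws IOException, TimeoutException {
    // configure a job whose tasks block until the signal files appear
    configureWaitingJobConf(conf, inDir, outDir, 2, 1, "sketch-waiting-job",
                            getMapSignalFile(signalDir),
                            getReduceSignalFile(signalDir));
    JobClient jobClient = new JobClient(conf);
    RunningJob job = jobClient.submitJob(conf);
    // release the maps and reduces by creating the signal files, then wait
    signalTasks(dfs, fileSys, getMapSignalFile(signalDir),
                getReduceSignalFile(signalDir), 1);
    waitTillDone(jobClient);
    return job;
  }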
/**
* Commonly used map and reduce classes
*/
/**
* Map is a Mapper that just waits for a file to be created on the dfs. The
* file creation is a signal to the mappers and hence acts as a waiting job.
*/
static class WaitingMapper
extends MapReduceBase
implements Mapper<WritableComparable, Writable,
WritableComparable, Writable> {
FileSystem fs = null;
Path signal;
int id = 0;
int totalMaps = 0;
/**
* Checks if the map task needs to wait. By default all the maps will wait.
* This method needs to be overridden to make a custom waiting mapper.
*/
public boolean shouldWait(int id) {
return true;
}
/**
* Returns a signal file on which the map task should wait. By default all
* the maps wait on a single file passed as test.mapred.map.waiting.target.
* This method needs to be overridden to make a custom waiting mapper
*/
public Path getSignalFile(int id) {
return signal;
}
/** The waiting function. The map exits once it gets a signal. Here the
* signal is the file existence.
*/
public void map(WritableComparable key, Writable val,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
if (shouldWait(id)) {
if (fs != null) {
while (!fs.exists(getSignalFile(id))) {
try {
reporter.progress();
synchronized (this) {
this.wait(1000); // wait for 1 sec
}
} catch (InterruptedException ie) {
System.out.println("Interrupted while the map was waiting for "
+ " the signal.");
break;
}
}
} else {
throw new IOException("Could not get the DFS!!");
}
}
}
public void configure(JobConf conf) {
try {
String taskId = conf.get(JobContext.TASK_ATTEMPT_ID);
id = Integer.parseInt(taskId.split("_")[4]);
totalMaps = Integer.parseInt(conf.get(JobContext.NUM_MAPS));
fs = FileSystem.get(conf);
signal = new Path(conf.get(getTaskSignalParameter(true)));
} catch (IOException ioe) {
System.out.println("Got an exception while obtaining the filesystem");
}
}
}
/** Only the later half of the maps wait for the signal while the rest
* complete immediately.
*/
static class HalfWaitingMapper extends WaitingMapper {
@Override
public boolean shouldWait(int id) {
return id >= (totalMaps / 2);
}
}
/**
* Reduce that just waits for a file to be created on the dfs. The
* file creation is a signal to the reduce.
*/
static class WaitingReducer extends MapReduceBase
implements Reducer<WritableComparable, Writable,
WritableComparable, Writable> {
FileSystem fs = null;
Path signal;
/** The waiting function. The reduce exits once it gets a signal. Here the
* signal is the file existence.
*/
public void reduce(WritableComparable key, Iterator<Writable> val,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
if (fs != null) {
while (!fs.exists(signal)) {
try {
reporter.progress();
synchronized (this) {
this.wait(1000); // wait for 1 sec
}
} catch (InterruptedException ie) {
System.out.println("Interrupted while the map was waiting for the"
+ " signal.");
break;
}
}
} else {
throw new IOException("Could not get the DFS!!");
}
}
public void configure(JobConf conf) {
try {
fs = FileSystem.get(conf);
signal = new Path(conf.get(getTaskSignalParameter(false)));
} catch (IOException ioe) {
System.out.println("Got an exception while obtaining the filesystem");
}
}
}
static String getTaskSignalParameter(boolean isMap) {
return isMap
? "test.mapred.map.waiting.target"
: "test.mapred.reduce.waiting.target";
}
/**
* Signal the maps/reduces to start.
*/
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
String mapSignalFile,
String reduceSignalFile, int replication)
throws IOException, TimeoutException {
try {
writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
(short)replication);
writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), (short)replication);
} catch (InterruptedException ie) {
// Ignore
}
}
/**
* Signal the maps/reduces to start.
*/
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
boolean isMap, String mapSignalFile,
String reduceSignalFile)
throws IOException, TimeoutException {
try {
// signal the maps to complete
writeFile(dfs.getNameNode(), fileSys.getConf(),
isMap
? new Path(mapSignalFile)
: new Path(reduceSignalFile), (short)1);
} catch (InterruptedException ie) {
// Ignore
}
}
static String getSignalFile(Path dir) {
return (new Path(dir, "signal")).toString();
}
static String getMapSignalFile(Path dir) {
return (new Path(dir, "map-signal")).toString();
}
static String getReduceSignalFile(Path dir) {
return (new Path(dir, "reduce-signal")).toString();
}
static void writeFile(NameNode namenode, Configuration conf, Path name,
short replication)
throws IOException, TimeoutException, InterruptedException {
FileSystem fileSys = FileSystem.get(conf);
SequenceFile.Writer writer =
SequenceFile.createWriter(fileSys, conf, name,
BytesWritable.class, BytesWritable.class,
CompressionType.NONE);
writer.append(new BytesWritable(), new BytesWritable());
writer.close();
fileSys.setReplication(name, replication);
DFSTestUtil.waitReplication(fileSys, name, replication);
}
// Input formats
/**
* A custom input format that creates virtual inputs of a single string
* for each map.
*/
public static class RandomInputFormat implements InputFormat<Text, Text> {
public InputSplit[] getSplits(JobConf job,
int numSplits) throws IOException {
InputSplit[] result = new InputSplit[numSplits];
Path outDir = FileOutputFormat.getOutputPath(job);
for(int i=0; i < result.length; ++i) {
result[i] = new FileSplit(new Path(outDir, "dummy-split-" + i),
0, 1, (String[])null);
}
return result;
}
static class RandomRecordReader implements RecordReader<Text, Text> {
Path name;
public RandomRecordReader(Path p) {
name = p;
}
public boolean next(Text key, Text value) {
if (name != null) {
key.set(name.getName());
name = null;
return true;
}
return false;
}
public Text createKey() {
return new Text();
}
public Text createValue() {
return new Text();
}
public long getPos() {
return 0;
}
public void close() {}
public float getProgress() {
return 0.0f;
}
}
public RecordReader<Text, Text> getRecordReader(InputSplit split,
JobConf job,
Reporter reporter)
throws IOException {
return new RandomRecordReader(((FileSplit) split).getPath());
}
}
// Start a job and return its RunningJob object
static RunningJob runJob(JobConf conf, Path inDir, Path outDir)
throws IOException {
return runJob(conf, inDir, outDir, conf.getNumMapTasks(), conf.getNumReduceTasks());
}
// Start a job and return its RunningJob object
static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
int numReds) throws IOException {
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
// submit the job and wait for it to complete
return runJob(conf, inDir, outDir, numMaps, numReds, input);
}
// Start a job with the specified input and return its RunningJob object
static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
int numReds, String input) throws IOException {
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
if (!fs.exists(inDir)) {
fs.mkdirs(inDir);
}
for (int i = 0; i < numMaps; ++i) {
DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
file.writeBytes(input);
file.close();
}
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReds);
JobClient jobClient = new JobClient(conf);
RunningJob job = jobClient.submitJob(conf);
return job;
}
// Run a job that will be succeeded and wait until it completes
public static RunningJob runJobSucceed(JobConf conf, Path inDir, Path outDir)
throws IOException {
conf.setJobName("test-job-succeed");
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(IdentityReducer.class);
RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
long sleepCount = 0;
while (!job.isComplete()) {
try {
if (sleepCount > 300) { // 30 seconds
throw new IOException("Job didn't finish in 30 seconds");
}
Thread.sleep(100);
sleepCount++;
} catch (InterruptedException e) {
break;
}
}
return job;
}
// Run a job that will be failed and wait until it completes
public static RunningJob runJobFail(JobConf conf, Path inDir, Path outDir)
throws IOException {
conf.setJobName("test-job-fail");
conf.setMapperClass(FailMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setMaxMapAttempts(1);
RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
long sleepCount = 0;
while (!job.isComplete()) {
try {
if (sleepCount > 300) { // 30 seconds
throw new IOException("Job didn't finish in 30 seconds");
}
Thread.sleep(100);
sleepCount++;
} catch (InterruptedException e) {
break;
}
}
return job;
}
// Run a job that will be killed and wait until it completes
public static RunningJob runJobKill(JobConf conf, Path inDir, Path outDir)
throws IOException {
conf.setJobName("test-job-kill");
conf.setMapperClass(KillMapper.class);
conf.setReducerClass(IdentityReducer.class);
RunningJob job = UtilsForTests.runJob(conf, inDir, outDir);
long sleepCount = 0;
while (job.getJobState() != JobStatus.RUNNING) {
try {
if (sleepCount > 300) { // 30 seconds
throw new IOException("Job didn't finish in 30 seconds");
}
Thread.sleep(100);
sleepCount++;
} catch (InterruptedException e) {
break;
}
}
job.killJob();
sleepCount = 0;
while (job.cleanupProgress() == 0.0f) {
try {
if (sleepCount > 2000) { // 20 seconds
throw new IOException("Job cleanup didn't start in 20 seconds");
}
Thread.sleep(10);
sleepCount++;
} catch (InterruptedException ie) {
break;
}
}
return job;
}
/**
* Cleans up files/dirs inline. CleanupQueue deletes in a separate thread
* asynchronously.
*/
public static class InlineCleanupQueue extends CleanupQueue {
List<String> stalePaths = new ArrayList<String>();
public InlineCleanupQueue() {
// do nothing
}
@Override
public void addToQueue(PathDeletionContext... contexts) {
// delete paths in-line
for (PathDeletionContext context : contexts) {
try {
if (!deletePath(context)) {
LOG.warn("Stale path " + context.fullPath);
stalePaths.add(context.fullPath);
}
} catch (IOException e) {
LOG.warn("Caught exception while deleting path "
+ context.fullPath);
LOG.info(StringUtils.stringifyException(e));
stalePaths.add(context.fullPath);
}
}
}
}
static class FakeClock extends Clock {
long time = 0;
public void advance(long millis) {
time += millis;
}
@Override
long getTime() {
return time;
}
}
// Mapper that fails
static class FailMapper extends MapReduceBase implements
Mapper<WritableComparable, Writable, WritableComparable, Writable> {
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out, Reporter reporter)
throws IOException {
//NOTE- the next line is required for the TestDebugScript test to succeed
System.err.println("failing map");
throw new RuntimeException("failing map");
}
}
// Mapper that sleeps for a long time.
// Used for running a job that will be killed
static class KillMapper extends MapReduceBase implements
Mapper<WritableComparable, Writable, WritableComparable, Writable> {
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out, Reporter reporter)
throws IOException {
try {
Thread.sleep(1000000);
} catch (InterruptedException e) {
// Do nothing
}
}
}
static void setUpConfigFile(Properties confProps, File configFile)
throws IOException {
Configuration config = new Configuration(false);
FileOutputStream fos = new FileOutputStream(configFile);
for (Enumeration<?> e = confProps.propertyNames(); e.hasMoreElements();) {
String key = (String) e.nextElement();
config.set(key, confProps.getProperty(key));
}
config.writeXml(fos);
fos.close();
}
  /**
   * Creates a file on the given file system and writes the given input to it.
   * @param dfs FileSystem the file system on which the file is created
   * @param URIPATH Path the path of the file to be created
   * @param permission FsPermission the permission to set on the created file
   * @param input String the contents written to the file
   * @return the DataOutputStream used to write the file (already closed)
   */
public static DataOutputStream
createTmpFileDFS(FileSystem dfs, Path URIPATH,
FsPermission permission, String input) throws Exception {
//Creating the path with the file
DataOutputStream file =
FileSystem.create(dfs, URIPATH, permission);
file.writeBytes(input);
file.close();
return file;
}
/**
* This formats the long tasktracker name to just the FQDN
* @param taskTrackerLong String The long format of the tasktracker string
* @return String The FQDN of the tasktracker
* @throws Exception
*/
public static String getFQDNofTT (String taskTrackerLong) throws Exception {
//Getting the exact FQDN of the tasktracker from the tasktracker string.
String[] firstSplit = taskTrackerLong.split("_");
String tmpOutput = firstSplit[1];
String[] secondSplit = tmpOutput.split(":");
String tmpTaskTracker = secondSplit[0];
return tmpTaskTracker;
}
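  // Illustrative example (assumed tracker-name format, not from the original
  // code): getFQDNofTT("tracker_host1.example.com:localhost/127.0.0.1:57310")
  // returns "host1.example.com", i.e. the text between the first '_' and the
  // next ':'.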
}
| 25,231 | 29.883721 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.Inflater;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@Ignore
public class TestConcatenatedCompressedInput {
private static final Log LOG =
LogFactory.getLog(TestConcatenatedCompressedInput.class.getName());
private static int MAX_LENGTH = 10000;
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
// from ~roelofs/ss30b-colors.hh
  final static String COLOR_RED        = "\u001B[0;31m";    // background doesn't matter... "[0m"
  final static String COLOR_GREEN      = "\u001B[0;32m";    // background doesn't matter... "[0m"
  final static String COLOR_YELLOW     = "\u001B[0;33;40m"; // DO force black background "[0m"
  final static String COLOR_BLUE       = "\u001B[0;34m";    // do NOT force black background "[0m"
  final static String COLOR_MAGENTA    = "\u001B[0;35m";    // background doesn't matter... "[0m"
  final static String COLOR_CYAN       = "\u001B[0;36m";    // background doesn't matter... "[0m"
  final static String COLOR_WHITE      = "\u001B[0;37;40m"; // DO force black background "[0m"
  final static String COLOR_BR_RED     = "\u001B[1;31m";    // background doesn't matter... "[0m"
  final static String COLOR_BR_GREEN   = "\u001B[1;32m";    // background doesn't matter... "[0m"
  final static String COLOR_BR_YELLOW  = "\u001B[1;33;40m"; // DO force black background "[0m"
  final static String COLOR_BR_BLUE    = "\u001B[1;34m";    // do NOT force black background "[0m"
  final static String COLOR_BR_MAGENTA = "\u001B[1;35m";    // background doesn't matter... "[0m"
  final static String COLOR_BR_CYAN    = "\u001B[1;36m";    // background doesn't matter... "[0m"
  final static String COLOR_BR_WHITE   = "\u001B[1;37;40m"; // DO force black background "[0m"
  final static String COLOR_NORMAL     = "\u001B[0m";
static {
try {
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "/tmp")),
"TestConcatenatedCompressedInput").makeQualified(localFs);
private static LineReader makeStream(String str) throws IOException {
return new LineReader(new ByteArrayInputStream(str.getBytes("UTF-8")),
defaultConf);
}
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec, String contents)
throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static final Reporter voidReporter = Reporter.NULL;
private static List<Text> readSplit(TextInputFormat format,
InputSplit split, JobConf jobConf)
throws IOException {
List<Text> result = new ArrayList<Text>();
RecordReader<LongWritable, Text> reader =
format.getRecordReader(split, jobConf, voidReporter);
LongWritable key = reader.createKey();
Text value = reader.createValue();
while (reader.next(key, value)) {
result.add(value);
value = reader.createValue();
}
reader.close();
return result;
}
/**
* Test using Hadoop's original, native-zlib gzip codec for reading.
*/
@Test
public void testGzip() throws IOException {
JobConf jobConf = new JobConf(defaultConf);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, jobConf);
localFs.delete(workDir, true);
// preferred, but not compatible with Apache/trunk instance of Hudson:
/*
assertFalse("[native (C/C++) codec]",
(org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class ==
gzip.getDecompressorType()) );
System.out.println(COLOR_BR_RED +
"testGzip() using native-zlib Decompressor (" +
gzip.getDecompressorType() + ")" + COLOR_NORMAL);
*/
// alternative:
if (org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class ==
gzip.getDecompressorType()) {
System.out.println(COLOR_BR_RED +
"testGzip() using native-zlib Decompressor (" +
gzip.getDecompressorType() + ")" + COLOR_NORMAL);
} else {
LOG.warn("testGzip() skipped: native (C/C++) libs not loaded");
return;
}
/*
* // THIS IS BUGGY: omits 2nd/3rd gzip headers; screws up 2nd/3rd CRCs--
* // see https://issues.apache.org/jira/browse/HADOOP-6799
* Path fnHDFS = new Path(workDir, "concat" + gzip.getDefaultExtension());
* //OutputStream out = localFs.create(fnHDFS);
* //GzipCodec.GzipOutputStream gzOStm = new GzipCodec.GzipOutputStream(out);
* // can just combine those two lines, probably
* //GzipCodec.GzipOutputStream gzOStm =
* // new GzipCodec.GzipOutputStream(localFs.create(fnHDFS));
* // oops, no: this is a protected helper class; need to access
* // it via createOutputStream() instead:
* OutputStream out = localFs.create(fnHDFS);
* Compressor gzCmp = gzip.createCompressor();
* CompressionOutputStream gzOStm = gzip.createOutputStream(out, gzCmp);
* // this SHOULD be going to HDFS: got out from localFs == HDFS
* // ...yup, works
* gzOStm.write("first gzip concat\n member\nwith three lines\n".getBytes());
* gzOStm.finish();
* gzOStm.resetState();
* gzOStm.write("2nd gzip concat member\n".getBytes());
* gzOStm.finish();
* gzOStm.resetState();
* gzOStm.write("gzip concat\nmember #3\n".getBytes());
* gzOStm.close();
* //
* String fn = "hdfs-to-local-concat" + gzip.getDefaultExtension();
* Path fnLocal = new Path(System.getProperty("test.concat.data","/tmp"), fn);
* localFs.copyToLocalFile(fnHDFS, fnLocal);
*/
// copy prebuilt (correct!) version of concat.gz to HDFS
final String fn = "concat" + gzip.getDefaultExtension();
Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
Path fnHDFS = new Path(workDir, fn);
localFs.copyFromLocalFile(fnLocal, fnHDFS);
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(jobConf, workDir);
TextInputFormat format = new TextInputFormat();
format.configure(jobConf);
InputSplit[] splits = format.getSplits(jobConf, 100);
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0] = splits[1];
splits[1] = tmp;
}
List<Text> results = readSplit(format, splits[0], jobConf);
assertEquals("splits[0] num lines", 6, results.size());
assertEquals("splits[0][5]", "member #3",
results.get(5).toString());
results = readSplit(format, splits[1], jobConf);
assertEquals("splits[1] num lines", 2, results.size());
assertEquals("splits[1][0]", "this is a test",
results.get(0).toString());
assertEquals("splits[1][1]", "of gzip",
results.get(1).toString());
}
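  // Illustrative sketch (not part of the original test): a way to build a
  // multi-member ("concatenated") gzip file without the buggy
  // GzipCodec/resetState() path discussed above (HADOOP-6799), by writing
  // each member with its own java.util.zip.GZIPOutputStream over the same
  // underlying stream. The Deflater of each member is not explicitly
  // released here, which is acceptable for a short-lived test sketch.
  private static void writeConcatenatedGzipSketch(FileSystem fs, Path name,
                                                  String[] members)
      throws IOException {
    OutputStream out = fs.create(name);
    try {
      for (String member : members) {
        java.util.zip.GZIPOutputStream gzOut =
            new java.util.zip.GZIPOutputStream(out);
        gzOut.write(member.getBytes("UTF-8"));
        gzOut.finish(); // completes this member: deflate data + gzip trailer
      }
    } finally {
      out.close();
    }
  }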
/**
* Test using the raw Inflater codec for reading gzip files.
*/
@Test
public void testPrototypeInflaterGzip() throws IOException {
CompressionCodec gzip = new GzipCodec(); // used only for file extension
localFs.delete(workDir, true); // localFs = FileSystem instance
System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " +
"non-native/Java Inflater and manual gzip header/trailer parsing" +
COLOR_NORMAL);
// copy prebuilt (correct!) version of concat.gz to HDFS
final String fn = "concat" + gzip.getDefaultExtension();
Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
Path fnHDFS = new Path(workDir, fn);
localFs.copyFromLocalFile(fnLocal, fnHDFS);
final FileInputStream in = new FileInputStream(fnLocal.toString());
assertEquals("concat bytes available", 148, in.available());
// should wrap all of this header-reading stuff in a running-CRC wrapper
// (did so in BuiltInGzipDecompressor; see below)
byte[] compressedBuf = new byte[256];
int numBytesRead = in.read(compressedBuf, 0, 10);
assertEquals("header bytes read", 10, numBytesRead);
assertEquals("1st byte", 0x1f, compressedBuf[0] & 0xff);
assertEquals("2nd byte", 0x8b, compressedBuf[1] & 0xff);
assertEquals("3rd byte (compression method)", 8, compressedBuf[2] & 0xff);
byte flags = (byte)(compressedBuf[3] & 0xff);
if ((flags & 0x04) != 0) { // FEXTRA
numBytesRead = in.read(compressedBuf, 0, 2);
assertEquals("XLEN bytes read", 2, numBytesRead);
int xlen = ((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
in.skip(xlen);
}
if ((flags & 0x08) != 0) { // FNAME
while ((numBytesRead = in.read()) != 0) {
assertFalse("unexpected end-of-file while reading filename",
numBytesRead == -1);
}
}
if ((flags & 0x10) != 0) { // FCOMMENT
while ((numBytesRead = in.read()) != 0) {
assertFalse("unexpected end-of-file while reading comment",
numBytesRead == -1);
}
}
if ((flags & 0xe0) != 0) { // reserved
assertTrue("reserved bits are set??", (flags & 0xe0) == 0);
}
if ((flags & 0x02) != 0) { // FHCRC
numBytesRead = in.read(compressedBuf, 0, 2);
assertEquals("CRC16 bytes read", 2, numBytesRead);
int crc16 = ((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
}
// ready to go! next bytes should be start of deflated stream, suitable
// for Inflater
numBytesRead = in.read(compressedBuf);
// Inflater docs refer to a "dummy byte": no clue what that's about;
// appears to work fine without one
byte[] uncompressedBuf = new byte[256];
Inflater inflater = new Inflater(true);
inflater.setInput(compressedBuf, 0, numBytesRead);
try {
int numBytesUncompressed = inflater.inflate(uncompressedBuf);
String outString =
new String(uncompressedBuf, 0, numBytesUncompressed, "UTF-8");
System.out.println("uncompressed data of first gzip member = [" +
outString + "]");
} catch (java.util.zip.DataFormatException ex) {
throw new IOException(ex.getMessage());
}
in.close();
}
/**
* Test using the new BuiltInGzipDecompressor codec for reading gzip files.
*/
// NOTE: This fails on RHEL4 with "java.io.IOException: header crc mismatch"
// due to buggy version of zlib (1.2.1.2) included.
@Test
public void testBuiltInGzipDecompressor() throws IOException {
JobConf jobConf = new JobConf(defaultConf);
jobConf.setBoolean("io.native.lib.available", false);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, jobConf);
localFs.delete(workDir, true);
assertEquals("[non-native (Java) codec]",
org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class,
gzip.getDecompressorType());
System.out.println(COLOR_BR_YELLOW + "testBuiltInGzipDecompressor() using" +
" non-native (Java Inflater) Decompressor (" + gzip.getDecompressorType()
+ ")" + COLOR_NORMAL);
// copy single-member test file to HDFS
String fn1 = "testConcatThenCompress.txt" + gzip.getDefaultExtension();
Path fnLocal1 = new Path(System.getProperty("test.concat.data","/tmp"),fn1);
Path fnHDFS1 = new Path(workDir, fn1);
localFs.copyFromLocalFile(fnLocal1, fnHDFS1);
// copy multiple-member test file to HDFS
// (actually in "seekable gzip" format, a la JIRA PIG-42)
String fn2 = "testCompressThenConcat.txt" + gzip.getDefaultExtension();
Path fnLocal2 = new Path(System.getProperty("test.concat.data","/tmp"),fn2);
Path fnHDFS2 = new Path(workDir, fn2);
localFs.copyFromLocalFile(fnLocal2, fnHDFS2);
FileInputFormat.setInputPaths(jobConf, workDir);
// here's first pair of DecompressorStreams:
final FileInputStream in1 = new FileInputStream(fnLocal1.toString());
final FileInputStream in2 = new FileInputStream(fnLocal2.toString());
assertEquals("concat bytes available", 2734, in1.available());
assertEquals("concat bytes available", 3413, in2.available()); // w/hdr CRC
CompressionInputStream cin2 = gzip.createInputStream(in2);
LineReader in = new LineReader(cin2);
Text out = new Text();
int numBytes, totalBytes=0, lineNum=0;
while ((numBytes = in.readLine(out)) > 0) {
++lineNum;
totalBytes += numBytes;
}
in.close();
assertEquals("total uncompressed bytes in concatenated test file",
5346, totalBytes);
assertEquals("total uncompressed lines in concatenated test file",
84, lineNum);
// test BuiltInGzipDecompressor with lots of different input-buffer sizes
doMultipleGzipBufferSizes(jobConf, false);
// test GzipZlibDecompressor (native), just to be sure
// (FIXME? could move this call to testGzip(), but would need filename
// setup above) (alternatively, maybe just nuke testGzip() and extend this?)
doMultipleGzipBufferSizes(jobConf, true);
}
// this tests either the native or the non-native gzip decoder with 43
// input-buffer sizes in order to try to catch any parser/state-machine
// errors at buffer boundaries
private static void doMultipleGzipBufferSizes(JobConf jConf,
boolean useNative)
throws IOException {
System.out.println(COLOR_YELLOW + "doMultipleGzipBufferSizes() using " +
(useNative? "GzipZlibDecompressor" : "BuiltInGzipDecompressor") +
COLOR_NORMAL);
jConf.setBoolean("io.native.lib.available", useNative);
int bufferSize;
// ideally would add some offsets/shifts in here (e.g., via extra fields
// of various sizes), but...significant work to hand-generate each header
for (bufferSize = 1; bufferSize < 34; ++bufferSize) {
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleGzipBufferSize(jConf);
}
    // a handful of larger sizes, including several that straddle power-of-two
    // buffer boundaries (63/64/65 KB and 127/128/129 KB)
    final int[] largerBufferSizes = {
      512, 1024, 2*1024, 4*1024, 63*1024, 64*1024, 65*1024,
      127*1024, 128*1024, 129*1024
    };
    for (int size : largerBufferSizes) {
      jConf.setInt("io.file.buffer.size", size);
      doSingleGzipBufferSize(jConf);
    }
}
// this tests both files (testCompressThenConcat, testConcatThenCompress);
// all should work with either native zlib or new Inflater-based decoder
private static void doSingleGzipBufferSize(JobConf jConf) throws IOException {
TextInputFormat format = new TextInputFormat();
format.configure(jConf);
// here's Nth pair of DecompressorStreams:
InputSplit[] splits = format.getSplits(jConf, 100);
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("testCompressThenConcat.txt.gz")) {
System.out.println(" (swapping)");
splits[0] = splits[1];
splits[1] = tmp;
}
List<Text> results = readSplit(format, splits[0], jConf);
assertEquals("splits[0] length (num lines)", 84, results.size());
assertEquals("splits[0][0]",
"Call me Ishmael. Some years ago--never mind how long precisely--having",
results.get(0).toString());
assertEquals("splits[0][42]",
"Tell me, does the magnetic virtue of the needles of the compasses of",
results.get(42).toString());
results = readSplit(format, splits[1], jConf);
assertEquals("splits[1] length (num lines)", 84, results.size());
assertEquals("splits[1][0]",
"Call me Ishmael. Some years ago--never mind how long precisely--having",
results.get(0).toString());
assertEquals("splits[1][42]",
"Tell me, does the magnetic virtue of the needles of the compasses of",
results.get(42).toString());
}
/**
* Test using the bzip2 codec for reading
*/
@Test
public void testBzip2() throws IOException {
JobConf jobConf = new JobConf(defaultConf);
CompressionCodec bzip2 = new BZip2Codec();
ReflectionUtils.setConf(bzip2, jobConf);
localFs.delete(workDir, true);
System.out.println(COLOR_BR_CYAN +
"testBzip2() using non-native CBZip2InputStream (presumably)" +
COLOR_NORMAL);
// copy prebuilt (correct!) version of concat.bz2 to HDFS
final String fn = "concat" + bzip2.getDefaultExtension();
Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
Path fnHDFS = new Path(workDir, fn);
localFs.copyFromLocalFile(fnLocal, fnHDFS);
writeFile(localFs, new Path(workDir, "part2.txt.bz2"), bzip2,
"this is a test\nof bzip2\n");
FileInputFormat.setInputPaths(jobConf, workDir);
TextInputFormat format = new TextInputFormat(); // extends FileInputFormat
format.configure(jobConf);
format.setMinSplitSize(256); // work around 2-byte splits issue
// [135 splits for a 208-byte file and a 62-byte file(!)]
InputSplit[] splits = format.getSplits(jobConf, 100);
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("part2.txt.bz2")) {
splits[0] = splits[1];
splits[1] = tmp;
}
List<Text> results = readSplit(format, splits[0], jobConf);
assertEquals("splits[0] num lines", 6, results.size());
assertEquals("splits[0][5]", "member #3",
results.get(5).toString());
results = readSplit(format, splits[1], jobConf);
assertEquals("splits[1] num lines", 2, results.size());
assertEquals("splits[1][0]", "this is a test",
results.get(0).toString());
assertEquals("splits[1][1]", "of bzip2",
results.get(1).toString());
}
/**
* Extended bzip2 test, similar to BuiltInGzipDecompressor test above.
*/
@Test
public void testMoreBzip2() throws IOException {
JobConf jobConf = new JobConf(defaultConf);
CompressionCodec bzip2 = new BZip2Codec();
ReflectionUtils.setConf(bzip2, jobConf);
localFs.delete(workDir, true);
System.out.println(COLOR_BR_MAGENTA +
"testMoreBzip2() using non-native CBZip2InputStream (presumably)" +
COLOR_NORMAL);
// copy single-member test file to HDFS
String fn1 = "testConcatThenCompress.txt" + bzip2.getDefaultExtension();
Path fnLocal1 = new Path(System.getProperty("test.concat.data","/tmp"),fn1);
Path fnHDFS1 = new Path(workDir, fn1);
localFs.copyFromLocalFile(fnLocal1, fnHDFS1);
// copy multiple-member test file to HDFS
String fn2 = "testCompressThenConcat.txt" + bzip2.getDefaultExtension();
Path fnLocal2 = new Path(System.getProperty("test.concat.data","/tmp"),fn2);
Path fnHDFS2 = new Path(workDir, fn2);
localFs.copyFromLocalFile(fnLocal2, fnHDFS2);
FileInputFormat.setInputPaths(jobConf, workDir);
// here's first pair of BlockDecompressorStreams:
final FileInputStream in1 = new FileInputStream(fnLocal1.toString());
final FileInputStream in2 = new FileInputStream(fnLocal2.toString());
assertEquals("concat bytes available", 2567, in1.available());
assertEquals("concat bytes available", 3056, in2.available());
/*
// FIXME
// The while-loop below dies at the beginning of the 2nd concatenated
// member (after 17 lines successfully read) with:
//
// java.io.IOException: bad block header
// at org.apache.hadoop.io.compress.bzip2.CBZip2InputStream.initBlock(
// CBZip2InputStream.java:527)
//
// It is not critical to concatenated-gzip support, HADOOP-6835, so it's
// simply commented out for now (and HADOOP-6852 filed). If and when the
// latter issue is resolved--perhaps by fixing an error here--this code
// should be reenabled. Note that the doMultipleBzip2BufferSizes() test
// below uses the same testCompressThenConcat.txt.bz2 file but works fine.
CompressionInputStream cin2 = bzip2.createInputStream(in2);
LineReader in = new LineReader(cin2);
Text out = new Text();
int numBytes, totalBytes=0, lineNum=0;
while ((numBytes = in.readLine(out)) > 0) {
++lineNum;
totalBytes += numBytes;
}
in.close();
assertEquals("total uncompressed bytes in concatenated test file",
5346, totalBytes);
assertEquals("total uncompressed lines in concatenated test file",
84, lineNum);
*/
// test CBZip2InputStream with lots of different input-buffer sizes
doMultipleBzip2BufferSizes(jobConf, false);
// no native version of bzip2 codec (yet?)
//doMultipleBzip2BufferSizes(jobConf, true);
}
  // this tests either the native or the non-native bzip2 decoder with more
  // than three dozen input-buffer sizes in order to try to catch any
  // parser/state-machine errors at buffer boundaries
private static void doMultipleBzip2BufferSizes(JobConf jConf,
boolean useNative)
throws IOException {
System.out.println(COLOR_MAGENTA + "doMultipleBzip2BufferSizes() using " +
"default bzip2 decompressor" + COLOR_NORMAL);
jConf.setBoolean("io.native.lib.available", useNative);
int bufferSize;
// ideally would add some offsets/shifts in here (e.g., via extra header
// data?), but...significant work to hand-generate each header, and no
// bzip2 spec for reference
for (bufferSize = 1; bufferSize < 34; ++bufferSize) {
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
}
bufferSize = 512;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 2*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 4*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 63*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 64*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 65*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 127*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 128*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
bufferSize = 129*1024;
jConf.setInt("io.file.buffer.size", bufferSize);
doSingleBzip2BufferSize(jConf);
}
// this tests both files (testCompressThenConcat, testConcatThenCompress); all
// should work with existing Java bzip2 decoder and any future native version
private static void doSingleBzip2BufferSize(JobConf jConf) throws IOException {
TextInputFormat format = new TextInputFormat();
format.configure(jConf);
format.setMinSplitSize(5500); // work around 256-byte/22-splits issue
// here's Nth pair of DecompressorStreams:
InputSplit[] splits = format.getSplits(jConf, 100);
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("testCompressThenConcat.txt.gz")) {
System.out.println(" (swapping)");
splits[0] = splits[1];
splits[1] = tmp;
}
// testConcatThenCompress (single)
List<Text> results = readSplit(format, splits[0], jConf);
assertEquals("splits[0] length (num lines)", 84, results.size());
assertEquals("splits[0][0]",
"Call me Ishmael. Some years ago--never mind how long precisely--having",
results.get(0).toString());
assertEquals("splits[0][42]",
"Tell me, does the magnetic virtue of the needles of the compasses of",
results.get(42).toString());
// testCompressThenConcat (multi)
results = readSplit(format, splits[1], jConf);
assertEquals("splits[1] length (num lines)", 84, results.size());
assertEquals("splits[1][0]",
"Call me Ishmael. Some years ago--never mind how long precisely--having",
results.get(0).toString());
assertEquals("splits[1][42]",
"Tell me, does the magnetic virtue of the needles of the compasses of",
results.get(42).toString());
}
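  /**
   * Expand backslash escape sequences in a command-line argument: "\n" and
   * "\r" become newline and carriage return; any other escaped character is
   * passed through unchanged.
   */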
private static String unquote(String in) {
StringBuffer result = new StringBuffer();
for(int i=0; i < in.length(); ++i) {
char ch = in.charAt(i);
if (ch == '\\') {
ch = in.charAt(++i);
switch (ch) {
case 'n':
result.append('\n');
break;
case 'r':
result.append('\r');
break;
default:
result.append(ch);
break;
}
} else {
result.append(ch);
}
}
return result.toString();
}
/**
* Parse the command line arguments into lines and display the result.
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
for(String arg: args) {
System.out.println("Working on " + arg);
LineReader reader = makeStream(unquote(arg));
Text line = new Text();
int size = reader.readLine(line);
while (size > 0) {
System.out.println("Got: " + line.toString());
size = reader.readLine(line);
}
reader.close();
}
}
}
| 28,081 | 37.627235 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
/**
*
* test MultiFileSplit class
*/
public class TestMultiFileSplit extends TestCase{
public void testReadWrite() throws Exception {
MultiFileSplit split = new MultiFileSplit(new JobConf(), new Path[] {new Path("/test/path/1"), new Path("/test/path/2")}, new long[] {100,200});
ByteArrayOutputStream bos = null;
byte[] result = null;
try {
bos = new ByteArrayOutputStream();
split.write(new DataOutputStream(bos));
result = bos.toByteArray();
} finally {
IOUtils.closeStream(bos);
}
MultiFileSplit readSplit = new MultiFileSplit();
ByteArrayInputStream bis = null;
try {
bis = new ByteArrayInputStream(result);
readSplit.readFields(new DataInputStream(bis));
} finally {
IOUtils.closeStream(bis);
}
assertTrue(split.getLength() != 0);
assertEquals(split.getLength(), readSplit.getLength());
assertTrue(Arrays.equals(split.getPaths(), readSplit.getPaths()));
assertTrue(Arrays.equals(split.getLengths(), readSplit.getLengths()));
System.out.println(split.toString());
}
/**
* test method getLocations
* @throws IOException
*/
public void testgetLocations() throws IOException{
JobConf job= new JobConf();
File tmpFile = File.createTempFile("test","txt");
tmpFile.createNewFile();
OutputStream out=new FileOutputStream(tmpFile);
out.write("tempfile".getBytes());
out.flush();
out.close();
Path[] path= {new Path(tmpFile.getAbsolutePath())};
long[] lengths = {100};
MultiFileSplit split = new MultiFileSplit(job,path,lengths);
String [] locations= split.getLocations();
assertTrue(locations.length==1);
assertEquals(locations[0], "localhost");
}
}
| 3,086 | 32.923077 | 150 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobSysDirWithDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
/**
* A JUnit test to test Job System Directory with Mini-DFS.
*/
public class TestJobSysDirWithDFS extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestJobSysDirWithDFS.class.getName());
static final int NUM_MAPS = 10;
static final int NUM_SAMPLES = 100000;
public static class TestResult {
public String output;
public RunningJob job;
TestResult(RunningJob job, String output) {
this.job = job;
this.output = output;
}
}
public static TestResult launchWordCount(JobConf conf,
Path inDir,
Path outDir,
String input,
int numMaps,
int numReduces,
String sysDir) throws IOException {
FileSystem inFs = inDir.getFileSystem(conf);
FileSystem outFs = outDir.getFileSystem(conf);
outFs.delete(outDir, true);
if (!inFs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
conf.setJobName("wordcount");
conf.setInputFormat(TextInputFormat.class);
// the keys are words (strings)
conf.setOutputKeyClass(Text.class);
// the values are counts (ints)
conf.setOutputValueClass(IntWritable.class);
conf.setMapperClass(WordCount.MapClass.class);
conf.setCombinerClass(WordCount.Reduce.class);
conf.setReducerClass(WordCount.Reduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReduces);
conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/subru/mapred/system");
JobClient jobClient = new JobClient(conf);
RunningJob job = jobClient.runJob(conf);
// Checking that the Job Client system dir is not used
assertFalse(FileSystem.get(conf).exists(
new Path(conf.get(JTConfig.JT_SYSTEM_DIR))));
    // Check if the Job Tracker system dir is propagated to the client
assertFalse(sysDir.contains("/tmp/subru/mapred/system"));
assertTrue(sysDir.contains("custom"));
return new TestResult(job, MapReduceTestUtil.readOutput(outDir, conf));
}
static void runWordCount(MiniMRCluster mr, JobConf jobConf, String sysDir)
throws IOException {
LOG.info("runWordCount");
// Run a word count example
// Keeping tasks that match this pattern
TestResult result;
final Path inDir = new Path("./wc/input");
final Path outDir = new Path("./wc/output");
result = launchWordCount(jobConf, inDir, outDir,
"The quick brown fox\nhas many silly\n" +
"red fox sox\n",
3, 1, sysDir);
assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
"quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result.output);
// Checking if the Job ran successfully in spite of different system dir config
// between Job Client & Job Tracker
assertTrue(result.job.isSuccessful());
}
public void testWithDFS() throws IOException {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
try {
final int taskTrackers = 4;
JobConf conf = new JobConf();
conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fileSys = dfs.getFileSystem();
mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);
runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
} finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();
}
}
}
}
| 5,318 | 36.723404 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import static org.junit.Assert.*;
@SuppressWarnings(value={"unchecked", "deprecation"})
/**
 * Tests support for encrypting intermediate MapReduce data (enabled via
 * {@link MRJobConfig#MR_ENCRYPTED_INTERMEDIATE_DATA}).  The input files are
 * already sorted on the key; the mapper copies records unchanged and a range
 * partitioner preserves the sort order within each partition, so the
 * framework's merge on the reduce side produces final output that is sorted
 * on the key.  The test verifies that this output is complete and correctly
 * ordered while intermediate-data encryption is enabled.
 */
public class TestMRIntermediateDataEncryption {
// Where MR job's input will reside.
private static final Path INPUT_DIR = new Path("/test/input");
// Where output goes.
private static final Path OUTPUT = new Path("/test/output");
@Test
public void testSingleReducer() throws Exception {
doEncryptionTest(3, 1, 2, false);
}
@Test
public void testUberMode() throws Exception {
doEncryptionTest(3, 1, 2, true);
}
@Test
public void testMultipleMapsPerNode() throws Exception {
doEncryptionTest(8, 1, 2, false);
}
@Test
public void testMultipleReducers() throws Exception {
doEncryptionTest(2, 4, 2, false);
}
public void doEncryptionTest(int numMappers, int numReducers, int numNodes,
boolean isUber) throws Exception {
doEncryptionTest(numMappers, numReducers, numNodes, 1000, isUber);
}
public void doEncryptionTest(int numMappers, int numReducers, int numNodes,
int numLines, boolean isUber) throws Exception {
MiniDFSCluster dfsCluster = null;
MiniMRClientCluster mrCluster = null;
FileSystem fileSystem = null;
try {
Configuration conf = new Configuration();
// Start the mini-MR and mini-DFS clusters
dfsCluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numNodes).build();
fileSystem = dfsCluster.getFileSystem();
mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
numNodes, conf);
// Generate input.
createInput(fileSystem, numMappers, numLines);
// Run the test.
runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem,
numMappers, numReducers, numLines, isUber);
} finally {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
if (mrCluster != null) {
mrCluster.stop();
}
}
}
private void createInput(FileSystem fs, int numMappers, int numLines) throws Exception {
fs.delete(INPUT_DIR, true);
for (int i = 0; i < numMappers; i++) {
OutputStream os = fs.create(new Path(INPUT_DIR, "input_" + i + ".txt"));
Writer writer = new OutputStreamWriter(os);
for (int j = 0; j < numLines; j++) {
// Create sorted key, value pairs.
int k = j + 1;
String formattedNumber = String.format("%09d", k);
writer.write(formattedNumber + " " + formattedNumber + "\n");
}
writer.close();
}
}
private void runMergeTest(JobConf job, FileSystem fileSystem, int
numMappers, int numReducers, int numLines, boolean isUber)
throws Exception {
fileSystem.delete(OUTPUT, true);
job.setJobName("Test");
JobClient client = new JobClient(job);
RunningJob submittedJob = null;
FileInputFormat.setInputPaths(job, INPUT_DIR);
FileOutputFormat.setOutputPath(job, OUTPUT);
job.set("mapreduce.output.textoutputformat.separator", " ");
job.setInputFormat(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setMapperClass(MyMapper.class);
job.setPartitionerClass(MyPartitioner.class);
job.setOutputFormat(TextOutputFormat.class);
job.setNumReduceTasks(numReducers);
job.setInt("mapreduce.map.maxattempts", 1);
job.setInt("mapreduce.reduce.maxattempts", 1);
job.setInt("mapred.test.num_lines", numLines);
if (isUber) {
job.setBoolean("mapreduce.job.ubertask.enable", true);
}
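    // Enable encryption of the job's intermediate (spill and shuffle) data.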
job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
try {
submittedJob = client.submitJob(job);
try {
if (! client.monitorAndPrintJob(job, submittedJob)) {
throw new IOException("Job failed!");
}
} catch(InterruptedException ie) {
Thread.currentThread().interrupt();
}
} catch(IOException ioe) {
System.err.println("Job failed with: " + ioe);
} finally {
verifyOutput(submittedJob, fileSystem, numMappers, numLines);
}
}
private void verifyOutput(RunningJob submittedJob, FileSystem fileSystem, int numMappers, int numLines)
throws Exception {
FSDataInputStream dis = null;
long numValidRecords = 0;
long numInvalidRecords = 0;
String prevKeyValue = "000000000";
Path[] fileList =
FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT,
new Utils.OutputFileUtils.OutputFilesFilter()));
for (Path outFile : fileList) {
try {
dis = fileSystem.open(outFile);
String record;
while((record = dis.readLine()) != null) {
// Split the line into key and value.
int blankPos = record.indexOf(" ");
String keyString = record.substring(0, blankPos);
String valueString = record.substring(blankPos+1);
// Check for sorted output and correctness of record.
if (keyString.compareTo(prevKeyValue) >= 0
&& keyString.equals(valueString)) {
prevKeyValue = keyString;
numValidRecords++;
} else {
numInvalidRecords++;
}
}
} finally {
if (dis != null) {
dis.close();
dis = null;
}
}
}
// Make sure we got all input records in the output in sorted order.
assertEquals((long)(numMappers * numLines), numValidRecords);
// Make sure there is no extraneous invalid record.
assertEquals(0, numInvalidRecords);
}
/**
* A mapper implementation that assumes that key text contains valid integers
* in displayable form.
*/
public static class MyMapper extends MapReduceBase
implements Mapper<LongWritable, Text, Text, Text> {
private Text keyText;
private Text valueText;
public MyMapper() {
keyText = new Text();
valueText = new Text();
}
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
String record = value.toString();
int blankPos = record.indexOf(" ");
keyText.set(record.substring(0, blankPos));
valueText.set(record.substring(blankPos+1));
output.collect(keyText, valueText);
}
public void close() throws IOException {
}
}
/**
* Partitioner implementation to make sure that output is in total sorted
* order. We basically route key ranges to different reducers such that
* key values monotonically increase with the partition number. For example,
* in this test, the keys are numbers from 1 to 1000 in the form "000000001"
* to "000001000" in each input file. The keys "000000001" to "000000250" are
* routed to partition 0, "000000251" to "000000500" are routed to partition 1
* and so on since we have 4 reducers.
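   * Worked example of the formula in getPartition() (illustrative, assuming
   * 4 reducers and mapred.test.num_lines = 1000): key "000000250" maps to
   * partition (4 * (250 - 1)) / 1000 = 0, while key "000000251" maps to
   * partition (4 * (251 - 1)) / 1000 = 1.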
*/
static class MyPartitioner implements Partitioner<Text, Text> {
private JobConf job;
public MyPartitioner() {
}
public void configure(JobConf job) {
this.job = job;
}
public int getPartition(Text key, Text value, int numPartitions) {
int keyValue = 0;
try {
keyValue = Integer.parseInt(key.toString());
} catch(NumberFormatException nfe) {
keyValue = 0;
}
int partitionNumber = (numPartitions*(Math.max(0, keyValue-1)))/job.getInt("mapred.test.num_lines", 10000);
return partitionNumber;
}
}
}
| 9,456 | 34.419476 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFileStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import junit.framework.TestCase;
public class TestIFileStreams extends TestCase {
public void testIFileStream() throws Exception {
final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
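    // the extra 4 bytes leave room for the checksum trailer that
    // IFileOutputStream appends when the stream is closed (hence the stream
    // length of DLEN + 4 passed to IFileInputStream below)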
IFileOutputStream ifos = new IFileOutputStream(dob);
for (int i = 0; i < DLEN; ++i) {
ifos.write(i);
}
ifos.close();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), DLEN + 4);
IFileInputStream ifis = new IFileInputStream(dib, 104, new Configuration());
for (int i = 0; i < DLEN; ++i) {
assertEquals(i, ifis.read());
}
ifis.close();
}
public void testBadIFileStream() throws Exception {
final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
IFileOutputStream ifos = new IFileOutputStream(dob);
for (int i = 0; i < DLEN; ++i) {
ifos.write(i);
}
ifos.close();
DataInputBuffer dib = new DataInputBuffer();
final byte[] b = dob.getData();
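    // corrupt a single data byte so that the end-of-stream checksum
    // verification should raise a ChecksumException (checked below)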
++b[17];
dib.reset(b, DLEN + 4);
IFileInputStream ifis = new IFileInputStream(dib, 104, new Configuration());
int i = 0;
try {
while (i < DLEN) {
if (17 == i) {
assertEquals(18, ifis.read());
} else {
assertEquals(i, ifis.read());
}
++i;
}
ifis.close();
} catch (ChecksumException e) {
assertEquals("Unexpected bad checksum", DLEN - 1, i);
return;
}
fail("Did not detect bad data in checksum");
}
public void testBadLength() throws Exception {
final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
IFileOutputStream ifos = new IFileOutputStream(dob);
for (int i = 0; i < DLEN; ++i) {
ifos.write(i);
}
ifos.close();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), DLEN + 4);
IFileInputStream ifis = new IFileInputStream(dib, 100, new Configuration());
int i = 0;
try {
while (i < DLEN - 8) {
assertEquals(i++, ifis.read());
}
ifis.close();
} catch (ChecksumException e) {
assertEquals("Checksum before close", i, DLEN - 8);
return;
}
fail("Did not detect bad data in checksum");
}
}
| 3,269 | 31.058824 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestUtils {
private static final Path[] LOG_PATHS = new Path[] {
new Path("file:///foo/_logs"),
new Path("file:///foo/_logs/"),
new Path("_logs/"),
new Path("_logs")
};
private static final Path[] SUCCEEDED_PATHS = new Path[] {
new Path("file:///blah/" + FileOutputCommitter.SUCCEEDED_FILE_NAME)
};
private static final Path[] PASS_PATHS = new Path[] {
new Path("file:///my_logs/blah"),
new Path("file:///a/b/c"),
new Path("file:///foo/_logs/blah"),
new Path("_logs/foo"),
new Path("file:///blah/" +
FileOutputCommitter.SUCCEEDED_FILE_NAME +
"/bar")
};
@Test
public void testOutputFilesFilter() {
PathFilter filter = new Utils.OutputFileUtils.OutputFilesFilter();
for (Path p : LOG_PATHS) {
assertFalse(filter.accept(p));
}
for (Path p : SUCCEEDED_PATHS) {
assertFalse(filter.accept(p));
}
for (Path p : PASS_PATHS) {
assertTrue(filter.accept(p));
}
}
@Test
public void testLogFilter() {
PathFilter filter = new Utils.OutputFileUtils.OutputLogFilter();
for (Path p : LOG_PATHS) {
assertFalse(filter.accept(p));
}
for (Path p : SUCCEEDED_PATHS) {
assertTrue(filter.accept(p));
}
for (Path p : PASS_PATHS) {
assertTrue(filter.accept(p));
}
}
}
| 2,320 | 28.0125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.File;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Distributed threaded map benchmark.
* <p>
* This benchmark generates random data per map and tests the performance
* of having multiple spills (using multiple threads) over having just one
* spill. Following are the parameters that can be specified
* <li>File size per map.
* <li>Number of spills per map.
* <li>Number of maps per host.
* <p>
* Sort is used for benchmarking the performance.
*/
public class ThreadedMapBenchmark extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(ThreadedMapBenchmark.class);
private static Path BASE_DIR =
new Path(System.getProperty("test.build.data",
File.separator + "benchmarks" + File.separator
+ "ThreadedMapBenchmark"));
private static Path INPUT_DIR = new Path(BASE_DIR, "input");
private static Path OUTPUT_DIR = new Path(BASE_DIR, "output");
private static final float FACTOR = 2.3f; // mapreduce.task.io.sort.mb set to
// (FACTOR * data_size) should
// result in only 1 spill
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
/**
* Generates random input data of given size with keys and values of given
* sizes. By default it generates 128mb input data with 10 byte keys and 10
* byte values.
*/
public static class Map extends MapReduceBase
implements Mapper<WritableComparable, Writable,
BytesWritable, BytesWritable> {
private long numBytesToWrite;
private int minKeySize;
private int keySizeRange;
private int minValueSize;
private int valueSizeRange;
private Random random = new Random();
private BytesWritable randomKey = new BytesWritable();
private BytesWritable randomValue = new BytesWritable();
private void randomizeBytes(byte[] data, int offset, int length) {
for(int i = offset + length - 1; i >= offset; --i) {
data[i] = (byte) random.nextInt(256);
}
}
public void map(WritableComparable key,
Writable value,
OutputCollector<BytesWritable, BytesWritable> output,
Reporter reporter) throws IOException {
int itemCount = 0;
while (numBytesToWrite > 0) {
int keyLength = minKeySize
+ (keySizeRange != 0
? random.nextInt(keySizeRange)
: 0);
randomKey.setSize(keyLength);
randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
int valueLength = minValueSize
+ (valueSizeRange != 0
? random.nextInt(valueSizeRange)
: 0);
randomValue.setSize(valueLength);
randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
output.collect(randomKey, randomValue);
numBytesToWrite -= keyLength + valueLength;
reporter.incrCounter(Counters.BYTES_WRITTEN, 1);
reporter.incrCounter(Counters.RECORDS_WRITTEN, 1);
if (++itemCount % 200 == 0) {
reporter.setStatus("wrote record " + itemCount + ". "
+ numBytesToWrite + " bytes left.");
}
}
reporter.setStatus("done with " + itemCount + " records.");
}
@Override
public void configure(JobConf job) {
numBytesToWrite = job.getLong("test.tmb.bytes_per_map",
128 * 1024 * 1024);
minKeySize = job.getInt("test.tmb.min_key", 10);
keySizeRange = job.getInt("test.tmb.max_key", 10) - minKeySize;
minValueSize = job.getInt("test.tmb.min_value", 10);
valueSizeRange = job.getInt("test.tmb.max_value", 10) - minValueSize;
}
}
/**
* Generate input data for the benchmark
*/
public static void generateInputData(int dataSizePerMap,
int numSpillsPerMap,
int numMapsPerHost,
JobConf masterConf)
throws Exception {
JobConf job = new JobConf(masterConf, ThreadedMapBenchmark.class);
job.setJobName("threaded-map-benchmark-random-writer");
job.setJarByClass(ThreadedMapBenchmark.class);
job.setInputFormat(UtilsForTests.RandomInputFormat.class);
job.setOutputFormat(SequenceFileOutputFormat.class);
job.setMapperClass(Map.class);
job.setReducerClass(IdentityReducer.class);
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
JobClient client = new JobClient(job);
ClusterStatus cluster = client.getClusterStatus();
long totalDataSize = dataSizePerMap * numMapsPerHost
* cluster.getTaskTrackers();
job.set("test.tmb.bytes_per_map",
String.valueOf(dataSizePerMap * 1024 * 1024));
    job.setNumReduceTasks(0); // no reduces (map-only job)
job.setNumMapTasks(numMapsPerHost * cluster.getTaskTrackers());
FileOutputFormat.setOutputPath(job, INPUT_DIR);
FileSystem fs = FileSystem.get(job);
fs.delete(BASE_DIR, true);
LOG.info("Generating random input for the benchmark");
LOG.info("Total data : " + totalDataSize + " mb");
LOG.info("Data per map: " + dataSizePerMap + " mb");
LOG.info("Number of spills : " + numSpillsPerMap);
LOG.info("Number of maps per host : " + numMapsPerHost);
LOG.info("Number of hosts : " + cluster.getTaskTrackers());
JobClient.runJob(job); // generates the input for the benchmark
}
/**
* This is the main routine for launching the benchmark. It generates random
* input data. The input is non-splittable. Sort is used for benchmarking.
* This benchmark reports the effect of having multiple sort and spill
* cycles over a single sort and spill.
*
* @throws IOException
*/
public int run (String[] args) throws Exception {
LOG.info("Starting the benchmark for threaded spills");
String version = "ThreadedMapBenchmark.0.0.1";
System.out.println(version);
String usage =
"Usage: threadedmapbenchmark " +
"[-dataSizePerMap <data size (in mb) per map, default is 128 mb>] " +
"[-numSpillsPerMap <number of spills per map, default is 2>] " +
"[-numMapsPerHost <number of maps per host, default is 1>]";
int dataSizePerMap = 128; // in mb
int numSpillsPerMap = 2;
int numMapsPerHost = 1;
JobConf masterConf = new JobConf(getConf());
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-dataSizePerMap")) {
dataSizePerMap = Integer.parseInt(args[++i]);
} else if (args[i].equals("-numSpillsPerMap")) {
numSpillsPerMap = Integer.parseInt(args[++i]);
} else if (args[i].equals("-numMapsPerHost")) {
numMapsPerHost = Integer.parseInt(args[++i]);
} else {
System.err.println(usage);
System.exit(-1);
}
}
if (dataSizePerMap < 1 || // verify arguments
numSpillsPerMap < 1 ||
numMapsPerHost < 1)
{
System.err.println(usage);
System.exit(-1);
}
FileSystem fs = null;
try {
// using random-writer to generate the input data
generateInputData(dataSizePerMap, numSpillsPerMap, numMapsPerHost,
masterConf);
// configure job for sorting
JobConf job = new JobConf(masterConf, ThreadedMapBenchmark.class);
job.setJobName("threaded-map-benchmark-unspilled");
job.setJarByClass(ThreadedMapBenchmark.class);
job.setInputFormat(NonSplitableSequenceFileInputFormat.class);
job.setOutputFormat(SequenceFileOutputFormat.class);
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
FileInputFormat.addInputPath(job, INPUT_DIR);
FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
JobClient client = new JobClient(job);
ClusterStatus cluster = client.getClusterStatus();
job.setNumMapTasks(numMapsPerHost * cluster.getTaskTrackers());
job.setNumReduceTasks(1);
// set mapreduce.task.io.sort.mb to avoid spill
int ioSortMb = (int)Math.ceil(FACTOR * dataSizePerMap);
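      // e.g., with the default dataSizePerMap of 128 MB this is
      // ceil(2.3 * 128) = 295 MB, large enough to hold one map's output in a
      // single in-memory sort buffer (illustrative numbers only)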
job.set(JobContext.IO_SORT_MB, String.valueOf(ioSortMb));
fs = FileSystem.get(job);
LOG.info("Running sort with 1 spill per map");
long startTime = System.currentTimeMillis();
JobClient.runJob(job);
long endTime = System.currentTimeMillis();
LOG.info("Total time taken : " + String.valueOf(endTime - startTime)
+ " millisec");
fs.delete(OUTPUT_DIR, true);
// set mapreduce.task.io.sort.mb to have multiple spills
JobConf spilledJob = new JobConf(job, ThreadedMapBenchmark.class);
ioSortMb = (int)Math.ceil(FACTOR
* Math.ceil((double)dataSizePerMap
/ numSpillsPerMap));
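      // e.g., with the defaults of 128 MB per map and 2 spills this is
      // ceil(2.3 * ceil(128 / 2)) = ceil(2.3 * 64) = 148 MB, so each map
      // should fill the sort buffer roughly numSpillsPerMap times
      // (illustrative numbers only)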
spilledJob.set(JobContext.IO_SORT_MB, String.valueOf(ioSortMb));
spilledJob.setJobName("threaded-map-benchmark-spilled");
spilledJob.setJarByClass(ThreadedMapBenchmark.class);
LOG.info("Running sort with " + numSpillsPerMap + " spills per map");
startTime = System.currentTimeMillis();
JobClient.runJob(spilledJob);
endTime = System.currentTimeMillis();
LOG.info("Total time taken : " + String.valueOf(endTime - startTime)
+ " millisec");
} finally {
if (fs != null) {
fs.delete(BASE_DIR, true);
}
}
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new ThreadedMapBenchmark(), args);
System.exit(res);
}
}
| 11,491 | 38.356164 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
import org.apache.hadoop.mapred.lib.CombineFileSplit;
import org.apache.hadoop.mapred.lib.CombineFileRecordReader;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class TestCombineFileInputFormat {
private static final Log LOG =
LogFactory.getLog(TestCombineFileInputFormat.class.getName());
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "/tmp")),
"TestCombineFileInputFormat").makeQualified(localFs);
private static void writeFile(FileSystem fs, Path name,
String contents) throws IOException {
OutputStream stm;
stm = fs.create(name);
stm.write(contents.getBytes());
stm.close();
}
/**
* Test getSplits
*/
@Test
@SuppressWarnings("unchecked")
public void testSplits() throws IOException {
JobConf job = new JobConf(defaultConf);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "test.txt"),
"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
FileInputFormat.setInputPaths(job, workDir);
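    // CombineFileInputFormat is abstract, so supply a minimal getRecordReader
    // implementation; only getSplits() is exercised by this test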
CombineFileInputFormat format = new CombineFileInputFormat() {
@Override
public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
return new CombineFileRecordReader(job, (CombineFileSplit)split, reporter, CombineFileRecordReader.class);
}
};
final int SIZE_SPLITS = 1;
LOG.info("Trying to getSplits with splits = " + SIZE_SPLITS);
InputSplit[] splits = format.getSplits(job, SIZE_SPLITS);
LOG.info("Got getSplits = " + splits.length);
assertEquals("splits == " + SIZE_SPLITS, SIZE_SPLITS, splits.length);
}
}
| 3,153 | 35.674419 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/SortValidator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import java.net.URI;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.lib.HashPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.fs.*;
/**
* A set of utilities to validate the <b>sort</b> of the map-reduce framework.
* This utility program has 2 main parts:
* 1. Checking the records' statistics
* a) Validates the no. of bytes and records in sort's input & output.
* b) Validates the xor of the md5's of each key/value pair.
* c) Ensures same key/value is present in both input and output.
* 2. Check individual records to ensure each record is present in both
* the input and the output of the sort (expensive on large data-sets).
*
* To run: bin/hadoop jar build/hadoop-examples.jar sortvalidate
* [-m <i>maps</i>] [-r <i>reduces</i>] [-deep]
* -sortInput <i>sort-in-dir</i> -sortOutput <i>sort-out-dir</i>
*/
public class SortValidator extends Configured implements Tool {
static private final IntWritable sortInput = new IntWritable(1);
static private final IntWritable sortOutput = new IntWritable(2);
static public String SORT_REDUCES =
"mapreduce.sortvalidator.sort.reduce.tasks";
static public String MAPS_PER_HOST = "mapreduce.sortvalidator.mapsperhost";
static public String REDUCES_PER_HOST =
"mapreduce.sortvalidator.reducesperhost";
static void printUsage() {
System.err.println("sortvalidate [-m <maps>] [-r <reduces>] [-deep] " +
"-sortInput <sort-input-dir> -sortOutput <sort-output-dir>");
System.exit(1);
}
static private IntWritable deduceInputFile(JobConf job) {
Path[] inputPaths = FileInputFormat.getInputPaths(job);
Path inputFile = new Path(job.get(JobContext.MAP_INPUT_FILE));
// value == one for sort-input; value == two for sort-output
return (inputFile.getParent().equals(inputPaths[0])) ?
sortInput : sortOutput;
}
static private byte[] pair(BytesWritable a, BytesWritable b) {
byte[] pairData = new byte[a.getLength()+ b.getLength()];
System.arraycopy(a.getBytes(), 0, pairData, 0, a.getLength());
System.arraycopy(b.getBytes(), 0, pairData, a.getLength(), b.getLength());
return pairData;
}
private static final PathFilter sortPathsFilter = new PathFilter() {
public boolean accept(Path path) {
return (path.getName().startsWith("part-"));
}
};
/**
* A simple map-reduce job which checks consistency of the
* MapReduce framework's sort by checking:
* a) Records are sorted correctly
* b) Keys are partitioned correctly
* c) The input and output have same no. of bytes and records.
* d) The input and output have the correct 'checksum' by xor'ing
* the md5 of each record.
*
*/
public static class RecordStatsChecker {
/**
* Generic way to get <b>raw</b> data from a {@link Writable}.
*/
static class Raw {
/**
* Get raw data bytes from a {@link Writable}
* @param writable {@link Writable} object from whom to get the raw data
* @return raw data of the writable
*/
public byte[] getRawBytes(Writable writable) {
return writable.toString().getBytes();
}
/**
* Get number of raw data bytes of the {@link Writable}
* @param writable {@link Writable} object from whom to get the raw data
* length
* @return number of raw data bytes
*/
public int getRawBytesLength(Writable writable) {
return writable.toString().getBytes().length;
}
}
/**
* Specialization of {@link Raw} for {@link BytesWritable}.
*/
static class RawBytesWritable extends Raw {
public byte[] getRawBytes(Writable bw) {
return ((BytesWritable)bw).getBytes();
}
public int getRawBytesLength(Writable bw) {
return ((BytesWritable)bw).getLength();
}
}
/**
* Specialization of {@link Raw} for {@link Text}.
*/
static class RawText extends Raw {
public byte[] getRawBytes(Writable text) {
return ((Text)text).getBytes();
}
public int getRawBytesLength(Writable text) {
return ((Text)text).getLength();
}
}
private static Raw createRaw(Class rawClass) {
if (rawClass == Text.class) {
return new RawText();
} else if (rawClass == BytesWritable.class) {
System.err.println("Returning " + RawBytesWritable.class);
return new RawBytesWritable();
}
return new Raw();
}
public static class RecordStatsWritable implements Writable {
private long bytes = 0;
private long records = 0;
private int checksum = 0;
public RecordStatsWritable() {}
public RecordStatsWritable(long bytes, long records, int checksum) {
this.bytes = bytes;
this.records = records;
this.checksum = checksum;
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, bytes);
WritableUtils.writeVLong(out, records);
WritableUtils.writeVInt(out, checksum);
}
public void readFields(DataInput in) throws IOException {
bytes = WritableUtils.readVLong(in);
records = WritableUtils.readVLong(in);
checksum = WritableUtils.readVInt(in);
}
public long getBytes() { return bytes; }
public long getRecords() { return records; }
public int getChecksum() { return checksum; }
}
public static class Map extends MapReduceBase
implements Mapper<WritableComparable, Writable,
IntWritable, RecordStatsWritable> {
private IntWritable key = null;
private WritableComparable prevKey = null;
private Class<? extends WritableComparable> keyClass;
private Partitioner<WritableComparable, Writable> partitioner = null;
private int partition = -1;
private int noSortReducers = -1;
private long recordId = -1;
private Raw rawKey;
private Raw rawValue;
public void configure(JobConf job) {
// 'key' == sortInput for sort-input; key == sortOutput for sort-output
key = deduceInputFile(job);
if (key == sortOutput) {
partitioner = new HashPartitioner<WritableComparable, Writable>();
// Figure the 'current' partition and no. of reduces of the 'sort'
try {
URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE));
String inputFile = inputURI.getPath();
// part file is of the form part-r-xxxxx
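            // ('lastIndexOf("part") + 7' skips the 7-character "part-r-"
            // prefix, leaving just the numeric partition id)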
partition = Integer.valueOf(inputFile.substring(
inputFile.lastIndexOf("part") + 7)).intValue();
noSortReducers = job.getInt(SORT_REDUCES, -1);
} catch (Exception e) {
System.err.println("Caught: " + e);
System.exit(-1);
}
}
}
@SuppressWarnings("unchecked")
public void map(WritableComparable key, Writable value,
OutputCollector<IntWritable, RecordStatsWritable> output,
Reporter reporter) throws IOException {
// Set up rawKey and rawValue on the first call to 'map'
if (recordId == -1) {
rawKey = createRaw(key.getClass());
rawValue = createRaw(value.getClass());
}
++recordId;
if (this.key == sortOutput) {
// Check if keys are 'sorted' if this
// record is from sort's output
if (prevKey == null) {
prevKey = key;
keyClass = prevKey.getClass();
} else {
// Sanity check
if (keyClass != key.getClass()) {
throw new IOException("Type mismatch in key: expected " +
keyClass.getName() + ", received " +
key.getClass().getName());
}
// Check if they were sorted correctly
if (prevKey.compareTo(key) > 0) {
throw new IOException("The 'map-reduce' framework wrongly" +
" classifed (" + prevKey + ") > (" +
key + ") "+ "for record# " + recordId);
}
prevKey = key;
}
// Check if the sorted output is 'partitioned' right
int keyPartition =
partitioner.getPartition(key, value, noSortReducers);
if (partition != keyPartition) {
throw new IOException("Partitions do not match for record# " +
recordId + " ! - '" + partition + "' v/s '" +
keyPartition + "'");
}
}
// Construct the record-stats and output (this.key, record-stats)
byte[] keyBytes = rawKey.getRawBytes(key);
int keyBytesLen = rawKey.getRawBytesLength(key);
byte[] valueBytes = rawValue.getRawBytes(value);
int valueBytesLen = rawValue.getRawBytesLength(value);
int keyValueChecksum =
(WritableComparator.hashBytes(keyBytes, keyBytesLen) ^
WritableComparator.hashBytes(valueBytes, valueBytesLen));
output.collect(this.key,
new RecordStatsWritable((keyBytesLen+valueBytesLen),
1, keyValueChecksum)
);
}
}
public static class Reduce extends MapReduceBase
implements Reducer<IntWritable, RecordStatsWritable,
IntWritable, RecordStatsWritable> {
public void reduce(IntWritable key, Iterator<RecordStatsWritable> values,
OutputCollector<IntWritable,
RecordStatsWritable> output,
Reporter reporter) throws IOException {
long bytes = 0;
long records = 0;
int xor = 0;
while (values.hasNext()) {
RecordStatsWritable stats = values.next();
bytes += stats.getBytes();
records += stats.getRecords();
xor ^= stats.getChecksum();
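          // xor is commutative and associative, so the aggregate checksum does
          // not depend on the order in which records were seen; this lets the
          // sort input and sort output be compared even though sorting
          // reorders the records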
}
output.collect(key, new RecordStatsWritable(bytes, records, xor));
}
}
public static class NonSplitableSequenceFileInputFormat
extends SequenceFileInputFormat {
protected boolean isSplitable(FileSystem fs, Path filename) {
return false;
}
}
static void checkRecords(Configuration defaults,
Path sortInput, Path sortOutput) throws IOException {
FileSystem inputfs = sortInput.getFileSystem(defaults);
FileSystem outputfs = sortOutput.getFileSystem(defaults);
FileSystem defaultfs = FileSystem.get(defaults);
JobConf jobConf = new JobConf(defaults, RecordStatsChecker.class);
jobConf.setJobName("sortvalidate-recordstats-checker");
int noSortReduceTasks =
outputfs.listStatus(sortOutput, sortPathsFilter).length;
jobConf.setInt(SORT_REDUCES, noSortReduceTasks);
int noSortInputpaths = inputfs.listStatus(sortInput).length;
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
jobConf.setOutputKeyClass(IntWritable.class);
jobConf.setOutputValueClass(RecordStatsChecker.RecordStatsWritable.class);
jobConf.setMapperClass(Map.class);
jobConf.setCombinerClass(Reduce.class);
jobConf.setReducerClass(Reduce.class);
jobConf.setNumMapTasks(noSortReduceTasks);
jobConf.setNumReduceTasks(1);
FileInputFormat.setInputPaths(jobConf, sortInput);
FileInputFormat.addInputPath(jobConf, sortOutput);
Path outputPath = new Path(new Path("/tmp",
"sortvalidate"), UUID.randomUUID().toString());
if (defaultfs.exists(outputPath)) {
defaultfs.delete(outputPath, true);
}
FileOutputFormat.setOutputPath(jobConf, outputPath);
// Uncomment to run locally in a single process
      //jobConf.set(JTConfig.JT, "local");
Path[] inputPaths = FileInputFormat.getInputPaths(jobConf);
System.out.println("\nSortValidator.RecordStatsChecker: Validate sort " +
"from " + inputPaths[0] + " (" +
noSortInputpaths + " files), " +
inputPaths[1] + " (" +
noSortReduceTasks +
" files) into " +
FileOutputFormat.getOutputPath(jobConf) +
" with 1 reducer.");
Date startTime = new Date();
System.out.println("Job started: " + startTime);
JobClient.runJob(jobConf);
try {
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
System.out.println("The job took " +
(end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
// Check to ensure that the statistics of the
// framework's sort-input and sort-output match
SequenceFile.Reader stats = new SequenceFile.Reader(defaultfs,
new Path(outputPath, "part-00000"), defaults);
try {
IntWritable k1 = new IntWritable();
IntWritable k2 = new IntWritable();
RecordStatsWritable v1 = new RecordStatsWritable();
RecordStatsWritable v2 = new RecordStatsWritable();
if (!stats.next(k1, v1)) {
throw new IOException(
"Failed to read record #1 from reduce's output");
}
if (!stats.next(k2, v2)) {
throw new IOException(
"Failed to read record #2 from reduce's output");
}
if ((v1.getBytes() != v2.getBytes()) ||
(v1.getRecords() != v2.getRecords()) ||
v1.getChecksum() != v2.getChecksum()) {
throw new IOException("(" +
v1.getBytes() + ", " + v1.getRecords() + ", " + v1.getChecksum()
+ ") v/s (" +
v2.getBytes() + ", " + v2.getRecords() + ", " + v2.getChecksum()
+ ")");
}
} finally {
stats.close();
}
} finally {
defaultfs.delete(outputPath, true);
}
}
}
/**
* A simple map-reduce task to check if the input and the output
   * of the framework's sort are consistent by ensuring each record
* is present in both the input and the output.
*
*/
public static class RecordChecker {
public static class Map extends MapReduceBase
implements Mapper<BytesWritable, BytesWritable,
BytesWritable, IntWritable> {
private IntWritable value = null;
public void configure(JobConf job) {
// value == one for sort-input; value == two for sort-output
value = deduceInputFile(job);
}
public void map(BytesWritable key,
BytesWritable value,
OutputCollector<BytesWritable, IntWritable> output,
Reporter reporter) throws IOException {
// newKey = (key, value)
BytesWritable keyValue = new BytesWritable(pair(key, value));
// output (newKey, value)
output.collect(keyValue, this.value);
}
}
public static class Reduce extends MapReduceBase
implements Reducer<BytesWritable, IntWritable,
BytesWritable, IntWritable> {
public void reduce(BytesWritable key, Iterator<IntWritable> values,
OutputCollector<BytesWritable, IntWritable> output,
Reporter reporter) throws IOException {
int ones = 0;
int twos = 0;
while (values.hasNext()) {
IntWritable count = values.next();
if (count.equals(sortInput)) {
++ones;
} else if (count.equals(sortOutput)) {
++twos;
} else {
throw new IOException("Invalid 'value' of " + count.get() +
" for (key,value): " + key.toString());
}
}
// Check to ensure there are equal no. of ones and twos
if (ones != twos) {
throw new IOException("Illegal ('one', 'two'): (" + ones + ", " + twos +
") for (key, value): " + key.toString());
}
}
}
static void checkRecords(Configuration defaults, int noMaps, int noReduces,
Path sortInput, Path sortOutput) throws IOException {
JobConf jobConf = new JobConf(defaults, RecordChecker.class);
jobConf.setJobName("sortvalidate-record-checker");
jobConf.setInputFormat(SequenceFileInputFormat.class);
jobConf.setOutputFormat(SequenceFileOutputFormat.class);
jobConf.setOutputKeyClass(BytesWritable.class);
jobConf.setOutputValueClass(IntWritable.class);
jobConf.setMapperClass(Map.class);
jobConf.setReducerClass(Reduce.class);
JobClient client = new JobClient(jobConf);
ClusterStatus cluster = client.getClusterStatus();
if (noMaps == -1) {
noMaps = cluster.getTaskTrackers() *
jobConf.getInt(MAPS_PER_HOST, 10);
}
if (noReduces == -1) {
noReduces = (int) (cluster.getMaxReduceTasks() * 0.9);
String sortReduces = jobConf.get(REDUCES_PER_HOST);
if (sortReduces != null) {
noReduces = cluster.getTaskTrackers() *
Integer.parseInt(sortReduces);
}
}
jobConf.setNumMapTasks(noMaps);
jobConf.setNumReduceTasks(noReduces);
FileInputFormat.setInputPaths(jobConf, sortInput);
FileInputFormat.addInputPath(jobConf, sortOutput);
Path outputPath = new Path("/tmp/sortvalidate/recordchecker");
FileSystem fs = FileSystem.get(defaults);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
FileOutputFormat.setOutputPath(jobConf, outputPath);
// Uncomment to run locally in a single process
      //jobConf.set(JTConfig.JT, "local");
Path[] inputPaths = FileInputFormat.getInputPaths(jobConf);
System.out.println("\nSortValidator.RecordChecker: Running on " +
cluster.getTaskTrackers() +
" nodes to validate sort from " +
inputPaths[0] + ", " +
inputPaths[1] + " into " +
FileOutputFormat.getOutputPath(jobConf) +
" with " + noReduces + " reduces.");
Date startTime = new Date();
System.out.println("Job started: " + startTime);
JobClient.runJob(jobConf);
Date end_time = new Date();
System.out.println("Job ended: " + end_time);
System.out.println("The job took " +
(end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
}
}
/**
   * The main driver for the sort-validator program.
   * Invoke this method to submit the map/reduce job.
   * @throws IOException when there are communication problems with the
   *                     job tracker.
*/
public int run(String[] args) throws Exception {
Configuration defaults = getConf();
int noMaps = -1, noReduces = -1;
Path sortInput = null, sortOutput = null;
boolean deepTest = false;
for(int i=0; i < args.length; ++i) {
try {
if ("-m".equals(args[i])) {
noMaps = Integer.parseInt(args[++i]);
} else if ("-r".equals(args[i])) {
noReduces = Integer.parseInt(args[++i]);
} else if ("-sortInput".equals(args[i])){
sortInput = new Path(args[++i]);
} else if ("-sortOutput".equals(args[i])){
sortOutput = new Path(args[++i]);
} else if ("-deep".equals(args[i])) {
deepTest = true;
} else {
printUsage();
return -1;
}
} catch (NumberFormatException except) {
System.err.println("ERROR: Integer expected instead of " + args[i]);
printUsage();
return -1;
} catch (ArrayIndexOutOfBoundsException except) {
System.err.println("ERROR: Required parameter missing from " +
args[i-1]);
printUsage();
return -1;
}
}
// Sanity check
if (sortInput == null || sortOutput == null) {
printUsage();
return -2;
}
// Check if the records are consistent and sorted correctly
RecordStatsChecker.checkRecords(defaults, sortInput, sortOutput);
// Check if the same records are present in sort's inputs & outputs
if (deepTest) {
RecordChecker.checkRecords(defaults, noMaps, noReduces, sortInput,
sortOutput);
}
System.out.println("\nSUCCESS! Validated the MapReduce framework's 'sort'" +
" successfully.");
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new SortValidator(), args);
System.exit(res);
}
}
| 22,713 | 36.983278 | 89 |
java
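The record-stats check in SortValidator above works because its aggregate checksum is an XOR of per-record hashes, which is insensitive to the order in which records arrive; the sorted output therefore folds to the same value as the unsorted input. Below is a minimal, self-contained sketch of that property (illustrative only, not part of the Hadoop sources above; java.util.Arrays.hashCode stands in for WritableComparator.hashBytes).
import java.util.Arrays;
import java.util.List;
public class XorChecksumSketch {
  // Fold a multiset of records into one order-independent checksum.
  private static int fold(List<byte[]> records) {
    int xor = 0;
    for (byte[] r : records) {
      xor ^= Arrays.hashCode(r); // stand-in for WritableComparator.hashBytes(bytes, len)
    }
    return xor;
  }
  public static void main(String[] args) {
    List<byte[]> input = Arrays.asList("b".getBytes(), "a".getBytes(), "c".getBytes());
    List<byte[]> sorted = Arrays.asList("a".getBytes(), "b".getBytes(), "c".getBytes());
    // Same multiset of records in a different order => identical aggregate checksum.
    System.out.println(fold(input) == fold(sorted)); // prints true
  }
}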
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestFixedLengthInputFormat {
private static Log LOG;
private static Configuration defaultConf;
private static FileSystem localFs;
private static Path workDir;
private static Reporter voidReporter;
// some chars for the record data
private static char[] chars;
private static Random charRand;
@BeforeClass
public static void onlyOnce() {
try {
LOG = LogFactory.getLog(TestFixedLengthInputFormat.class.getName());
defaultConf = new Configuration();
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
voidReporter = Reporter.NULL;
// our set of chars
chars = ("abcdefghijklmnopqrstuvABCDEFGHIJKLMN OPQRSTUVWXYZ1234567890)"
+ "(*&^%$#@!-=><?:\"{}][';/.,']").toCharArray();
workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestKeyValueFixedLengthInputFormat");
charRand = new Random();
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
/**
* 20 random tests of various record, file, and split sizes. All tests have
* uncompressed file as input.
*/
@Test (timeout=500000)
public void testFormat() throws IOException {
runRandomTests(null);
}
/**
* 20 random tests of various record, file, and split sizes. All tests have
* compressed file as input.
*/
@Test (timeout=500000)
public void testFormatCompressedIn() throws IOException {
runRandomTests(new GzipCodec());
}
/**
* Test with no record length set.
*/
@Test (timeout=5000)
public void testNoRecordLength() throws IOException {
localFs.delete(workDir, true);
    Path file = new Path(workDir, "testFormat.txt");
createFile(file, null, 10, 10);
// Set the fixed length record length config property
JobConf job = new JobConf(defaultConf);
FileInputFormat.setInputPaths(job, workDir);
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.configure(job);
InputSplit splits[] = format.getSplits(job, 1);
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
RecordReader<LongWritable, BytesWritable> reader =
format.getRecordReader(split, job, voidReporter);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for not setting record length:", exceptionThrown);
}
/**
* Test with record length set to 0
*/
@Test (timeout=5000)
public void testZeroRecordLength() throws IOException {
localFs.delete(workDir, true);
    Path file = new Path(workDir, "testFormat.txt");
createFile(file, null, 10, 10);
// Set the fixed length record length config property
JobConf job = new JobConf(defaultConf);
FileInputFormat.setInputPaths(job, workDir);
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.setRecordLength(job, 0);
format.configure(job);
InputSplit splits[] = format.getSplits(job, 1);
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
RecordReader<LongWritable, BytesWritable> reader =
format.getRecordReader(split, job, voidReporter);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for zero record length:", exceptionThrown);
}
/**
* Test with record length set to a negative value
*/
@Test (timeout=5000)
public void testNegativeRecordLength() throws IOException {
localFs.delete(workDir, true);
    Path file = new Path(workDir, "testFormat.txt");
createFile(file, null, 10, 10);
// Set the fixed length record length config property
JobConf job = new JobConf(defaultConf);
FileInputFormat.setInputPaths(job, workDir);
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.setRecordLength(job, -10);
format.configure(job);
InputSplit splits[] = format.getSplits(job, 1);
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
RecordReader<LongWritable, BytesWritable> reader =
format.getRecordReader(split, job, voidReporter);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for negative record length:", exceptionThrown);
}
/**
* Test with partial record at the end of a compressed input file.
*/
@Test (timeout=5000)
public void testPartialRecordCompressedIn() throws IOException {
CompressionCodec gzip = new GzipCodec();
runPartialRecordTest(gzip);
}
/**
* Test with partial record at the end of an uncompressed input file.
*/
@Test (timeout=5000)
public void testPartialRecordUncompressedIn() throws IOException {
runPartialRecordTest(null);
}
/**
* Test using the gzip codec with two input files.
*/
@Test (timeout=5000)
public void testGzipWithTwoInputs() throws IOException {
CompressionCodec gzip = new GzipCodec();
localFs.delete(workDir, true);
FixedLengthInputFormat format = new FixedLengthInputFormat();
JobConf job = new JobConf(defaultConf);
format.setRecordLength(job, 5);
FileInputFormat.setInputPaths(job, workDir);
ReflectionUtils.setConf(gzip, job);
format.configure(job);
    // Create two files of fixed-length, 5-byte records.
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"one two threefour five six seveneightnine ten ");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"ten nine eightsevensix five four threetwo one ");
InputSplit[] splits = format.getSplits(job, 100);
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0] = splits[1];
splits[1] = tmp;
}
List<String> results = readSplit(format, splits[0], job);
assertEquals("splits[0] length", 10, results.size());
assertEquals("splits[0][5]", "six ", results.get(5));
results = readSplit(format, splits[1], job);
assertEquals("splits[1] length", 10, results.size());
assertEquals("splits[1][0]", "ten ", results.get(0));
assertEquals("splits[1][1]", "nine ", results.get(1));
}
// Create a file containing fixed length records with random data
private ArrayList<String> createFile(Path targetFile, CompressionCodec codec,
int recordLen,
int numRecords) throws IOException {
ArrayList<String> recordList = new ArrayList<String>(numRecords);
OutputStream ostream = localFs.create(targetFile);
if (codec != null) {
ostream = codec.createOutputStream(ostream);
}
Writer writer = new OutputStreamWriter(ostream);
try {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < recordLen; j++) {
sb.append(chars[charRand.nextInt(chars.length)]);
}
String recordData = sb.toString();
recordList.add(recordData);
writer.write(recordData);
sb.setLength(0);
}
} finally {
writer.close();
}
return recordList;
}
private void runRandomTests(CompressionCodec codec) throws IOException {
StringBuilder fileName = new StringBuilder("testFormat.txt");
if (codec != null) {
fileName.append(".gz");
}
localFs.delete(workDir, true);
Path file = new Path(workDir, fileName.toString());
int seed = new Random().nextInt();
LOG.info("Seed = " + seed);
Random random = new Random(seed);
int MAX_TESTS = 20;
LongWritable key = new LongWritable();
BytesWritable value = new BytesWritable();
for (int i = 0; i < MAX_TESTS; i++) {
LOG.info("----------------------------------------------------------");
// Maximum total records of 999
int totalRecords = random.nextInt(999)+1;
// Test an empty file
if (i == 8) {
totalRecords = 0;
}
// Maximum bytes in a record of 100K
int recordLength = random.nextInt(1024*100)+1;
// For the 11th test, force a record length of 1
if (i == 10) {
recordLength = 1;
}
// The total bytes in the test file
int fileSize = (totalRecords * recordLength);
LOG.info("totalRecords=" + totalRecords + " recordLength="
+ recordLength);
// Create the job
JobConf job = new JobConf(defaultConf);
if (codec != null) {
ReflectionUtils.setConf(codec, job);
}
// Create the test file
ArrayList<String> recordList
= createFile(file, codec, recordLength, totalRecords);
assertTrue(localFs.exists(file));
//set the fixed length record length config property for the job
FixedLengthInputFormat.setRecordLength(job, recordLength);
int numSplits = 1;
// Arbitrarily set number of splits.
if (i > 0) {
if (i == (MAX_TESTS-1)) {
// Test a split size that is less than record len
numSplits = (int)(fileSize/Math.floor(recordLength/2));
} else {
if (MAX_TESTS % i == 0) {
            // Create a split size that is forced to be smaller
            // than the file itself (ensures 1+ splits)
numSplits = fileSize/(fileSize - random.nextInt(fileSize));
} else {
// Just pick a random split size with no upper bound
numSplits = Math.max(1, fileSize/random.nextInt(Integer.MAX_VALUE));
}
}
LOG.info("Number of splits set to: " + numSplits);
}
// Setup the input path
FileInputFormat.setInputPaths(job, workDir);
// Try splitting the file in a variety of sizes
FixedLengthInputFormat format = new FixedLengthInputFormat();
format.configure(job);
InputSplit splits[] = format.getSplits(job, numSplits);
LOG.info("Actual number of splits = " + splits.length);
// Test combined split lengths = total file size
long recordOffset = 0;
int recordNumber = 0;
for (InputSplit split : splits) {
RecordReader<LongWritable, BytesWritable> reader =
format.getRecordReader(split, job, voidReporter);
Class<?> clazz = reader.getClass();
assertEquals("RecordReader class should be FixedLengthRecordReader:",
FixedLengthRecordReader.class, clazz);
// Plow through the records in this split
while (reader.next(key, value)) {
assertEquals("Checking key", (long)(recordNumber*recordLength),
key.get());
String valueString =
new String(value.getBytes(), 0, value.getLength());
assertEquals("Checking record length:", recordLength,
value.getLength());
assertTrue("Checking for more records than expected:",
recordNumber < totalRecords);
String origRecord = recordList.get(recordNumber);
assertEquals("Checking record content:", origRecord, valueString);
recordNumber++;
}
reader.close();
}
assertEquals("Total original records should be total read records:",
recordList.size(), recordNumber);
}
}
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static List<String> readSplit(FixedLengthInputFormat format,
InputSplit split,
JobConf job) throws IOException {
List<String> result = new ArrayList<String>();
RecordReader<LongWritable, BytesWritable> reader =
format.getRecordReader(split, job, voidReporter);
LongWritable key = reader.createKey();
BytesWritable value = reader.createValue();
try {
while (reader.next(key, value)) {
result.add(new String(value.getBytes(), 0, value.getLength()));
}
} finally {
reader.close();
}
return result;
}
private void runPartialRecordTest(CompressionCodec codec) throws IOException {
localFs.delete(workDir, true);
    // Create a file of fixed-length, 5-byte records,
    // with a partial record at the end.
StringBuilder fileName = new StringBuilder("testFormat.txt");
if (codec != null) {
fileName.append(".gz");
}
FixedLengthInputFormat format = new FixedLengthInputFormat();
JobConf job = new JobConf(defaultConf);
format.setRecordLength(job, 5);
FileInputFormat.setInputPaths(job, workDir);
if (codec != null) {
ReflectionUtils.setConf(codec, job);
}
format.configure(job);
writeFile(localFs, new Path(workDir, fileName.toString()), codec,
"one two threefour five six seveneightnine ten");
InputSplit[] splits = format.getSplits(job, 100);
if (codec != null) {
assertEquals("compressed splits == 1", 1, splits.length);
}
boolean exceptionThrown = false;
for (InputSplit split : splits) {
try {
List<String> results = readSplit(format, split, job);
} catch(IOException ioe) {
exceptionThrown = true;
LOG.info("Exception message:" + ioe.getMessage());
}
}
assertTrue("Exception for partial record:", exceptionThrown);
}
}
| 15,463 | 35.819048 | 80 |
java
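The tests above all assume that FixedLengthInputFormat is handed a positive record length before splits or record readers are requested. A hedged sketch of the minimal old-API job wiring follows (the sketch class, its method, and the input directory are hypothetical; only the Hadoop calls themselves are taken from the test above).
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FixedLengthInputFormat;
import org.apache.hadoop.mapred.JobConf;
public class FixedLengthJobSketch {
  // Returns a JobConf wired for fixed-length records of the given size.
  public static JobConf configure(String inputDir, int recordLength) {
    JobConf job = new JobConf();
    // Without this property the record reader throws IOException, as the
    // testNoRecordLength/testZeroRecordLength/testNegativeRecordLength cases verify.
    FixedLengthInputFormat.setRecordLength(job, recordLength);
    job.setInputFormat(FixedLengthInputFormat.class);
    // Keys are byte offsets into the file, values are the fixed-size records.
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(BytesWritable.class);
    FileInputFormat.setInputPaths(job, new Path(inputDir));
    return job;
  }
}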
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.junit.Assert;
import org.junit.Test;
public class TestClientRedirect {
static {
DefaultMetricsSystem.setMiniClusterMode(true);
}
private static final Log LOG = LogFactory.getLog(TestClientRedirect.class);
private static final String RMADDRESS = "0.0.0.0:8054";
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
private static final String AMHOSTADDRESS = "0.0.0.0:10020";
private static final String HSHOSTADDRESS = "0.0.0.0:10021";
private volatile boolean amContact = false;
private volatile boolean hsContact = false;
private volatile boolean amRunning = false;
private volatile boolean amRestarting = false;
@Test
public void testRedirect() throws Exception {
Configuration conf = new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.set(YarnConfiguration.RM_ADDRESS, RMADDRESS);
conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, HSHOSTADDRESS);
// Start the RM.
RMService rmService = new RMService("test");
rmService.init(conf);
rmService.start();
// Start the AM.
AMService amService = new AMService();
amService.init(conf);
amService.start(conf);
// Start the HS.
HistoryService historyService = new HistoryService();
historyService.init(conf);
historyService.start(conf);
LOG.info("services started");
Cluster cluster = new Cluster(conf);
org.apache.hadoop.mapreduce.JobID jobID =
new org.apache.hadoop.mapred.JobID("201103121733", 1);
org.apache.hadoop.mapreduce.Counters counters =
cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(amContact);
LOG.info("Sleeping for 5 seconds before stop for" +
" the client socket to not get EOF immediately..");
Thread.sleep(5000);
//bring down the AM service
amService.stop();
LOG.info("Sleeping for 5 seconds after stop for" +
" the server to exit cleanly..");
Thread.sleep(5000);
amRestarting = true;
// Same client
//results are returned from fake (not started job)
counters = cluster.getJob(jobID).getCounters();
Assert.assertEquals(0, counters.countCounters());
Job job = cluster.getJob(jobID);
org.apache.hadoop.mapreduce.TaskID taskId =
new org.apache.hadoop.mapreduce.TaskID(jobID, TaskType.MAP, 0);
TaskAttemptID tId = new TaskAttemptID(taskId, 0);
//invoke all methods to check that no exception is thrown
job.killJob();
job.killTask(tId);
job.failTask(tId);
job.getTaskCompletionEvents(0, 100);
job.getStatus();
job.getTaskDiagnostics(tId);
job.getTaskReports(TaskType.MAP);
job.getTrackingURL();
amRestarting = false;
amService = new AMService();
amService.init(conf);
amService.start(conf);
amContact = false; //reset
counters = cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(amContact);
// Stop the AM. It is not even restarting. So it should be treated as
// completed.
amService.stop();
// Same client
counters = cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(hsContact);
rmService.stop();
historyService.stop();
}
private void validateCounters(org.apache.hadoop.mapreduce.Counters counters) {
Iterator<org.apache.hadoop.mapreduce.CounterGroup> it = counters.iterator();
while (it.hasNext()) {
org.apache.hadoop.mapreduce.CounterGroup group = it.next();
LOG.info("Group " + group.getDisplayName());
Iterator<org.apache.hadoop.mapreduce.Counter> itc = group.iterator();
while (itc.hasNext()) {
LOG.info("Counter is " + itc.next().getDisplayName());
}
}
Assert.assertEquals(1, counters.countCounters());
}
class RMService extends AbstractService implements ApplicationClientProtocol {
private String clientServiceBindAddress;
InetSocketAddress clientBindAddress;
private Server server;
public RMService(String name) {
super(name);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
clientServiceBindAddress = RMADDRESS;
/*
clientServiceBindAddress = conf.get(
YarnConfiguration.APPSMANAGER_ADDRESS,
YarnConfiguration.DEFAULT_APPSMANAGER_BIND_ADDRESS);
*/
clientBindAddress = NetUtils.createSocketAddr(clientServiceBindAddress);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
// All the clients to appsManager are supposed to be authenticated via
// Kerberos if security is enabled, so no secretManager.
YarnRPC rpc = YarnRPC.create(getConfig());
Configuration clientServerConf = new Configuration(getConfig());
this.server = rpc.getServer(ApplicationClientProtocol.class, this,
clientBindAddress, clientServerConf, null, 1);
this.server.start();
super.serviceStart();
}
@Override
public GetNewApplicationResponse getNewApplication(
GetNewApplicationRequest request) throws IOException {
return null;
}
@Override
public GetApplicationReportResponse getApplicationReport(
GetApplicationReportRequest request) throws IOException {
ApplicationId applicationId = request.getApplicationId();
ApplicationReport application = recordFactory
.newRecordInstance(ApplicationReport.class);
application.setApplicationId(applicationId);
application.setFinalApplicationStatus(FinalApplicationStatus.UNDEFINED);
if (amRunning) {
application.setYarnApplicationState(YarnApplicationState.RUNNING);
} else if (amRestarting) {
application.setYarnApplicationState(YarnApplicationState.SUBMITTED);
} else {
application.setYarnApplicationState(YarnApplicationState.FINISHED);
application.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
}
String[] split = AMHOSTADDRESS.split(":");
application.setHost(split[0]);
application.setRpcPort(Integer.parseInt(split[1]));
application.setUser("TestClientRedirect-user");
application.setName("N/A");
application.setQueue("N/A");
application.setStartTime(0);
application.setFinishTime(0);
application.setTrackingUrl("N/A");
application.setDiagnostics("N/A");
GetApplicationReportResponse response = recordFactory
.newRecordInstance(GetApplicationReportResponse.class);
response.setApplicationReport(application);
return response;
}
@Override
public SubmitApplicationResponse submitApplication(
SubmitApplicationRequest request) throws IOException {
throw new IOException("Test");
}
@Override
public KillApplicationResponse forceKillApplication(
KillApplicationRequest request) throws IOException {
return KillApplicationResponse.newInstance(true);
}
@Override
public GetClusterMetricsResponse getClusterMetrics(
GetClusterMetricsRequest request) throws IOException {
return null;
}
@Override
public GetApplicationsResponse getApplications(
GetApplicationsRequest request) throws IOException {
return null;
}
@Override
public GetClusterNodesResponse getClusterNodes(
GetClusterNodesRequest request) throws IOException {
return null;
}
@Override
public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
throws IOException {
return null;
}
@Override
public GetQueueUserAclsInfoResponse getQueueUserAcls(
GetQueueUserAclsInfoRequest request) throws IOException {
return null;
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws IOException {
return null;
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws IOException {
return null;
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws IOException {
return null;
}
@Override
public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(
MoveApplicationAcrossQueuesRequest request) throws YarnException, IOException {
return null;
}
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request) throws YarnException,
IOException {
return null;
}
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException,
IOException {
return null;
}
@Override
public GetContainerReportResponse getContainerReport(
GetContainerReportRequest request) throws YarnException, IOException {
return null;
}
@Override
public GetContainersResponse getContainers(GetContainersRequest request)
throws YarnException, IOException {
return null;
}
@Override
public ReservationSubmissionResponse submitReservation(
ReservationSubmissionRequest request) throws YarnException, IOException {
return null;
}
@Override
public ReservationUpdateResponse updateReservation(
ReservationUpdateRequest request) throws YarnException, IOException {
return null;
}
@Override
public ReservationDeleteResponse deleteReservation(
ReservationDeleteRequest request) throws YarnException, IOException {
return null;
}
@Override
public GetNodesToLabelsResponse getNodeToLabels(
GetNodesToLabelsRequest request) throws YarnException, IOException {
return null;
}
@Override
public GetClusterNodeLabelsResponse getClusterNodeLabels(
GetClusterNodeLabelsRequest request) throws YarnException, IOException {
return null;
}
@Override
public GetLabelsToNodesResponse getLabelsToNodes(
GetLabelsToNodesRequest request) throws YarnException, IOException {
return null;
}
}
class HistoryService extends AMService implements HSClientProtocol {
public HistoryService() {
super(HSHOSTADDRESS);
this.protocol = HSClientProtocol.class;
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
hsContact = true;
Counters counters = getMyCounters();
GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
response.setCounters(counters);
return response;
}
}
class AMService extends AbstractService
implements MRClientProtocol {
protected Class<?> protocol;
private InetSocketAddress bindAddress;
private Server server;
private final String hostAddress;
public AMService() {
this(AMHOSTADDRESS);
}
@Override
public InetSocketAddress getConnectAddress() {
return bindAddress;
}
public AMService(String hostAddress) {
super("AMService");
this.protocol = MRClientProtocol.class;
this.hostAddress = hostAddress;
}
public void start(Configuration conf) {
YarnRPC rpc = YarnRPC.create(conf);
//TODO : use fixed port ??
InetSocketAddress address = NetUtils.createSocketAddr(hostAddress);
InetAddress hostNameResolved = null;
try {
address.getAddress();
hostNameResolved = InetAddress.getLocalHost();
} catch (UnknownHostException e) {
throw new YarnRuntimeException(e);
}
server =
rpc.getServer(protocol, this, address,
conf, null, 1);
server.start();
this.bindAddress = NetUtils.getConnectAddress(server);
super.start();
amRunning = true;
}
@Override
protected void serviceStop() throws Exception {
if (server != null) {
server.stop();
}
super.serviceStop();
amRunning = false;
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
JobId jobID = request.getJobId();
amContact = true;
Counters counters = getMyCounters();
GetCountersResponse response = recordFactory
.newRecordInstance(GetCountersResponse.class);
response.setCounters(counters);
return response;
}
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws IOException {
amContact = true;
JobReport jobReport = recordFactory.newRecordInstance(JobReport.class);
jobReport.setJobId(request.getJobId());
jobReport.setJobState(JobState.RUNNING);
jobReport.setJobName("TestClientRedirect-jobname");
jobReport.setUser("TestClientRedirect-user");
jobReport.setStartTime(0L);
jobReport.setFinishTime(1L);
GetJobReportResponse response = recordFactory
.newRecordInstance(GetJobReportResponse.class);
response.setJobReport(jobReport);
return response;
}
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
throws IOException {
return null;
}
@Override
public GetTaskAttemptReportResponse getTaskAttemptReport(
GetTaskAttemptReportRequest request) throws IOException {
return null;
}
@Override
public GetTaskAttemptCompletionEventsResponse
getTaskAttemptCompletionEvents(
GetTaskAttemptCompletionEventsRequest request)
throws IOException {
return null;
}
@Override
public GetTaskReportsResponse
getTaskReports(GetTaskReportsRequest request)
throws IOException {
return null;
}
@Override
public GetDiagnosticsResponse
getDiagnostics(GetDiagnosticsRequest request)
throws IOException {
return null;
}
@Override
public KillJobResponse killJob(KillJobRequest request)
throws IOException {
return recordFactory.newRecordInstance(KillJobResponse.class);
}
@Override
public KillTaskResponse killTask(KillTaskRequest request)
throws IOException {
return null;
}
@Override
public KillTaskAttemptResponse killTaskAttempt(
KillTaskAttemptRequest request) throws IOException {
return null;
}
@Override
public FailTaskAttemptResponse failTaskAttempt(
FailTaskAttemptRequest request) throws IOException {
return null;
}
@Override
public org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse getDelegationToken(
org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest request)
throws IOException {
return null;
}
@Override
public org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse renewDelegationToken(
org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest request)
throws IOException {
return null;
}
@Override
public org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse cancelDelegationToken(
org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest request)
throws IOException {
return null;
}
}
static Counters getMyCounters() {
Counter counter = recordFactory.newRecordInstance(Counter.class);
counter.setName("Mycounter");
counter.setDisplayName("My counter display name");
counter.setValue(12345);
CounterGroup group = recordFactory
.newRecordInstance(CounterGroup.class);
group.setName("MyGroup");
group.setDisplayName("My groupd display name");
group.setCounter("myCounter", counter);
Counters counters = recordFactory.newRecordInstance(Counters.class);
counters.setCounterGroup("myGroupd", group);
return counters;
}
}
| 24,311 | 36.460709 | 114 |
java
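The redirect behaviour exercised by TestClientRedirect comes down to one routing decision: while the RM reports the application as running, job-level RPCs go to the MRAppMaster host and port from the application report; once it has finished, they go to the configured JobHistoryServer. A minimal illustrative sketch of that decision follows (the helper class and method are hypothetical, not part of the test).
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
public class ClientRoutingSketch {
  // Pick the service address the MR client should talk to for job-level RPCs.
  static String pickServiceAddress(ApplicationReport report, String historyAddress) {
    if (report.getYarnApplicationState() == YarnApplicationState.RUNNING) {
      return report.getHost() + ":" + report.getRpcPort(); // talk to the AM
    }
    return historyAddress; // finished or not yet running => talk to the JobHistoryServer
  }
}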
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.apache.hadoop.util.JarFinder;
/**
 * A MiniMRCluster factory. In MR2 it provides the MiniMRClientCluster wrapper
 * interface around a MiniMRYarnCluster; in MR1 it provides the same wrapper
 * around a MiniMRCluster. Tests should use this factory so they can migrate
 * easily between MR1 and MR2.
*/
public class MiniMRClientClusterFactory {
public static MiniMRClientCluster create(Class<?> caller, int noOfNMs,
Configuration conf) throws IOException {
return create(caller, caller.getSimpleName(), noOfNMs, conf);
}
public static MiniMRClientCluster create(Class<?> caller, String identifier,
int noOfNMs, Configuration conf) throws IOException {
if (conf == null) {
conf = new Configuration();
}
FileSystem fs = FileSystem.get(conf);
Path testRootDir = new Path("target", identifier + "-tmpDir")
.makeQualified(fs);
Path appJar = new Path(testRootDir, "MRAppJar.jar");
// Copy MRAppJar and make it private.
Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);
fs.copyFromLocalFile(appMasterJar, appJar);
fs.setPermission(appJar, new FsPermission("744"));
Job job = Job.getInstance(conf);
job.addFileToClassPath(appJar);
Path callerJar = new Path(JarFinder.getJar(caller));
Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
fs.copyFromLocalFile(callerJar, remoteCallerJar);
fs.setPermission(remoteCallerJar, new FsPermission("744"));
job.addFileToClassPath(remoteCallerJar);
MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(identifier,
noOfNMs);
job.getConfiguration().set("minimrclientcluster.caller.name",
identifier);
job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number",
noOfNMs);
miniMRYarnCluster.init(job.getConfiguration());
miniMRYarnCluster.start();
return new MiniMRYarnClusterAdapter(miniMRYarnCluster);
}
}
| 3,109 | 35.162791 | 80 |
java
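A hedged usage sketch for the factory above (not one of the bundled tests): create a one-NodeManager cluster for a test class, take its Configuration for job submission, and stop it when done. The sketch class name and the commented job-submission step are placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
public class MiniClusterUsageSketch {
  public static void main(String[] args) throws Exception {
    MiniMRClientCluster cluster =
        MiniMRClientClusterFactory.create(MiniClusterUsageSketch.class, 1,
                                          new Configuration());
    try {
      // The returned cluster is already started; its config points jobs at it.
      Configuration clusterConf = cluster.getConfig();
      // ... submit a Job with Job.getInstance(clusterConf) and assert on it here ...
    } finally {
      cluster.stop();
    }
  }
}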
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestQueueConfigurationParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.StringWriter;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import static org.junit.Assert.*;
import org.junit.Test;
public class TestQueueConfigurationParser {
/**
   * Tests XML generation for a queue hierarchy via
   * QueueConfigurationParser.getQueueElement().
* @throws ParserConfigurationException
* @throws Exception
*/
@Test (timeout=5000)
public void testQueueConfigurationParser()
throws ParserConfigurationException, Exception {
JobQueueInfo info = new JobQueueInfo("root", "rootInfo");
JobQueueInfo infoChild1 = new JobQueueInfo("child1", "child1Info");
JobQueueInfo infoChild2 = new JobQueueInfo("child2", "child1Info");
info.addChild(infoChild1);
info.addChild(infoChild2);
DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory
.newInstance();
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document document = builder.newDocument();
// test QueueConfigurationParser.getQueueElement
Element e = QueueConfigurationParser.getQueueElement(document, info);
// transform result to string for check
DOMSource domSource = new DOMSource(e);
StringWriter writer = new StringWriter();
StreamResult result = new StreamResult(writer);
TransformerFactory tf = TransformerFactory.newInstance();
Transformer transformer = tf.newTransformer();
transformer.transform(domSource, result);
String str= writer.toString();
assertTrue(str
.endsWith("<queue><name>root</name><properties/><state>running</state><queue><name>child1</name><properties/><state>running</state></queue><queue><name>child2</name><properties/><state>running</state></queue></queue>"));
}
}
| 2,833 | 36.786667 | 228 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.BitSet;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class TestTextInputFormat {
private static final Log LOG =
LogFactory.getLog(TestTextInputFormat.class.getName());
private static int MAX_LENGTH = 10000;
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
static {
try {
defaultConf.set("fs.defaultFS", "file:///");
localFs = FileSystem.getLocal(defaultConf);
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
@SuppressWarnings("deprecation")
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "/tmp")),
"TestTextInputFormat").makeQualified(localFs);
@Test (timeout=500000)
public void testFormat() throws Exception {
JobConf job = new JobConf(defaultConf);
Path file = new Path(workDir, "test.txt");
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
int seed = new Random().nextInt();
LOG.info("seed = "+seed);
Random random = new Random(seed);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(job, workDir);
// for a variety of lengths
for (int length = 0; length < MAX_LENGTH;
length+= random.nextInt(MAX_LENGTH/10)+1) {
LOG.debug("creating; entries = " + length);
// create a file with length entries
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
TextInputFormat format = new TextInputFormat();
format.configure(job);
LongWritable key = new LongWritable();
Text value = new Text();
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(MAX_LENGTH/20)+1;
LOG.debug("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(job, numSplits);
LOG.debug("splitting: got = " + splits.length);
if (length == 0) {
assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",
1, splits.length);
assertEquals("Empty file length == 0", 0, splits[0].getLength());
}
// check each split
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.length; j++) {
LOG.debug("split["+j+"]= " + splits[j]);
RecordReader<LongWritable, Text> reader =
format.getRecordReader(splits[j], job, reporter);
try {
int count = 0;
while (reader.next(key, value)) {
int v = Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v +
" in split " + j +
" at position "+reader.getPos());
}
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits["+j+"]="+splits[j]+" count=" + count);
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
@Test (timeout=900000)
public void testSplitableCodecs() throws IOException {
JobConf conf = new JobConf(defaultConf);
int seed = new Random().nextInt();
// Create the codec
CompressionCodec codec = null;
try {
codec = (CompressionCodec)
ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
} catch (ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Path file = new Path(workDir, "test"+codec.getDefaultExtension());
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
LOG.info("seed = "+seed);
Random random = new Random(seed);
FileSystem localFs = FileSystem.getLocal(conf);
localFs.delete(workDir, true);
FileInputFormat.setInputPaths(conf, workDir);
final int MAX_LENGTH = 500000;
// for a variety of lengths
for (int length = MAX_LENGTH / 2; length < MAX_LENGTH;
length += random.nextInt(MAX_LENGTH / 4)+1) {
LOG.info("creating; entries = " + length);
// create a file with length entries
Writer writer =
new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
try {
for (int i = 0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
} finally {
writer.close();
}
// try splitting the file in a variety of sizes
TextInputFormat format = new TextInputFormat();
format.configure(conf);
LongWritable key = new LongWritable();
Text value = new Text();
for (int i = 0; i < 3; i++) {
int numSplits = random.nextInt(MAX_LENGTH/2000)+1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits = format.getSplits(conf, numSplits);
LOG.info("splitting: got = " + splits.length);
// check each split
BitSet bits = new BitSet(length);
for (int j = 0; j < splits.length; j++) {
LOG.debug("split["+j+"]= " + splits[j]);
RecordReader<LongWritable, Text> reader =
format.getRecordReader(splits[j], conf, reporter);
try {
int counter = 0;
while (reader.next(key, value)) {
int v = Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v +
" in split " + j +
" at position "+reader.getPos());
}
assertFalse("Key in multiple partitions.", bits.get(v));
bits.set(v);
counter++;
}
if (counter > 0) {
LOG.info("splits["+j+"]="+splits[j]+" count=" + counter);
} else {
LOG.debug("splits["+j+"]="+splits[j]+" count=" + counter);
}
} finally {
reader.close();
}
}
assertEquals("Some keys in no partition.", length, bits.cardinality());
}
}
}
private static LineReader makeStream(String str) throws IOException {
return new LineReader(new ByteArrayInputStream
(str.getBytes("UTF-8")),
defaultConf);
}
private static LineReader makeStream(String str, int bufsz) throws IOException {
return new LineReader(new ByteArrayInputStream
(str.getBytes("UTF-8")),
bufsz);
}
@Test (timeout=5000)
public void testUTF8() throws Exception {
LineReader in = makeStream("abcd\u20acbdcd\u20ac");
Text line = new Text();
in.readLine(line);
assertEquals("readLine changed utf8 characters",
"abcd\u20acbdcd\u20ac", line.toString());
in = makeStream("abc\u200axyz");
in.readLine(line);
assertEquals("split on fake newline", "abc\u200axyz", line.toString());
}
/**
   * Test readLine for various kinds of line termination sequences.
   * Varies buffer size to stress test. Also checks that the returned
   * value matches the string length.
*
* @throws Exception
*/
@Test (timeout=5000)
public void testNewLines() throws Exception {
final String STR = "a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee";
final int STRLENBYTES = STR.getBytes().length;
Text out = new Text();
for (int bufsz = 1; bufsz < STRLENBYTES+1; ++bufsz) {
LineReader in = makeStream(STR, bufsz);
int c = 0;
c += in.readLine(out); //"a"\n
assertEquals("line1 length, bufsz:"+bufsz, 1, out.getLength());
c += in.readLine(out); //"bb"\n
assertEquals("line2 length, bufsz:"+bufsz, 2, out.getLength());
c += in.readLine(out); //""\n
assertEquals("line3 length, bufsz:"+bufsz, 0, out.getLength());
c += in.readLine(out); //"ccc"\r
assertEquals("line4 length, bufsz:"+bufsz, 3, out.getLength());
c += in.readLine(out); //dddd\r
assertEquals("line5 length, bufsz:"+bufsz, 4, out.getLength());
c += in.readLine(out); //""\r
assertEquals("line6 length, bufsz:"+bufsz, 0, out.getLength());
c += in.readLine(out); //""\r\n
assertEquals("line7 length, bufsz:"+bufsz, 0, out.getLength());
c += in.readLine(out); //""\r\n
assertEquals("line8 length, bufsz:"+bufsz, 0, out.getLength());
c += in.readLine(out); //"eeeee"EOF
assertEquals("line9 length, bufsz:"+bufsz, 5, out.getLength());
assertEquals("end of file, bufsz: "+bufsz, 0, in.readLine(out));
assertEquals("total bytes, bufsz: "+bufsz, c, STRLENBYTES);
}
}
/**
* Test readLine for correct interpretation of maxLineLength
* (returned string should be clipped at maxLineLength, and the
* remaining bytes on the same line should be thrown out).
   * Also checks that the returned value matches the string length.
* Varies buffer size to stress test.
*
* @throws Exception
*/
@Test (timeout=5000)
public void testMaxLineLength() throws Exception {
final String STR = "a\nbb\n\nccc\rdddd\r\neeeee";
final int STRLENBYTES = STR.getBytes().length;
Text out = new Text();
for (int bufsz = 1; bufsz < STRLENBYTES+1; ++bufsz) {
LineReader in = makeStream(STR, bufsz);
int c = 0;
c += in.readLine(out, 1);
assertEquals("line1 length, bufsz: "+bufsz, 1, out.getLength());
c += in.readLine(out, 1);
assertEquals("line2 length, bufsz: "+bufsz, 1, out.getLength());
c += in.readLine(out, 1);
assertEquals("line3 length, bufsz: "+bufsz, 0, out.getLength());
c += in.readLine(out, 3);
assertEquals("line4 length, bufsz: "+bufsz, 3, out.getLength());
c += in.readLine(out, 10);
assertEquals("line5 length, bufsz: "+bufsz, 4, out.getLength());
c += in.readLine(out, 8);
assertEquals("line5 length, bufsz: "+bufsz, 5, out.getLength());
assertEquals("end of file, bufsz: " +bufsz, 0, in.readLine(out));
assertEquals("total bytes, bufsz: "+bufsz, c, STRLENBYTES);
}
}
@Test (timeout=5000)
public void testMRMaxLine() throws Exception {
final int MAXPOS = 1024 * 1024;
final int MAXLINE = 10 * 1024;
final int BUF = 64 * 1024;
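    // An endless stream of '\0' bytes: simulates a single "line" far longer than
    // any limit, so the record reader must stop before reading past the split
    // end plus one LineReader buffer (see MAXPOSBUF below).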
final InputStream infNull = new InputStream() {
int position = 0;
final int MAXPOSBUF = 1024 * 1024 + BUF; // max LRR pos + LineReader buf
@Override
public int read() {
++position;
return 0;
}
@Override
public int read(byte[] b) {
assertTrue("Read too many bytes from the stream", position < MAXPOSBUF);
Arrays.fill(b, (byte) 0);
position += b.length;
return b.length;
}
public void reset() {
position=0;
}
};
final LongWritable key = new LongWritable();
final Text val = new Text();
LOG.info("Reading a line from /dev/null");
final Configuration conf = new Configuration(false);
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, MAXLINE);
conf.setInt("io.file.buffer.size", BUF); // used by LRR
// test another constructor
LineRecordReader lrr = new LineRecordReader(infNull, 0, MAXPOS, conf);
assertFalse("Read a line from null", lrr.next(key, val));
infNull.reset();
lrr = new LineRecordReader(infNull, 0L, MAXLINE, MAXPOS);
assertFalse("Read a line from null", lrr.next(key, val));
}
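  /** Write the given contents to a file, compressing with the codec if one is supplied. */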
private static void writeFile(FileSystem fs, Path name,
CompressionCodec codec,
String contents) throws IOException {
OutputStream stm;
if (codec == null) {
stm = fs.create(name);
} else {
stm = codec.createOutputStream(fs.create(name));
}
stm.write(contents.getBytes());
stm.close();
}
private static final Reporter voidReporter = Reporter.NULL;
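  /** Read every record in the given split and return the values in order. */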
private static List<Text> readSplit(TextInputFormat format,
InputSplit split,
JobConf job) throws IOException {
List<Text> result = new ArrayList<Text>();
RecordReader<LongWritable, Text> reader =
format.getRecordReader(split, job, voidReporter);
LongWritable key = reader.createKey();
Text value = reader.createValue();
while (reader.next(key, value)) {
result.add(value);
value = reader.createValue();
}
reader.close();
return result;
}
/**
* Test using the gzip codec for reading
*/
@Test (timeout=5000)
public void testGzip() throws IOException {
JobConf job = new JobConf(defaultConf);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, job);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(job, workDir);
TextInputFormat format = new TextInputFormat();
format.configure(job);
InputSplit[] splits = format.getSplits(job, 100);
assertEquals("compressed splits == 2", 2, splits.length);
FileSplit tmp = (FileSplit) splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0] = splits[1];
splits[1] = tmp;
}
List<Text> results = readSplit(format, splits[0], job);
assertEquals("splits[0] length", 6, results.size());
assertEquals("splits[0][5]", " dog", results.get(5).toString());
results = readSplit(format, splits[1], job);
assertEquals("splits[1] length", 2, results.size());
assertEquals("splits[1][0]", "this is a test",
results.get(0).toString());
assertEquals("splits[1][1]", "of gzip",
results.get(1).toString());
}
/**
* Test using the gzip codec and an empty input file
*/
@Test (timeout=5000)
public void testGzipEmpty() throws IOException {
JobConf job = new JobConf(defaultConf);
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, job);
localFs.delete(workDir, true);
writeFile(localFs, new Path(workDir, "empty.gz"), gzip, "");
FileInputFormat.setInputPaths(job, workDir);
TextInputFormat format = new TextInputFormat();
format.configure(job);
InputSplit[] splits = format.getSplits(job, 100);
assertEquals("Compressed files of length 0 are not returned from FileInputFormat.getSplits().",
1, splits.length);
List<Text> results = readSplit(format, splits[0], job);
assertEquals("Compressed empty file length == 0", 0, results.size());
}
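  /**
   * Expand backslash escapes in a command line argument: "\n" and "\r" become
   * the real newline and carriage-return characters; any other escaped
   * character is kept as-is.
   */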
private static String unquote(String in) {
StringBuffer result = new StringBuffer();
for(int i=0; i < in.length(); ++i) {
char ch = in.charAt(i);
if (ch == '\\') {
ch = in.charAt(++i);
switch (ch) {
case 'n':
result.append('\n');
break;
case 'r':
result.append('\r');
break;
default:
result.append(ch);
break;
}
} else {
result.append(ch);
}
}
return result.toString();
}
/**
* Parse the command line arguments into lines and display the result.
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
for(String arg: args) {
System.out.println("Working on " + arg);
LineReader reader = makeStream(unquote(arg));
Text line = new Text();
int size = reader.readLine(line);
while (size > 0) {
System.out.println("Got: " + line.toString());
size = reader.readLine(line);
}
reader.close();
}
}
}
| 17,901 | 34.309665 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.MRConfig;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.fail;
/**
 * TestMapOutputType checks whether the map task handles a type mismatch
 * between the mapper output and the types declared through
 * JobConf.setMapOutputKeyClass and JobConf.setMapOutputValueClass.
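 *
 * For illustration (drawn from the test methods below, not new behaviour):
 * declaring conf.setMapOutputKeyClass(IntWritable.class) while the mapper
 * emits out.collect(new Text("Hello"), new Text("World")) is expected to
 * fail the map task at collect time rather than silently coerce the types.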
*/
public class TestMapOutputType {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestMapOutputType-mapred");
JobConf conf = new JobConf(TestMapOutputType.class);
JobClient jc;
/**
   * TextGen is a Mapper that always emits a Text/Text key-value pair,
   * regardless of the map output types declared in the JobConf.
*/
static class TextGen
implements Mapper<WritableComparable, Writable, Text, Text> {
public void configure(JobConf job) {
}
public void map(WritableComparable key, Writable val,
OutputCollector<Text, Text> out,
Reporter reporter) throws IOException {
out.collect(new Text("Hello"), new Text("World"));
}
public void close() {
}
}
/** A do-nothing reducer class. We won't get this far, really.
*
*/
static class TextReduce
implements Reducer<Text, Text, Text, Text> {
public void configure(JobConf job) {
}
public void reduce(Text key,
Iterator<Text> values,
OutputCollector<Text, Text> out,
Reporter reporter) throws IOException {
out.collect(new Text("Test"), new Text("Me"));
}
public void close() {
}
}
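  /**
   * Build a single-record SequenceFile input and configure a local-runner job
   * whose mapper always emits Text/Text pairs; individual tests then vary the
   * declared map output types.
   */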
@Before
public void configure() throws Exception {
Path testdir = new Path(TEST_DIR.getAbsolutePath());
Path inDir = new Path(testdir, "in");
Path outDir = new Path(testdir, "out");
FileSystem fs = FileSystem.get(conf);
fs.delete(testdir, true);
conf.setInt(JobContext.IO_SORT_MB, 1);
conf.setInputFormat(SequenceFileInputFormat.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setMapperClass(TextGen.class);
conf.setReducerClass(TextReduce.class);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(Text.class);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.setOutputFormat(SequenceFileOutputFormat.class);
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
Path inFile = new Path(inDir, "part0");
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile,
Text.class, Text.class);
writer.append(new Text("rec: 1"), new Text("Hello"));
writer.close();
jc = new JobClient(conf);
}
@After
public void cleanup() {
FileUtil.fullyDelete(TEST_DIR);
}
@Test
public void testKeyMismatch() throws Exception {
// Set bad MapOutputKeyClass and MapOutputValueClass
conf.setMapOutputKeyClass(IntWritable.class);
conf.setMapOutputValueClass(IntWritable.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (r_job.isSuccessful()) {
fail("Oops! The job was supposed to break due to an exception");
}
}
@Test
public void testValueMismatch() throws Exception {
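    // Set good MapOutputKeyClass, bad MapOutputValueClass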
conf.setMapOutputKeyClass(Text.class);
conf.setMapOutputValueClass(IntWritable.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (r_job.isSuccessful()) {
fail("Oops! The job was supposed to break due to an exception");
}
}
@Test
public void testNoMismatch() throws Exception{
// Set good MapOutputKeyClass and MapOutputValueClass
conf.setMapOutputKeyClass(Text.class);
conf.setMapOutputValueClass(Text.class);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
}
}
| 5,585 | 30.206704 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFileInputFormatPathFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import java.io.IOException;
import java.io.Writer;
import java.io.OutputStreamWriter;
import java.util.Set;
import java.util.HashSet;
public class TestFileInputFormatPathFilter extends TestCase {
public static class DummyFileInputFormat extends FileInputFormat {
public RecordReader getRecordReader(InputSplit split, JobConf job,
Reporter reporter) throws IOException {
return null;
}
}
private static FileSystem localFs = null;
static {
try {
localFs = FileSystem.getLocal(new JobConf());
} catch (IOException e) {
throw new RuntimeException("init failure", e);
}
}
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestFileInputFormatPathFilter");
public void setUp() throws Exception {
tearDown();
localFs.mkdirs(workDir);
}
public void tearDown() throws Exception {
if (localFs.exists(workDir)) {
localFs.delete(workDir, true);
}
}
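  /** Create an empty file under the work directory and return its qualified path. */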
protected Path createFile(String fileName) throws IOException {
Path file = new Path(workDir, fileName);
Writer writer = new OutputStreamWriter(localFs.create(file));
writer.write("");
writer.close();
return localFs.makeQualified(file);
}
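  /** Create a mix of plain, "_"-prefixed and "."-prefixed files in the work directory. */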
protected Set<Path> createFiles() throws IOException {
Set<Path> files = new HashSet<Path>();
files.add(createFile("a"));
files.add(createFile("b"));
files.add(createFile("aa"));
files.add(createFile("bb"));
files.add(createFile("_hello"));
files.add(createFile(".hello"));
return files;
}
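  /** Accepts the test directory itself plus any path whose name is a single character. */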
public static class TestPathFilter implements PathFilter {
public boolean accept(Path path) {
String name = path.getName();
return name.equals("TestFileInputFormatPathFilter") || name.length() == 1;
}
}
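  /**
   * List the input files with or without the custom filter and an "a*" glob,
   * and verify that hidden files and anything excluded by the filter or glob
   * are dropped from the result.
   */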
private void _testInputFiles(boolean withFilter, boolean withGlob) throws Exception {
Set<Path> createdFiles = createFiles();
JobConf conf = new JobConf();
Path inputDir = (withGlob) ? new Path(workDir, "a*") : workDir;
FileInputFormat.setInputPaths(conf, inputDir);
conf.setInputFormat(DummyFileInputFormat.class);
if (withFilter) {
FileInputFormat.setInputPathFilter(conf, TestPathFilter.class);
}
DummyFileInputFormat inputFormat =
(DummyFileInputFormat) conf.getInputFormat();
Set<Path> computedFiles = new HashSet<Path>();
for (FileStatus file : inputFormat.listStatus(conf)) {
computedFiles.add(file.getPath());
}
createdFiles.remove(localFs.makeQualified(new Path(workDir, "_hello")));
createdFiles.remove(localFs.makeQualified(new Path(workDir, ".hello")));
if (withFilter) {
createdFiles.remove(localFs.makeQualified(new Path(workDir, "aa")));
createdFiles.remove(localFs.makeQualified(new Path(workDir, "bb")));
}
if (withGlob) {
createdFiles.remove(localFs.makeQualified(new Path(workDir, "b")));
createdFiles.remove(localFs.makeQualified(new Path(workDir, "bb")));
}
assertEquals(createdFiles, computedFiles);
}
public void testWithoutPathFilterWithoutGlob() throws Exception {
_testInputFiles(false, false);
}
public void testWithoutPathFilterWithGlob() throws Exception {
_testInputFiles(false, true);
}
public void testWithPathFilterWithoutGlob() throws Exception {
_testInputFiles(true, false);
}
public void testWithPathFilterWithGlob() throws Exception {
_testInputFiles(true, true);
}
}
| 4,559 | 29.810811 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
* An adapter for MiniMRYarnCluster providing a MiniMRClientCluster interface.
* This interface could be used by tests across both MR1 and MR2.
*/
public class MiniMRYarnClusterAdapter implements MiniMRClientCluster {
private MiniMRYarnCluster miniMRYarnCluster;
private static final Log LOG = LogFactory.getLog(MiniMRYarnClusterAdapter.class);
public MiniMRYarnClusterAdapter(MiniMRYarnCluster miniMRYarnCluster) {
this.miniMRYarnCluster = miniMRYarnCluster;
}
@Override
public Configuration getConfig() {
return miniMRYarnCluster.getConfig();
}
@Override
public void start() {
miniMRYarnCluster.start();
}
@Override
public void stop() {
miniMRYarnCluster.stop();
}
@Override
public void restart() {
if (!miniMRYarnCluster.getServiceState().equals(STATE.STARTED)){
LOG.warn("Cannot restart the mini cluster, start it first");
return;
}
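    // Pin the YARN and JobHistory ports before re-init so the restarted
    // cluster comes back on the addresses already recorded in the old
    // configuration.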
Configuration oldConf = new Configuration(getConfig());
String callerName = oldConf.get("minimrclientcluster.caller.name",
this.getClass().getName());
int noOfNMs = oldConf.getInt("minimrclientcluster.nodemanagers.number", 1);
oldConf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
oldConf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
stop();
miniMRYarnCluster = new MiniMRYarnCluster(callerName, noOfNMs);
miniMRYarnCluster.init(oldConf);
miniMRYarnCluster.start();
}
}
| 2,673 | 33.727273 | 83 |
java
|