repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerStateStoreService.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.service.AbstractService;
@Private
@Unstable
/**
* Base class for history server state storage.
* Storage implementations need to implement blocking store and load methods
* to actually store and load the state.
*/
public abstract class HistoryServerStateStoreService extends AbstractService {
public static class HistoryServerState {
Map<MRDelegationTokenIdentifier, Long> tokenState =
new HashMap<MRDelegationTokenIdentifier, Long>();
Set<DelegationKey> tokenMasterKeyState = new HashSet<DelegationKey>();
public Map<MRDelegationTokenIdentifier, Long> getTokenState() {
return tokenState;
}
public Set<DelegationKey> getTokenMasterKeyState() {
return tokenMasterKeyState;
}
}
public HistoryServerStateStoreService() {
super(HistoryServerStateStoreService.class.getName());
}
/**
* Initialize the state storage
*
* @param conf the configuration
* @throws IOException
*/
@Override
public void serviceInit(Configuration conf) throws IOException {
initStorage(conf);
}
/**
* Start the state storage for use
*
* @throws IOException
*/
@Override
public void serviceStart() throws IOException {
startStorage();
}
/**
* Shutdown the state storage.
*
* @throws IOException
*/
@Override
public void serviceStop() throws IOException {
closeStorage();
}
/**
* Implementation-specific initialization.
*
* @param conf the configuration
* @throws IOException
*/
protected abstract void initStorage(Configuration conf) throws IOException;
/**
* Implementation-specific startup.
*
* @throws IOException
*/
protected abstract void startStorage() throws IOException;
/**
* Implementation-specific shutdown.
*
* @throws IOException
*/
protected abstract void closeStorage() throws IOException;
/**
* Load the history server state from the state storage.
*
* @throws IOException
*/
public abstract HistoryServerState loadState() throws IOException;
/**
* Blocking method to store a delegation token along with the current token
* sequence number to the state storage.
*
* Implementations must not return from this method until the token has been
* committed to the state store.
*
* @param tokenId the token to store
* @param renewDate the token renewal deadline
* @throws IOException
*/
public abstract void storeToken(MRDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException;
/**
* Blocking method to update the expiration of a delegation token
* in the state storage.
*
* Implementations must not return from this method until the expiration
* date of the token has been updated in the state store.
*
* @param tokenId the token to update
* @param renewDate the new token renewal deadline
* @throws IOException
*/
public abstract void updateToken(MRDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException;
/**
* Blocking method to remove a delegation token from the state storage.
*
* Implementations must not return from this method until the token has been
* removed from the state store.
*
* @param tokenId the token to remove
* @throws IOException
*/
public abstract void removeToken(MRDelegationTokenIdentifier tokenId)
throws IOException;
/**
* Blocking method to store a delegation token master key.
*
* Implementations must not return from this method until the key has been
* committed to the state store.
*
* @param key the master key to store
* @throws IOException
*/
public abstract void storeTokenMasterKey(
DelegationKey key) throws IOException;
/**
* Blocking method to remove a delegation token master key.
*
* Implementations must not return from this method until the key has been
* removed from the state store.
*
* @param key the master key to remove
* @throws IOException
*/
public abstract void removeTokenMasterKey(DelegationKey key)
throws IOException;
}
| 5,425 | 28.32973 | 78 | java |
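The abstract class above only fixes the blocking contract; the sketch below is a hypothetical, minimal in-memory implementation (the class name InMemoryHistoryServerStateStore is illustrative, not part of Hadoop) showing how a subclass satisfies the abstract store/load methods. Because state lives on the heap, every mutation is trivially "committed" by the time the method returns; a real store would write through to durable storage instead.

// Illustrative sketch only: an in-memory state store for the contract above.
package org.apache.hadoop.mapreduce.v2.hs;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;

public class InMemoryHistoryServerStateStore
    extends HistoryServerStateStoreService {

  // All recoverable state kept on the heap; lost on process restart.
  private final HistoryServerState state = new HistoryServerState();

  @Override
  protected void initStorage(Configuration conf) throws IOException {
    // nothing to open for an in-memory store
  }

  @Override
  protected void startStorage() throws IOException {
  }

  @Override
  protected void closeStorage() throws IOException {
  }

  @Override
  public HistoryServerState loadState() throws IOException {
    return state;
  }

  @Override
  public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
      throws IOException {
    state.getTokenState().put(tokenId, renewDate);
  }

  @Override
  public void updateToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
      throws IOException {
    state.getTokenState().put(tokenId, renewDate);
  }

  @Override
  public void removeToken(MRDelegationTokenIdentifier tokenId)
      throws IOException {
    state.getTokenState().remove(tokenId);
  }

  @Override
  public void storeTokenMasterKey(DelegationKey key) throws IOException {
    state.getTokenMasterKeyState().add(key);
  }

  @Override
  public void removeTokenMasterKey(DelegationKey key) throws IOException {
    state.getTokenMasterKeyState().remove(key);
  }
}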
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.AccessControlException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.security.authorize.ClientHSPolicyProvider;
import org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebApp;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;
import com.google.common.annotations.VisibleForTesting;
/**
* This module is responsible for talking to the
* JobClient (user facing).
*
*/
public class HistoryClientService extends AbstractService {
private static final Log LOG = LogFactory.getLog(HistoryClientService.class);
private HSClientProtocol protocolHandler;
private Server server;
private WebApp webApp;
private InetSocketAddress bindAddress;
private HistoryContext history;
private JHSDelegationTokenSecretManager jhsDTSecretManager;
public HistoryClientService(HistoryContext history,
JHSDelegationTokenSecretManager jhsDTSecretManager) {
super("HistoryClientService");
this.history = history;
this.protocolHandler = new HSClientProtocolHandler();
this.jhsDTSecretManager = jhsDTSecretManager;
}
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
YarnRPC rpc = YarnRPC.create(conf);
initializeWebApp(conf);
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.MR_HISTORY_BIND_HOST,
JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
server =
rpc.getServer(HSClientProtocol.class, protocolHandler, address,
conf, jhsDTSecretManager,
conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT,
JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));
// Enable service authorization?
if (conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
}
server.start();
this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_BIND_HOST,
JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
server.getListenerAddress());
LOG.info("Instantiated HistoryClientService at " + this.bindAddress);
super.serviceStart();
}
@VisibleForTesting
protected void initializeWebApp(Configuration conf) {
webApp = new HsWebApp(history);
InetSocketAddress bindAddress = MRWebAppUtil.getJHSWebBindAddress(conf);
// NOTE: there should be a .at(InetSocketAddress)
WebApps
.$for("jobhistory", HistoryClientService.class, this, "ws")
.with(conf)
.withHttpSpnegoKeytabKey(
JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
.withHttpSpnegoPrincipalKey(
JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
.at(NetUtils.getHostPortString(bindAddress)).start(webApp);
String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
MRWebAppUtil.setJHSWebappURLWithoutScheme(conf,
connectHost + ":" + webApp.getListenerAddress().getPort());
}
@Override
protected void serviceStop() throws Exception {
if (server != null) {
server.stop();
}
if (webApp != null) {
webApp.stop();
}
super.serviceStop();
}
@Private
public MRClientProtocol getClientHandler() {
return this.protocolHandler;
}
@Private
public InetSocketAddress getBindAddress() {
return this.bindAddress;
}
private class HSClientProtocolHandler implements HSClientProtocol {
private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
public InetSocketAddress getConnectAddress() {
return getBindAddress();
}
private Job verifyAndGetJob(final JobId jobID, boolean exceptionThrow)
throws IOException {
UserGroupInformation loginUgi = null;
Job job = null;
try {
loginUgi = UserGroupInformation.getLoginUser();
job = loginUgi.doAs(new PrivilegedExceptionAction<Job>() {
@Override
public Job run() throws Exception {
Job job = history.getJob(jobID);
return job;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
if (job == null && exceptionThrow) {
throw new IOException("Unknown Job " + jobID);
}
if (job != null) {
JobACL operation = JobACL.VIEW_JOB;
checkAccess(job, operation);
}
return job;
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
JobId jobId = request.getJobId();
Job job = verifyAndGetJob(jobId, true);
GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
return response;
}
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws IOException {
JobId jobId = request.getJobId();
Job job = verifyAndGetJob(jobId, false);
GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class);
if (job != null) {
response.setJobReport(job.getReport());
}
else {
response.setJobReport(null);
}
return response;
}
@Override
public GetTaskAttemptReportResponse getTaskAttemptReport(
GetTaskAttemptReportRequest request) throws IOException {
TaskAttemptId taskAttemptId = request.getTaskAttemptId();
Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId(), true);
GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class);
response.setTaskAttemptReport(job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId).getReport());
return response;
}
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
throws IOException {
TaskId taskId = request.getTaskId();
Job job = verifyAndGetJob(taskId.getJobId(), true);
GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class);
response.setTaskReport(job.getTask(taskId).getReport());
return response;
}
@Override
public GetTaskAttemptCompletionEventsResponse
getTaskAttemptCompletionEvents(
GetTaskAttemptCompletionEventsRequest request) throws IOException {
JobId jobId = request.getJobId();
int fromEventId = request.getFromEventId();
int maxEvents = request.getMaxEvents();
Job job = verifyAndGetJob(jobId, true);
GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
response.addAllCompletionEvents(Arrays.asList(job.getTaskAttemptCompletionEvents(fromEventId, maxEvents)));
return response;
}
@Override
public KillJobResponse killJob(KillJobRequest request) throws IOException {
throw new IOException("Invalid operation on completed job");
}
@Override
public KillTaskResponse killTask(KillTaskRequest request)
throws IOException {
throw new IOException("Invalid operation on completed job");
}
@Override
public KillTaskAttemptResponse killTaskAttempt(
KillTaskAttemptRequest request) throws IOException {
throw new IOException("Invalid operation on completed job");
}
@Override
public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
throws IOException {
TaskAttemptId taskAttemptId = request.getTaskAttemptId();
Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId(), true);
GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
response.addAllDiagnostics(job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId).getDiagnostics());
return response;
}
@Override
public FailTaskAttemptResponse failTaskAttempt(
FailTaskAttemptRequest request) throws IOException {
throw new IOException("Invalid operation on completed job");
}
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
throws IOException {
JobId jobId = request.getJobId();
TaskType taskType = request.getTaskType();
GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
Job job = verifyAndGetJob(jobId, true);
Collection<Task> tasks = job.getTasks(taskType).values();
for (Task task : tasks) {
response.addTaskReport(task.getReport());
}
return response;
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
// Verify that the connection is kerberos authenticated
if (!isAllowedDelegationTokenOp()) {
throw new IOException(
"Delegation Token can be issued only with kerberos authentication");
}
GetDelegationTokenResponse response = recordFactory.newRecordInstance(
GetDelegationTokenResponse.class);
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
MRDelegationTokenIdentifier tokenIdentifier =
new MRDelegationTokenIdentifier(owner, new Text(
request.getRenewer()), realUser);
Token<MRDelegationTokenIdentifier> realJHSToken =
new Token<MRDelegationTokenIdentifier>(tokenIdentifier,
jhsDTSecretManager);
org.apache.hadoop.yarn.api.records.Token mrDToken =
org.apache.hadoop.yarn.api.records.Token.newInstance(
realJHSToken.getIdentifier(), realJHSToken.getKind().toString(),
realJHSToken.getPassword(), realJHSToken.getService().toString());
response.setDelegationToken(mrDToken);
return response;
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws IOException {
if (!isAllowedDelegationTokenOp()) {
throw new IOException(
"Delegation Token can be renewed only with kerberos authentication");
}
org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
Token<MRDelegationTokenIdentifier> token =
new Token<MRDelegationTokenIdentifier>(
protoToken.getIdentifier().array(), protoToken.getPassword()
.array(), new Text(protoToken.getKind()), new Text(
protoToken.getService()));
String user = UserGroupInformation.getCurrentUser().getShortUserName();
long nextExpTime = jhsDTSecretManager.renewToken(token, user);
RenewDelegationTokenResponse renewResponse = Records
.newRecord(RenewDelegationTokenResponse.class);
renewResponse.setNextExpirationTime(nextExpTime);
return renewResponse;
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws IOException {
if (!isAllowedDelegationTokenOp()) {
throw new IOException(
"Delegation Token can be cancelled only with kerberos authentication");
}
org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
Token<MRDelegationTokenIdentifier> token =
new Token<MRDelegationTokenIdentifier>(
protoToken.getIdentifier().array(), protoToken.getPassword()
.array(), new Text(protoToken.getKind()), new Text(
protoToken.getService()));
String user = UserGroupInformation.getCurrentUser().getUserName();
jhsDTSecretManager.cancelToken(token, user);
return Records.newRecord(CancelDelegationTokenResponse.class);
}
private void checkAccess(Job job, JobACL jobOperation)
throws IOException {
UserGroupInformation callerUGI;
callerUGI = UserGroupInformation.getCurrentUser();
if (!job.checkAccess(callerUGI, jobOperation)) {
throw new IOException(new AccessControlException("User "
+ callerUGI.getShortUserName() + " cannot perform operation "
+ jobOperation.name() + " on " + job.getID()));
}
}
private boolean isAllowedDelegationTokenOp() throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
return EnumSet.of(AuthenticationMethod.KERBEROS,
AuthenticationMethod.KERBEROS_SSL,
AuthenticationMethod.CERTIFICATE)
.contains(UserGroupInformation.getCurrentUser()
.getRealAuthenticationMethod());
} else {
return true;
}
}
}
}
| 18,290 | 40.382353 | 134 | java |
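As a hedged sketch of how a caller reaches the RPC endpoint that serviceStart() publishes, the fragment below builds an HSClientProtocol proxy and fetches a job report. It assumes the imports already present in this file; the fetchReport helper and the address string "historyserver:10020" are illustrative assumptions, not part of HistoryClientService.

// Hypothetical client-side usage; the address string is an example only.
static GetJobReportResponse fetchReport(Configuration conf, JobId jobId)
    throws IOException {
  YarnRPC rpc = YarnRPC.create(conf);
  HSClientProtocol proxy = (HSClientProtocol) rpc.getProxy(
      HSClientProtocol.class,
      NetUtils.createSocketAddr("historyserver:10020"), conf);
  GetJobReportRequest request = Records.newRecord(GetJobReportRequest.class);
  request.setJobId(jobId);
  // Served by HSClientProtocolHandler.getJobReport() in the class above.
  return proxy.getJobReport(request);
}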
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.DelegationKey;
/**
* A MapReduce specific delegation token secret manager.
* The secret manager is responsible for generating and accepting the password
* for each token.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JHSDelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<MRDelegationTokenIdentifier> {
private static final Log LOG = LogFactory.getLog(
JHSDelegationTokenSecretManager.class);
private HistoryServerStateStoreService store;
/**
* Create a secret manager
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
* in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
* for expired tokens in milliseconds
* @param store history server state store for persisting state
*/
public JHSDelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval,
HistoryServerStateStoreService store) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.store = store;
}
@Override
public MRDelegationTokenIdentifier createIdentifier() {
return new MRDelegationTokenIdentifier();
}
@Override
protected void storeNewMasterKey(DelegationKey key) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing master key " + key.getKeyId());
}
try {
store.storeTokenMasterKey(key);
} catch (IOException e) {
LOG.error("Unable to store master key " + key.getKeyId(), e);
}
}
@Override
protected void removeStoredMasterKey(DelegationKey key) {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing master key " + key.getKeyId());
}
try {
store.removeTokenMasterKey(key);
} catch (IOException e) {
LOG.error("Unable to remove master key " + key.getKeyId(), e);
}
}
@Override
protected void storeNewToken(MRDelegationTokenIdentifier tokenId,
long renewDate) {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
try {
store.storeToken(tokenId, renewDate);
} catch (IOException e) {
LOG.error("Unable to store token " + tokenId.getSequenceNumber(), e);
}
}
@Override
protected void removeStoredToken(MRDelegationTokenIdentifier tokenId)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
try {
store.removeToken(tokenId);
} catch (IOException e) {
LOG.error("Unable to remove token " + tokenId.getSequenceNumber(), e);
}
}
@Override
protected void updateStoredToken(MRDelegationTokenIdentifier tokenId,
long renewDate) {
if (LOG.isDebugEnabled()) {
LOG.debug("Updating token " + tokenId.getSequenceNumber());
}
try {
store.updateToken(tokenId, renewDate);
} catch (IOException e) {
LOG.error("Unable to update token " + tokenId.getSequenceNumber(), e);
}
}
public void recover(HistoryServerState state) throws IOException {
LOG.info("Recovering " + getClass().getSimpleName());
for (DelegationKey key : state.tokenMasterKeyState) {
addKey(key);
}
for (Entry<MRDelegationTokenIdentifier, Long> entry :
state.tokenState.entrySet()) {
addPersistedDelegationToken(entry.getKey(), entry.getValue());
}
}
}
| 5,350 | 34.912752 | 91 | java |
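To make the wiring concrete, the helper below is a hypothetical sketch (not Hadoop code) that builds the secret manager against a state store, replays the persisted state, and starts the background threads inherited from AbstractDelegationTokenSecretManager. The interval arguments are example values in milliseconds; the real history server derives them from its configuration.

// Hypothetical wiring; all interval values below are illustrative.
static JHSDelegationTokenSecretManager createAndRecover(
    HistoryServerStateStoreService store) throws IOException {
  JHSDelegationTokenSecretManager mgr = new JHSDelegationTokenSecretManager(
      24L * 60 * 60 * 1000,     // roll a new master key daily
      7L * 24 * 60 * 60 * 1000, // tokens expire after at most a week
      24L * 60 * 60 * 1000,     // tokens must be renewed at least daily
      60L * 60 * 1000,          // scan for expired tokens hourly
      store);
  mgr.recover(store.loadState()); // replay persisted master keys and tokens
  mgr.startThreads();             // key roller and expired-token scanner
  return mgr;
}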
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryContext.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
public interface HistoryContext extends AppContext {
Map<JobId, Job> getAllJobs(ApplicationId appID);
JobsInfo getPartialJobs(Long offset, Long count, String user,
String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd, JobState jobState);
}
| 1,461 | 38.513514 | 87 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
private static final Log LOG = LogFactory.getLog(PartialJob.class);
private JobIndexInfo jobIndexInfo = null;
private JobId jobId = null;
private JobReport jobReport = null;
public PartialJob(JobIndexInfo jobIndexInfo, JobId jobId) {
this.jobIndexInfo = jobIndexInfo;
this.jobId = jobId;
jobReport = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobReport.class);
jobReport.setSubmitTime(jobIndexInfo.getSubmitTime());
jobReport.setStartTime(jobIndexInfo.getJobStartTime());
jobReport.setFinishTime(jobIndexInfo.getFinishTime());
jobReport.setJobState(getState());
}
@Override
public JobId getID() {
// return jobIndexInfo.getJobId();
return this.jobId;
}
@Override
public String getName() {
return jobIndexInfo.getJobName();
}
@Override
public String getQueueName() {
return jobIndexInfo.getQueueName();
}
@Override
public JobState getState() {
JobState js = null;
try {
js = JobState.valueOf(jobIndexInfo.getJobStatus());
} catch (Exception e) {
// Meant for use by the display UI. Exception would prevent it from being
// rendered. Defaulting to KILLED
LOG.warn("Exception while parsing job state. Defaulting to KILLED", e);
js = JobState.KILLED;
}
return js;
}
@Override
public JobReport getReport() {
return jobReport;
}
@Override
public float getProgress() {
return 1.0f;
}
@Override
public Counters getAllCounters() {
return null;
}
@Override
public Map<TaskId, Task> getTasks() {
return null;
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
return null;
}
@Override
public Task getTask(TaskId taskID) {
return null;
}
@Override
public List<String> getDiagnostics() {
return null;
}
@Override
public int getTotalMaps() {
return jobIndexInfo.getNumMaps();
}
@Override
public int getTotalReduces() {
return jobIndexInfo.getNumReduces();
}
@Override
public int getCompletedMaps() {
return jobIndexInfo.getNumMaps();
}
@Override
public int getCompletedReduces() {
return jobIndexInfo.getNumReduces();
}
@Override
public boolean isUber() {
return false;
}
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
return null;
}
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return null;
}
@Override
public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
return true;
}
@Override
public String getUserName() {
return jobIndexInfo.getUser();
}
@Override
public Path getConfFile() {
throw new IllegalStateException("Not implemented yet");
}
@Override
public Configuration loadConfFile() {
throw new IllegalStateException("Not implemented yet");
}
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
throw new IllegalStateException("Not implemented yet");
}
@Override
public List<AMInfo> getAMInfos() {
return null;
}
@Override
public void setQueueName(String queueName) {
throw new UnsupportedOperationException("Can't set job's queue name in history");
}
}
| 5,339 | 25.7 | 96 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import com.google.common.annotations.VisibleForTesting;
/**
* Manages an in memory cache of parsed Job History files.
*/
public class CachedHistoryStorage extends AbstractService implements
HistoryStorage {
private static final Log LOG = LogFactory.getLog(CachedHistoryStorage.class);
private Map<JobId, Job> loadedJobCache = null;
// Maximum number of jobs kept in the loaded-job cache.
private int loadedJobCacheSize;
private HistoryFileManager hsManager;
@Override
public void setHistoryFileManager(HistoryFileManager hsManager) {
this.hsManager = hsManager;
}
@Override
public void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
LOG.info("CachedHistoryStorage Init");
createLoadedJobCache(conf);
}
@SuppressWarnings("serial")
private void createLoadedJobCache(Configuration conf) {
loadedJobCacheSize = conf.getInt(
JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE,
JHAdminConfig.DEFAULT_MR_HISTORY_LOADED_JOB_CACHE_SIZE);
loadedJobCache = Collections.synchronizedMap(new LinkedHashMap<JobId, Job>(
loadedJobCacheSize + 1, 0.75f, true) {
@Override
public boolean removeEldestEntry(final Map.Entry<JobId, Job> eldest) {
return super.size() > loadedJobCacheSize;
}
});
}
public void refreshLoadedJobCache() {
if (getServiceState() == STATE.STARTED) {
setConfig(createConf());
createLoadedJobCache(getConfig());
} else {
LOG.warn("Failed to execute refreshLoadedJobCache: CachedHistoryStorage is not started");
}
}
@VisibleForTesting
Configuration createConf() {
return new Configuration();
}
public CachedHistoryStorage() {
super(CachedHistoryStorage.class.getName());
}
private Job loadJob(HistoryFileInfo fileInfo) {
try {
Job job = fileInfo.loadJob();
if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + job.getID() + " to loaded job cache");
}
// We can clobber results here, but that should be OK, because it only
// means that we may have two identical copies of the same job floating
// around for a while.
loadedJobCache.put(job.getID(), job);
return job;
} catch (IOException e) {
throw new YarnRuntimeException(
"Could not find/load job: " + fileInfo.getJobId(), e);
}
}
@VisibleForTesting
Map<JobId, Job> getLoadedJobCache() {
return loadedJobCache;
}
@Override
public Job getFullJob(JobId jobId) {
if (LOG.isDebugEnabled()) {
LOG.debug("Looking for Job " + jobId);
}
try {
HistoryFileInfo fileInfo = hsManager.getFileInfo(jobId);
Job result = null;
if (fileInfo != null) {
result = loadedJobCache.get(jobId);
if (result == null) {
result = loadJob(fileInfo);
} else if(fileInfo.isDeleted()) {
loadedJobCache.remove(jobId);
result = null;
}
} else {
loadedJobCache.remove(jobId);
}
return result;
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
@Override
public Map<JobId, Job> getAllPartialJobs() {
LOG.debug("Called getAllPartialJobs()");
SortedMap<JobId, Job> result = new TreeMap<JobId, Job>();
try {
for (HistoryFileInfo mi : hsManager.getAllFileInfo()) {
if (mi != null) {
JobId id = mi.getJobId();
result.put(id, new PartialJob(mi.getJobIndexInfo(), id));
}
}
} catch (IOException e) {
LOG.warn("Error trying to scan for all FileInfos", e);
throw new YarnRuntimeException(e);
}
return result;
}
@Override
public JobsInfo getPartialJobs(Long offset, Long count, String user,
String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
JobState jobState) {
return getPartialJobs(getAllPartialJobs().values(), offset, count, user,
queue, sBegin, sEnd, fBegin, fEnd, jobState);
}
public static JobsInfo getPartialJobs(Collection<Job> jobs, Long offset,
Long count, String user, String queue, Long sBegin, Long sEnd,
Long fBegin, Long fEnd, JobState jobState) {
JobsInfo allJobs = new JobsInfo();
if (sBegin == null || sBegin < 0)
sBegin = 0l;
if (sEnd == null)
sEnd = Long.MAX_VALUE;
if (fBegin == null || fBegin < 0)
fBegin = 0l;
if (fEnd == null)
fEnd = Long.MAX_VALUE;
if (offset == null || offset < 0)
offset = 0l;
if (count == null)
count = Long.MAX_VALUE;
if (offset > jobs.size()) {
return allJobs;
}
long at = 0;
long end = offset + count - 1;
if (end < 0) { // due to overflow
end = Long.MAX_VALUE;
}
for (Job job : jobs) {
if (at > end) {
break;
}
// can't really validate queue is a valid one since queues could change
if (queue != null && !queue.isEmpty()) {
if (!job.getQueueName().equals(queue)) {
continue;
}
}
if (user != null && !user.isEmpty()) {
if (!job.getUserName().equals(user)) {
continue;
}
}
JobReport report = job.getReport();
if (report.getStartTime() < sBegin || report.getStartTime() > sEnd) {
continue;
}
if (report.getFinishTime() < fBegin || report.getFinishTime() > fEnd) {
continue;
}
if (jobState != null && jobState != report.getJobState()) {
continue;
}
at++;
if ((at - 1) < offset) {
continue;
}
JobInfo jobInfo = new JobInfo(job);
allJobs.add(jobInfo);
}
return allJobs;
}
}
| 7,432 | 29.093117 | 95 | java |
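The eviction behaviour of the cache built in createLoadedJobCache() comes from java.util.LinkedHashMap in access-order mode combined with removeEldestEntry(). The standalone demo below is not part of Hadoop; it shows the same LRU pattern with a capacity of two so the eviction is easy to observe.

// Standalone illustration of the LRU pattern used by createLoadedJobCache().
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class LruCacheDemo {
  public static void main(String[] args) {
    final int maxSize = 2;
    Map<String, String> cache = Collections.synchronizedMap(
        new LinkedHashMap<String, String>(maxSize + 1, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
            return size() > maxSize; // evict once the cap is exceeded
          }
        });
    cache.put("job_1", "a");
    cache.put("job_2", "b");
    cache.get("job_1");         // touch job_1 so job_2 becomes the eldest entry
    cache.put("job_3", "c");    // exceeds the cap and evicts job_2
    System.out.println(cache.keySet()); // prints [job_1, job_3]
  }
}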
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/protocolPB/HSAdminRefreshProtocolClientSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocolPB;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshLoadedJobCacheRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshJobRetentionSettingsRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshLogRetentionSettingsRequestProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class HSAdminRefreshProtocolClientSideTranslatorPB implements
ProtocolMetaInterface, HSAdminRefreshProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final HSAdminRefreshProtocolPB rpcProxy;
private final static RefreshAdminAclsRequestProto
VOID_REFRESH_ADMIN_ACLS_REQUEST = RefreshAdminAclsRequestProto
.newBuilder().build();
private final static RefreshLoadedJobCacheRequestProto
VOID_REFRESH_LOADED_JOB_CACHE_REQUEST = RefreshLoadedJobCacheRequestProto
.newBuilder().build();
private final static RefreshJobRetentionSettingsRequestProto
VOID_REFRESH_JOB_RETENTION_SETTINGS_REQUEST =
RefreshJobRetentionSettingsRequestProto.newBuilder().build();
private final static RefreshLogRetentionSettingsRequestProto
VOID_REFRESH_LOG_RETENTION_SETTINGS_REQUEST =
RefreshLogRetentionSettingsRequestProto.newBuilder().build();
public HSAdminRefreshProtocolClientSideTranslatorPB(
HSAdminRefreshProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public void refreshAdminAcls() throws IOException {
try {
rpcProxy.refreshAdminAcls(NULL_CONTROLLER,
VOID_REFRESH_ADMIN_ACLS_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public void refreshLoadedJobCache() throws IOException {
try {
rpcProxy.refreshLoadedJobCache(NULL_CONTROLLER,
VOID_REFRESH_LOADED_JOB_CACHE_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public void refreshJobRetentionSettings() throws IOException {
try {
rpcProxy.refreshJobRetentionSettings(NULL_CONTROLLER,
VOID_REFRESH_JOB_RETENTION_SETTINGS_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public void refreshLogRetentionSettings() throws IOException {
try {
rpcProxy.refreshLogRetentionSettings(NULL_CONTROLLER,
VOID_REFRESH_LOG_RETENTION_SETTINGS_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
HSAdminRefreshProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(HSAdminRefreshProtocolPB.class), methodName);
}
}
| 4,513 | 35.699187 | 116 | java |
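For context on where this translator sits, the fragment below is a hedged sketch of the usual Hadoop protobuf client bootstrap: register the protobuf RPC engine, obtain the raw HSAdminRefreshProtocolPB proxy, and wrap it in the translator. It assumes a few extra imports beyond this file (java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.ipc.ProtobufRpcEngine); the createRefreshProxy helper is illustrative, not part of this class.

// Hypothetical client bootstrap for the translator above.
static HSAdminRefreshProtocol createRefreshProxy(Configuration conf,
    InetSocketAddress address) throws IOException {
  RPC.setProtocolEngine(conf, HSAdminRefreshProtocolPB.class,
      ProtobufRpcEngine.class);
  HSAdminRefreshProtocolPB proxy = RPC.getProxy(
      HSAdminRefreshProtocolPB.class,
      RPC.getProtocolVersion(HSAdminRefreshProtocolPB.class), address, conf);
  // Calls on the returned object become protobuf requests that the
  // server-side translator unpacks and forwards to the real implementation.
  return new HSAdminRefreshProtocolClientSideTranslatorPB(proxy);
}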
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/protocolPB/HSAdminRefreshProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocolPB;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshLoadedJobCacheRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshLoadedJobCacheResponseProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshJobRetentionSettingsRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshJobRetentionSettingsResponseProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshLogRetentionSettingsRequestProto;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.RefreshLogRetentionSettingsResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class HSAdminRefreshProtocolServerSideTranslatorPB implements
HSAdminRefreshProtocolPB {
private final HSAdminRefreshProtocol impl;
private final static RefreshAdminAclsResponseProto
VOID_REFRESH_ADMIN_ACLS_RESPONSE = RefreshAdminAclsResponseProto
.newBuilder().build();
private final static RefreshLoadedJobCacheResponseProto
VOID_REFRESH_LOADED_JOB_CACHE_RESPONSE = RefreshLoadedJobCacheResponseProto
.newBuilder().build();
private final static RefreshJobRetentionSettingsResponseProto
VOID_REFRESH_JOB_RETENTION_SETTINGS_RESPONSE =
RefreshJobRetentionSettingsResponseProto.newBuilder().build();
private final static RefreshLogRetentionSettingsResponseProto
VOID_REFRESH_LOG_RETENTION_SETTINGS_RESPONSE =
RefreshLogRetentionSettingsResponseProto.newBuilder().build();
public HSAdminRefreshProtocolServerSideTranslatorPB(
HSAdminRefreshProtocol impl) {
this.impl = impl;
}
@Override
public RefreshAdminAclsResponseProto refreshAdminAcls(
RpcController controller, RefreshAdminAclsRequestProto request)
throws ServiceException {
try {
impl.refreshAdminAcls();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_ADMIN_ACLS_RESPONSE;
}
@Override
public RefreshLoadedJobCacheResponseProto refreshLoadedJobCache(
RpcController controller, RefreshLoadedJobCacheRequestProto request)
throws ServiceException {
try {
impl.refreshLoadedJobCache();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_LOADED_JOB_CACHE_RESPONSE;
}
@Override
public RefreshJobRetentionSettingsResponseProto refreshJobRetentionSettings(
RpcController controller,
RefreshJobRetentionSettingsRequestProto request)
throws ServiceException {
try {
impl.refreshJobRetentionSettings();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_JOB_RETENTION_SETTINGS_RESPONSE;
}
@Override
public RefreshLogRetentionSettingsResponseProto refreshLogRetentionSettings(
RpcController controller,
RefreshLogRetentionSettingsRequestProto request)
throws ServiceException {
try {
impl.refreshLogRetentionSettings();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_LOG_RETENTION_SETTINGS_RESPONSE;
}
}
| 4,552 | 38.591304 | 117 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
import org.apache.hadoop.mapreduce.v2.app.webapp.CountersBlock;
import org.apache.hadoop.yarn.webapp.SubView;
/**
* Render the counters page
*/
public class HsCountersPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
setActiveNavColumnForTask();
set(DATATABLES_SELECTOR, "#counters .dt-counters");
set(initSelector(DATATABLES),
"{bJQueryUI:true, sDom:'t', iDisplayLength:-1}");
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#postHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void postHead(Page.HTML<_> html) {
html.
style("#counters, .dt-counters { table-layout: fixed }",
"#counters th { overflow: hidden; vertical-align: middle }",
"#counters .dataTables_wrapper { min-height: 1em }",
"#counters .group { width: 15em }",
"#counters .name { width: 30em }");
}
/**
* The content of this page is the CountersBlock now.
* @return CountersBlock.class
*/
@Override protected Class<? extends SubView> content() {
return CountersBlock.class;
}
}
| 2,228 | 33.828125 | 119 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/JAXBContextResolver.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterGroupInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
@Singleton
@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {
private JAXBContext context;
private final Set<Class> types;
// you have to specify all the dao classes here
private final Class[] cTypes = { HistoryInfo.class, JobInfo.class,
JobsInfo.class, TaskInfo.class, TasksInfo.class, TaskAttemptsInfo.class,
ConfInfo.class, CounterInfo.class, JobTaskCounterInfo.class,
JobTaskAttemptCounterInfo.class, TaskCounterInfo.class,
JobCounterInfo.class, ReduceTaskAttemptInfo.class, TaskAttemptInfo.class,
TaskAttemptsInfo.class, CounterGroupInfo.class,
TaskCounterGroupInfo.class, AMAttemptInfo.class, AMAttemptsInfo.class,
RemoteExceptionData.class };
public JAXBContextResolver() throws Exception {
this.types = new HashSet<Class>(Arrays.asList(cTypes));
this.context = new JSONJAXBContext(JSONConfiguration.natural()
.rootUnwrapping(false).build(), cTypes);
}
@Override
public JAXBContext getContext(Class<?> objectType) {
return (types.contains(objectType)) ? context : null;
}
}
| 3,570 | 42.54878 | 79 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.log.AggregatedLogsBlock;
public class HsLogsPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
setActiveNavColumnForTask();
}
/**
* The content of this page is the AggregatedLogsBlock
* @return AggregatedLogsBlock.class
*/
@Override protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
}
}
| 1,464 | 34.731707 | 115 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/package-info.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import org.apache.hadoop.classification.InterfaceAudience;
| 942 | 43.904762 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
import org.apache.hadoop.mapreduce.v2.app.webapp.ConfBlock;
import org.apache.hadoop.yarn.webapp.SubView;
/**
* Render a page with the configuration for a given job in it.
*/
public class HsConfPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
String jobID = $(JOB_ID);
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
: join("Configuration for MapReduce Job ", $(JOB_ID)));
commonPreHead(html);
set(DATATABLES_ID, "conf");
set(initID(DATATABLES, "conf"), confTableInit());
set(postInitID(DATATABLES, "conf"), confPostTableInit());
setTableStyles(html, "conf");
//Override the default nav config
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
}
/**
* The content of this page is the configuration block.
* @return ConfBlock.class
*/
@Override protected Class<? extends SubView> content() {
return ConfBlock.class;
}
/**
* @return the end of the JS map that is the jquery datatable config for the
* conf table.
*/
private String confTableInit() {
return tableInit().append("}").toString();
}
/**
* @return the java script code to allow the jquery conf datatable to filter
* by column.
*/
private String confPostTableInit() {
return "var confInitVals = new Array();\n" +
"$('tfoot input').keyup( function () \n{"+
" confDataTable.fnFilter( this.value, $('tfoot input').index(this) );\n"+
"} );\n"+
"$('tfoot input').each( function (i) {\n"+
" confInitVals[i] = this.value;\n"+
"} );\n"+
"$('tfoot input').focus( function () {\n"+
" if ( this.className == 'search_init' )\n"+
" {\n"+
" this.className = '';\n"+
" this.value = '';\n"+
" }\n"+
"} );\n"+
"$('tfoot input').blur( function (i) {\n"+
" if ( this.value == '' )\n"+
" {\n"+
" this.className = 'search_init';\n"+
" this.value = confInitVals[$('tfoot input').index(this)];\n"+
" }\n"+
"} );\n";
}
}
| 3,558 | 34.59 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
/**
* A page that shows info about the history server.
*/
public class HsAboutPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
//override the nav config from commonPreHead
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
}
/**
* The content of this page is an info block describing the history server.
* @return InfoBlock.class
*/
@Override protected Class<? extends SubView> content() {
HistoryInfo info = new HistoryInfo();
info("History Server").
_("BuildVersion", info.getHadoopBuildVersion()
+ " on " + info.getHadoopVersionBuiltOn()).
_("History Server started on", Times.format(info.getStartedOn()));
return InfoBlock.class;
}
}
| 2,132 | 35.775862 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_SELECTOR;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initSelector;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.SubView;
/**
* A page showing the tasks for a given application.
*/
public class HsTasksPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
set(DATATABLES_ID, "tasks");
set(DATATABLES_SELECTOR, ".dt-tasks" );
set(initSelector(DATATABLES), tasksTableInit());
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
set(initID(DATATABLES, "tasks"), tasksTableInit());
set(postInitID(DATATABLES, "tasks"), jobsPostTableInit());
setTableStyles(html, "tasks");
}
/**
* The content of this page is the TasksBlock
* @return HsTasksBlock.class
*/
@Override protected Class<? extends SubView> content() {
return HsTasksBlock.class;
}
/**
* @return the end of the JS map that is the jquery datatable configuration
* for the tasks table.
*/
private String tasksTableInit() {
TaskType type = null;
String symbol = $(TASK_TYPE);
if (!symbol.isEmpty()) {
type = MRApps.taskType(symbol);
}
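    // Column indexes in the render rules below differ between map and reduce
    // tables because reduce tasks add shuffle and merge timing columns.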
StringBuilder b = tableInit().
append(", 'aaData': tasksTableData")
.append(", bDeferRender: true")
.append(", bProcessing: true")
.append("\n, aoColumnDefs: [\n")
.append("{'sType':'string', 'aTargets': [ 0 ]")
.append(", 'mRender': parseHadoopID }")
.append(", {'sType':'numeric', 'aTargets': [ 4")
.append(type == TaskType.REDUCE ? ", 9, 10, 11, 12" : ", 7")
.append(" ], 'mRender': renderHadoopElapsedTime }")
.append("\n, {'sType':'numeric', 'aTargets': [ 2, 3, 5")
.append(type == TaskType.REDUCE ? ", 6, 7, 8" : ", 6")
.append(" ], 'mRender': renderHadoopDate }]")
// Sort by id upon page load
.append("\n, aaSorting: [[0, 'asc']]")
.append("}");
return b.toString();
}
private String jobsPostTableInit() {
return "var asInitVals = new Array();\n" +
"$('tfoot input').keyup( function () \n{"+
" tasksDataTable.fnFilter( this.value, $('tfoot input').index(this) );\n"+
"} );\n"+
"$('tfoot input').each( function (i) {\n"+
" asInitVals[i] = this.value;\n"+
"} );\n"+
"$('tfoot input').focus( function () {\n"+
" if ( this.className == 'search_init' )\n"+
" {\n"+
" this.className = '';\n"+
" this.value = '';\n"+
" }\n"+
"} );\n"+
"$('tfoot input').blur( function (i) {\n"+
" if ( this.value == '' )\n"+
" {\n"+
" this.className = 'search_init';\n"+
" this.value = asInitVals[$('tfoot input').index(this)];\n"+
" }\n"+
"} );\n";
}
}
| 4,470 | 36.258333 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.inject.Inject;
/**
* Render all of the jobs that the history server is aware of.
*/
public class HsJobsBlock extends HtmlBlock {
final AppContext appContext;
final SimpleDateFormat dateFormat =
new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");
@Inject HsJobsBlock(AppContext appCtx) {
appContext = appCtx;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override protected void render(Block html) {
TBODY<TABLE<Hamlet>> tbody = html.
h2("Retired Jobs").
table("#jobs").
thead().
tr().
th("Submit Time").
th("Start Time").
th("Finish Time").
th(".id", "Job ID").
th(".name", "Name").
th("User").
th("Queue").
th(".state", "State").
th("Maps Total").
th("Maps Completed").
th("Reduces Total").
th("Reduces Completed")._()._().
tbody();
LOG.info("Getting list of all Jobs.");
// Write all the data into a JavaScript array of arrays for JQuery
// DataTables to display
StringBuilder jobsTableData = new StringBuilder("[\n");
for (Job j : appContext.getAllJobs().values()) {
JobInfo job = new JobInfo(j);
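      // Job name, user and queue are user-supplied values, so escape them for
      // both HTML and JavaScript before embedding them in the table data.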
jobsTableData.append("[\"")
.append(dateFormat.format(new Date(job.getSubmitTime()))).append("\",\"")
.append(dateFormat.format(new Date(job.getStartTime()))).append("\",\"")
.append(dateFormat.format(new Date(job.getFinishTime()))).append("\",\"")
.append("<a href='").append(url("job", job.getId())).append("'>")
.append(job.getId()).append("</a>\",\"")
.append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
job.getName()))).append("\",\"")
.append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
job.getUserName()))).append("\",\"")
.append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
job.getQueueName()))).append("\",\"")
.append(job.getState()).append("\",\"")
.append(String.valueOf(job.getMapsTotal())).append("\",\"")
.append(String.valueOf(job.getMapsCompleted())).append("\",\"")
.append(String.valueOf(job.getReducesTotal())).append("\",\"")
.append(String.valueOf(job.getReducesCompleted())).append("\"],\n");
}
//Remove the last comma and close off the array of arrays
if(jobsTableData.charAt(jobsTableData.length() - 2) == ',') {
jobsTableData.delete(jobsTableData.length()-2, jobsTableData.length()-1);
}
jobsTableData.append("]");
html.script().$type("text/javascript").
_("var jobsTableData=" + jobsTableData)._();
tbody._().
tfoot().
tr().
th().input("search_init").$type(InputType.text).$name("submit_time").$value("Submit Time")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Start Time")._()._().
th().input("search_init").$type(InputType.text).$name("finish_time").$value("Finish Time")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Job ID")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Name")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("User")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Queue")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("State")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Maps Total")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Maps Completed")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Reduces Total")._()._().
th().input("search_init").$type(InputType.text).$name("start_time").$value("Reduces Completed")._()._().
_().
_().
_();
}
}
| 5,530 | 43.96748 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_OWNER;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_LOG_TYPE;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.NM_NODENAME;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMParams;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
public class HsWebApp extends WebApp implements AMParams {
private HistoryContext history;
public HsWebApp(HistoryContext history) {
this.history = history;
}
@Override
public void setup() {
bind(HsWebServices.class);
bind(JAXBContextResolver.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(history);
bind(HistoryContext.class).toInstance(history);
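    // Map history server URLs onto HsController actions; pajoin() appends the
    // named path parameters (job id, task id, ...) to each route template.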
route("/", HsController.class);
route("/app", HsController.class);
route(pajoin("/job", JOB_ID), HsController.class, "job");
route(pajoin("/conf", JOB_ID), HsController.class, "conf");
route(pajoin("/jobcounters", JOB_ID), HsController.class, "jobCounters");
route(pajoin("/singlejobcounter",JOB_ID, COUNTER_GROUP, COUNTER_NAME),
HsController.class, "singleJobCounter");
route(pajoin("/tasks", JOB_ID, TASK_TYPE), HsController.class, "tasks");
route(pajoin("/attempts", JOB_ID, TASK_TYPE, ATTEMPT_STATE),
HsController.class, "attempts");
route(pajoin("/task", TASK_ID), HsController.class, "task");
route(pajoin("/taskcounters", TASK_ID), HsController.class, "taskCounters");
route(pajoin("/singletaskcounter",TASK_ID, COUNTER_GROUP, COUNTER_NAME),
HsController.class, "singleTaskCounter");
route("/about", HsController.class, "about");
route(pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
CONTAINER_LOG_TYPE), HsController.class, "logs");
route(pajoin("/nmlogs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
CONTAINER_LOG_TYPE), HsController.class, "nmlogs");
}
}
| 3,159 | 43.507042 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.UriInfo;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebServices;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import org.apache.hadoop.yarn.webapp.WebApp;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.Inject;
@Path("/ws/v1/history")
public class HsWebServices {
private final HistoryContext ctx;
private WebApp webapp;
private @Context HttpServletResponse response;
@Context
UriInfo uriInfo;
@Inject
public HsWebServices(final HistoryContext ctx, final Configuration conf,
final WebApp webapp) {
this.ctx = ctx;
this.webapp = webapp;
}
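  /**
   * Check whether the remote user may view the given job. If the request has
   * no remote user, the ACL check is skipped and access is allowed.
   */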
private boolean hasAccess(Job job, HttpServletRequest request) {
String remoteUser = request.getRemoteUser();
if (remoteUser != null) {
return job.checkAccess(UserGroupInformation.createRemoteUser(remoteUser),
JobACL.VIEW_JOB);
}
return true;
}
private void checkAccess(Job job, HttpServletRequest request) {
if (!hasAccess(job, request)) {
throw new WebApplicationException(Status.UNAUTHORIZED);
}
}
private void init() {
//clear content type
response.setContentType(null);
}
@VisibleForTesting
void setResponse(HttpServletResponse response) {
this.response = response;
}
@GET
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public HistoryInfo get() {
return getHistoryInfo();
}
@GET
@Path("/info")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public HistoryInfo getHistoryInfo() {
init();
return new HistoryInfo();
}
@GET
@Path("/mapreduce/jobs")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public JobsInfo getJobs(@QueryParam("user") String userQuery,
@QueryParam("limit") String count,
@QueryParam("state") String stateQuery,
@QueryParam("queue") String queueQuery,
@QueryParam("startedTimeBegin") String startedBegin,
@QueryParam("startedTimeEnd") String startedEnd,
@QueryParam("finishedTimeBegin") String finishBegin,
@QueryParam("finishedTimeEnd") String finishEnd) {
Long countParam = null;
init();
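    // Example request (illustrative):
    //   GET /ws/v1/history/mapreduce/jobs?state=SUCCEEDED&limit=10
    // Each numeric query parameter below is validated: it must parse as a long
    // and be in range, otherwise a BadRequestException (HTTP 400) is thrown.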
if (count != null && !count.isEmpty()) {
try {
countParam = Long.parseLong(count);
} catch (NumberFormatException e) {
throw new BadRequestException(e.getMessage());
}
if (countParam <= 0) {
throw new BadRequestException("limit value must be greater then 0");
}
}
Long sBegin = null;
if (startedBegin != null && !startedBegin.isEmpty()) {
try {
sBegin = Long.parseLong(startedBegin);
} catch (NumberFormatException e) {
throw new BadRequestException("Invalid number format: " + e.getMessage());
}
if (sBegin < 0) {
throw new BadRequestException("startedTimeBegin must be greater than 0");
}
}
Long sEnd = null;
if (startedEnd != null && !startedEnd.isEmpty()) {
try {
sEnd = Long.parseLong(startedEnd);
} catch (NumberFormatException e) {
throw new BadRequestException("Invalid number format: " + e.getMessage());
}
if (sEnd < 0) {
throw new BadRequestException("startedTimeEnd must be greater than 0");
}
}
if (sBegin != null && sEnd != null && sBegin > sEnd) {
throw new BadRequestException(
"startedTimeEnd must be greater than startTimeBegin");
}
Long fBegin = null;
if (finishBegin != null && !finishBegin.isEmpty()) {
try {
fBegin = Long.parseLong(finishBegin);
} catch (NumberFormatException e) {
throw new BadRequestException("Invalid number format: " + e.getMessage());
}
if (fBegin < 0) {
throw new BadRequestException("finishedTimeBegin must be greater than 0");
}
}
Long fEnd = null;
if (finishEnd != null && !finishEnd.isEmpty()) {
try {
fEnd = Long.parseLong(finishEnd);
} catch (NumberFormatException e) {
throw new BadRequestException("Invalid number format: " + e.getMessage());
}
if (fEnd < 0) {
throw new BadRequestException("finishedTimeEnd must be greater than 0");
}
}
if (fBegin != null && fEnd != null && fBegin > fEnd) {
throw new BadRequestException(
"finishedTimeEnd must be greater than finishedTimeBegin");
}
JobState jobState = null;
if (stateQuery != null) {
jobState = JobState.valueOf(stateQuery);
}
return ctx.getPartialJobs(0l, countParam, userQuery, queueQuery,
sBegin, sEnd, fBegin, fEnd, jobState);
}
@GET
@Path("/mapreduce/jobs/{jobid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public JobInfo getJob(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
return new JobInfo(job);
}
@GET
@Path("/mapreduce/jobs/{jobid}/jobattempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
AMAttemptsInfo amAttempts = new AMAttemptsInfo();
for (AMInfo amInfo : job.getAMInfos()) {
AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString(job
.getID()), job.getUserName(), uriInfo.getBaseUri().toString(),
webapp.name());
amAttempts.add(attempt);
}
return amAttempts;
}
@GET
@Path("/mapreduce/jobs/{jobid}/counters")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
return new JobCounterInfo(this.ctx, job);
}
@GET
@Path("/mapreduce/jobs/{jobid}/conf")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public ConfInfo getJobConf(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
ConfInfo info;
try {
info = new ConfInfo(job);
} catch (IOException e) {
throw new NotFoundException("unable to load configuration for job: "
+ jid);
}
return info;
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @QueryParam("type") String type) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
TasksInfo allTasks = new TasksInfo();
for (Task task : job.getTasks().values()) {
TaskType ttype = null;
if (type != null && !type.isEmpty()) {
try {
ttype = MRApps.taskType(type);
} catch (YarnRuntimeException e) {
throw new BadRequestException("tasktype must be either m or r");
}
}
if (ttype != null && task.getType() != ttype) {
continue;
}
allTasks.add(new TaskInfo(task));
}
return allTasks;
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskInfo getJobTask(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @PathParam("taskid") String tid) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
return new TaskInfo(task);
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/counters")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public JobTaskCounterInfo getSingleTaskCounters(
@Context HttpServletRequest hsr, @PathParam("jobid") String jid,
@PathParam("taskid") String tid) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
TaskId taskID = MRApps.toTaskID(tid);
if (taskID == null) {
throw new NotFoundException("taskid " + tid + " not found or invalid");
}
Task task = job.getTask(taskID);
if (task == null) {
throw new NotFoundException("task not found with id " + tid);
}
return new JobTaskCounterInfo(task);
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @PathParam("taskid") String tid) {
init();
TaskAttemptsInfo attempts = new TaskAttemptsInfo();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
for (TaskAttempt ta : task.getAttempts().values()) {
if (ta != null) {
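        // Reduce attempts expose extra shuffle and merge timing fields, so use
        // the reduce-specific DAO for them.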
if (task.getType() == TaskType.REDUCE) {
attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
} else {
attempts.add(new TaskAttemptInfo(ta, task.getType(), false));
}
}
}
return attempts;
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @PathParam("taskid") String tid,
@PathParam("attemptid") String attId) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
task);
if (task.getType() == TaskType.REDUCE) {
return new ReduceTaskAttemptInfo(ta, task.getType());
} else {
return new TaskAttemptInfo(ta, task.getType(), false);
}
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(
@Context HttpServletRequest hsr, @PathParam("jobid") String jid,
@PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
task);
return new JobTaskAttemptCounterInfo(ta);
}
}
| 13,926 | 33.992462 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.inject.Inject;
/**
* Render a table of tasks for a given type.
*/
public class HsTasksBlock extends HtmlBlock {
final App app;
@Inject HsTasksBlock(App app) {
this.app = app;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override protected void render(Block html) {
if (app.getJob() == null) {
html.
h2($(TITLE));
return;
}
TaskType type = null;
String symbol = $(TASK_TYPE);
if (!symbol.isEmpty()) {
type = MRApps.taskType(symbol);
}
THEAD<TABLE<Hamlet>> thead;
if(type != null)
thead = html.table("#"+app.getJob().getID()
+ type).$class("dt-tasks").thead();
else
thead = html.table("#tasks").thead();
//Create the spanning row
int attemptColSpan = type == TaskType.REDUCE ? 8 : 3;
thead.tr().
th().$colspan(5).$class("ui-state-default")._("Task")._().
th().$colspan(attemptColSpan).$class("ui-state-default").
_("Successful Attempt")._().
_();
TR<THEAD<TABLE<Hamlet>>> theadRow = thead.
tr().
th("Name").
th("State").
th("Start Time").
th("Finish Time").
th("Elapsed Time").
th("Start Time"); //Attempt
if(type == TaskType.REDUCE) {
theadRow.th("Shuffle Finish Time"); //Attempt
theadRow.th("Merge Finish Time"); //Attempt
}
theadRow.th("Finish Time"); //Attempt
if(type == TaskType.REDUCE) {
theadRow.th("Elapsed Time Shuffle"); //Attempt
theadRow.th("Elapsed Time Merge"); //Attempt
theadRow.th("Elapsed Time Reduce"); //Attempt
}
theadRow.th("Elapsed Time"); //Attempt
TBODY<TABLE<Hamlet>> tbody = theadRow._()._().tbody();
// Write all the data into a JavaScript array of arrays for JQuery
// DataTables to display
StringBuilder tasksTableData = new StringBuilder("[\n");
for (Task task : app.getJob().getTasks().values()) {
if (type != null && task.getType() != type) {
continue;
}
TaskInfo info = new TaskInfo(task);
String tid = info.getId();
long startTime = info.getStartTime();
long finishTime = info.getFinishTime();
long elapsed = info.getElapsedTime();
long attemptStartTime = -1;
long shuffleFinishTime = -1;
long sortFinishTime = -1;
long attemptFinishTime = -1;
long elapsedShuffleTime = -1;
      long elapsedSortTime = -1;
long elapsedReduceTime = -1;
long attemptElapsed = -1;
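      // Timing columns come from the successful attempt, if any; reduce
      // attempts additionally report shuffle and merge phase times.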
TaskAttempt successful = info.getSuccessful();
if(successful != null) {
TaskAttemptInfo ta;
if(type == TaskType.REDUCE) {
ReduceTaskAttemptInfo rta = new ReduceTaskAttemptInfo(successful, type);
shuffleFinishTime = rta.getShuffleFinishTime();
sortFinishTime = rta.getMergeFinishTime();
elapsedShuffleTime = rta.getElapsedShuffleTime();
elapsedSortTime = rta.getElapsedMergeTime();
elapsedReduceTime = rta.getElapsedReduceTime();
ta = rta;
} else {
ta = new TaskAttemptInfo(successful, type, false);
}
attemptStartTime = ta.getStartTime();
attemptFinishTime = ta.getFinishTime();
attemptElapsed = ta.getElapsedTime();
}
tasksTableData.append("[\"")
.append("<a href='" + url("task", tid)).append("'>")
.append(tid).append("</a>\",\"")
.append(info.getState()).append("\",\"")
.append(startTime).append("\",\"")
.append(finishTime).append("\",\"")
.append(elapsed).append("\",\"")
.append(attemptStartTime).append("\",\"");
if(type == TaskType.REDUCE) {
tasksTableData.append(shuffleFinishTime).append("\",\"")
.append(sortFinishTime).append("\",\"");
}
tasksTableData.append(attemptFinishTime).append("\",\"");
if(type == TaskType.REDUCE) {
tasksTableData.append(elapsedShuffleTime).append("\",\"")
.append(elapsedSortTime).append("\",\"")
.append(elapsedReduceTime).append("\",\"");
}
tasksTableData.append(attemptElapsed).append("\"],\n");
}
//Remove the last comma and close off the array of arrays
if(tasksTableData.charAt(tasksTableData.length() - 2) == ',') {
tasksTableData.delete(
tasksTableData.length()-2, tasksTableData.length()-1);
}
tasksTableData.append("]");
html.script().$type("text/javascript").
_("var tasksTableData=" + tasksTableData)._();
TR<TFOOT<TABLE<Hamlet>>> footRow = tbody._().tfoot().tr();
footRow.th().input("search_init").$type(InputType.text).$name("task")
.$value("ID")._()._().th().input("search_init").$type(InputType.text)
.$name("state").$value("State")._()._().th().input("search_init")
.$type(InputType.text).$name("start_time").$value("Start Time")._()._()
.th().input("search_init").$type(InputType.text).$name("finish_time")
.$value("Finish Time")._()._().th().input("search_init")
.$type(InputType.text).$name("elapsed_time").$value("Elapsed Time")._()
._().th().input("search_init").$type(InputType.text)
.$name("attempt_start_time").$value("Start Time")._()._();
if(type == TaskType.REDUCE) {
footRow.th().input("search_init").$type(InputType.text)
.$name("shuffle_time").$value("Shuffle Time")._()._();
footRow.th().input("search_init").$type(InputType.text)
.$name("merge_time").$value("Merge Time")._()._();
}
footRow.th().input("search_init").$type(InputType.text)
.$name("attempt_finish").$value("Finish Time")._()._();
if(type == TaskType.REDUCE) {
footRow.th().input("search_init").$type(InputType.text)
.$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time")._()._();
footRow.th().input("search_init").$type(InputType.text)
.$name("elapsed_merge_time").$value("Elapsed Merge Time")._()._();
footRow.th().input("search_init").$type(InputType.text)
.$name("elapsed_reduce_time").$value("Elapsed Reduce Time")._()._();
}
footRow.th().input("search_init").$type(InputType.text)
.$name("attempt_elapsed").$value("Elapsed Time")._()._();
footRow._()._()._();
}
}
| 8,271 | 37.654206 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import org.apache.hadoop.yarn.webapp.SubView;
/**
* Render a page that describes a specific job.
*/
public class HsJobPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
String jobID = $(JOB_ID);
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
: join("MapReduce Job ", $(JOB_ID)));
commonPreHead(html);
//Override the nav config from the commonPreHead
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
}
/**
* The content of this page is the JobBlock
* @return HsJobBlock.class
*/
@Override protected Class<? extends SubView> content() {
return HsJobBlock.class;
}
}
| 1,937 | 34.888889 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.app.webapp.AppController;
import org.apache.hadoop.yarn.webapp.View;
import org.apache.hadoop.yarn.webapp.log.AggregatedLogsPage;
import com.google.inject.Inject;
/**
* This class renders the various pages that the History Server WebApp supports
*/
public class HsController extends AppController {
@Inject HsController(App app, Configuration conf, RequestContext ctx) {
super(app, conf, ctx, "History");
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#index()
*/
@Override
public void index() {
setTitle("JobHistory");
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#jobPage()
*/
@Override
protected Class<? extends View> jobPage() {
return HsJobPage.class;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#countersPage()
*/
@Override
public Class<? extends View> countersPage() {
return HsCountersPage.class;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasksPage()
*/
@Override
protected Class<? extends View> tasksPage() {
return HsTasksPage.class;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#taskPage()
*/
@Override
protected Class<? extends View> taskPage() {
return HsTaskPage.class;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#attemptsPage()
*/
@Override
protected Class<? extends View> attemptsPage() {
return HsAttemptsPage.class;
}
// Need all of these methods here also as Guice doesn't look into parent
// classes.
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#job()
*/
@Override
public void job() {
super.job();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#jobCounters()
*/
@Override
public void jobCounters() {
super.jobCounters();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#taskCounters()
*/
@Override
public void taskCounters() {
super.taskCounters();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasks()
*/
@Override
public void tasks() {
super.tasks();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#task()
*/
@Override
public void task() {
super.task();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#attempts()
*/
@Override
public void attempts() {
super.attempts();
}
/**
* @return the page that will be used to render the /conf page
*/
@Override
protected Class<? extends View> confPage() {
return HsConfPage.class;
}
/**
* @return the page about the current server.
*/
protected Class<? extends View> aboutPage() {
return HsAboutPage.class;
}
/**
* Render a page about the current server.
*/
public void about() {
render(aboutPage());
}
/**
* Render the logs page.
*/
public void logs() {
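    // The log entity can be either a job id or a task attempt id; try the job
    // id first and fall back to parsing it as a task attempt id.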
String logEntity = $(ENTITY_STRING);
JobID jid = null;
try {
jid = JobID.forName(logEntity);
set(JOB_ID, logEntity);
requireJob();
} catch (Exception e) {
// fall below
}
if (jid == null) {
try {
TaskAttemptID taskAttemptId = TaskAttemptID.forName(logEntity);
set(TASK_ID, taskAttemptId.getTaskID().toString());
set(JOB_ID, taskAttemptId.getJobID().toString());
requireTask();
requireJob();
} catch (Exception e) {
// fall below
}
}
render(HsLogsPage.class);
}
/**
* Render the nm logs page.
*/
public void nmlogs() {
render(AggregatedLogsPage.class);
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleCounterPage()
*/
@Override
protected Class<? extends View> singleCounterPage() {
return HsSingleCounterPage.class;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleJobCounter()
*/
@Override
public void singleJobCounter() throws IOException{
super.singleJobCounter();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleTaskCounter()
*/
@Override
public void singleTaskCounter() throws IOException{
super.singleTaskCounter();
}
}
| 5,777 | 23.175732 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
/**
* A view that should be used as the base class for all history server pages.
*/
public class HsView extends TwoColumnLayout {
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
set(DATATABLES_ID, "jobs");
set(initID(DATATABLES, "jobs"), jobsTableInit());
set(postInitID(DATATABLES, "jobs"), jobsPostTableInit());
setTableStyles(html, "jobs");
}
/**
* The prehead that should be common to all subclasses.
* @param html used to render.
*/
protected void commonPreHead(Page.HTML<_> html) {
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
}
/**
* Determine which navigation column is active.
*/
protected void setActiveNavColumnForTask() {
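    // Accordion section 2 (the task-level section) is opened when a task id is
    // present in the request; otherwise section 1 (the job-level section).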
String tid = $(TASK_ID);
String activeNav = "2";
if((tid == null || tid.isEmpty())) {
activeNav = "1";
}
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:"+activeNav+"}");
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#nav()
*/
@Override
protected Class<? extends SubView> nav() {
return HsNavBlock.class;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#content()
*/
@Override
protected Class<? extends SubView> content() {
return HsJobsBlock.class;
}
//TODO We need a way to move all of the javascript/CSS that is for a subview
// into that subview.
/**
* @return The end of a javascript map that is the jquery datatable
 *         configuration for the jobs table. The Jobs table is assumed to be
 *         rendered by the class returned from {@link #content()}.
*/
private String jobsTableInit() {
return tableInit().
append(", 'aaData': jobsTableData").
append(", bDeferRender: true").
append(", bProcessing: true").
// Sort by id upon page load
append(", aaSorting: [[3, 'desc']]").
append(", aoColumnDefs:[").
// Maps Total, Maps Completed, Reduces Total and Reduces Completed
append("{'sType':'numeric', 'bSearchable': false" +
", 'aTargets': [ 8, 9, 10, 11 ] }").
append("]}").
toString();
}
/**
* @return javascript to add into the jquery block after the table has
* been initialized. This code adds in per field filtering.
*/
private String jobsPostTableInit() {
return "var asInitVals = new Array();\n" +
"$('tfoot input').keyup( function () \n{"+
" jobsDataTable.fnFilter( this.value, $('tfoot input').index(this) );\n"+
"} );\n"+
"$('tfoot input').each( function (i) {\n"+
" asInitVals[i] = this.value;\n"+
"} );\n"+
"$('tfoot input').focus( function () {\n"+
" if ( this.className == 'search_init' )\n"+
" {\n"+
" this.className = '';\n"+
" this.value = '';\n"+
" }\n"+
"} );\n"+
"$('tfoot input').blur( function (i) {\n"+
" if ( this.value == '' )\n"+
" {\n"+
" this.className = 'search_init';\n"+
" this.value = asInitVals[$('tfoot input').index(this)];\n"+
" }\n"+
"} );\n";
}
}
| 4,952 | 34.378571 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
import java.util.Date;
import java.util.List;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import com.google.inject.Inject;
/**
* Render a block of HTML for a given job.
*/
public class HsJobBlock extends HtmlBlock {
final AppContext appContext;
@Inject HsJobBlock(AppContext appctx) {
appContext = appctx;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override protected void render(Block html) {
String jid = $(JOB_ID);
if (jid.isEmpty()) {
html.
p()._("Sorry, can't do anything without a JobID.")._();
return;
}
JobId jobID = MRApps.toJobID(jid);
Job j = appContext.getJob(jobID);
if (j == null) {
html.
p()._("Sorry, ", jid, " not found.")._();
return;
}
List<AMInfo> amInfos = j.getAMInfos();
JobInfo job = new JobInfo(j);
ResponseInfo infoBlock = info("Job Overview").
_("Job Name:", job.getName()).
_("User Name:", job.getUserName()).
_("Queue:", job.getQueueName()).
_("State:", job.getState()).
_("Uberized:", job.isUber()).
_("Submitted:", new Date(job.getSubmitTime())).
_("Started:", new Date(job.getStartTime())).
_("Finished:", new Date(job.getFinishTime())).
_("Elapsed:", StringUtils.formatTime(
Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));
String amString =
amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
// todo - switch to use JobInfo
List<String> diagnostics = j.getDiagnostics();
if(diagnostics != null && !diagnostics.isEmpty()) {
StringBuffer b = new StringBuffer();
for(String diag: diagnostics) {
b.append(addTaskLinks(diag));
}
infoBlock._r("Diagnostics:", b.toString());
}
if(job.getNumMaps() > 0) {
infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
}
if(job.getNumReduces() > 0) {
infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
}
for (ConfEntryInfo entry : job.getAcls()) {
infoBlock._("ACL "+entry.getName()+":", entry.getValue());
}
DIV<Hamlet> div = html.
_(InfoBlock.class).
div(_INFO_WRAP);
// MRAppMasters Table
TABLE<DIV<Hamlet>> table = div.table("#job");
table.
tr().
th(amString).
_().
tr().
th(_TH, "Attempt Number").
th(_TH, "Start Time").
th(_TH, "Node").
th(_TH, "Logs").
_();
boolean odd = false;
for (AMInfo amInfo : amInfos) {
AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
job.getId(), job.getUserName(), "", "");
table.tr((odd = !odd) ? _ODD : _EVEN).
td(String.valueOf(attempt.getAttemptId())).
td(new Date(attempt.getStartTime()).toString()).
td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(),
attempt.getNodeHttpAddress()),
attempt.getNodeHttpAddress())._().
td().a(".logslink", url(attempt.getShortLogsLink()),
"logs")._().
_();
}
table._();
div._();
html.div(_INFO_WRAP).
// Tasks table
table("#job").
tr().
th(_TH, "Task Type").
th(_TH, "Total").
th(_TH, "Complete")._().
tr(_ODD).
th().
a(url("tasks", jid, "m"), "Map")._().
          td(String.valueOf(job.getMapsTotal())).
          td(String.valueOf(job.getMapsCompleted()))._().
tr(_EVEN).
th().
a(url("tasks", jid, "r"), "Reduce")._().
          td(String.valueOf(job.getReducesTotal())).
          td(String.valueOf(job.getReducesCompleted()))._()
._().
// Attempts table
table("#job").
tr().
th(_TH, "Attempt Type").
th(_TH, "Failed").
th(_TH, "Killed").
th(_TH, "Successful")._().
tr(_ODD).
th("Maps").
td().a(url("attempts", jid, "m",
TaskAttemptStateUI.FAILED.toString()),
String.valueOf(job.getFailedMapAttempts()))._().
td().a(url("attempts", jid, "m",
TaskAttemptStateUI.KILLED.toString()),
String.valueOf(job.getKilledMapAttempts()))._().
td().a(url("attempts", jid, "m",
TaskAttemptStateUI.SUCCESSFUL.toString()),
String.valueOf(job.getSuccessfulMapAttempts()))._().
_().
tr(_EVEN).
th("Reduces").
td().a(url("attempts", jid, "r",
TaskAttemptStateUI.FAILED.toString()),
String.valueOf(job.getFailedReduceAttempts()))._().
td().a(url("attempts", jid, "r",
TaskAttemptStateUI.KILLED.toString()),
String.valueOf(job.getKilledReduceAttempts()))._().
td().a(url("attempts", jid, "r",
TaskAttemptStateUI.SUCCESSFUL.toString()),
String.valueOf(job.getSuccessfulReduceAttempts()))._().
_().
_().
_();
}
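  /**
   * Replace any task IDs that appear in the diagnostic text with HTML links
   * to the corresponding task page so they can be followed from the job
   * overview.
   */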
static String addTaskLinks(String text) {
return TaskID.taskIdPattern.matcher(text).replaceAll(
"<a href=\"/jobhistory/task/$0\">$0</a>");
}
}
| 7,930 | 36.234742 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.inject.Inject;
/**
* The navigation block for the history server
*/
public class HsNavBlock extends HtmlBlock {
final App app;
@Inject HsNavBlock(App app) { this.app = app; }
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override protected void render(Block html) {
DIV<Hamlet> nav = html.
div("#nav").
h3("Application").
ul().
li().a(url("about"), "About")._().
li().a(url("app"), "Jobs")._()._();
if (app.getJob() != null) {
String jobid = MRApps.toString(app.getJob().getID());
nav.
h3("Job").
ul().
li().a(url("job", jobid), "Overview")._().
li().a(url("jobcounters", jobid), "Counters")._().
li().a(url("conf", jobid), "Configuration")._().
li().a(url("tasks", jobid, "m"), "Map tasks")._().
li().a(url("tasks", jobid, "r"), "Reduce tasks")._()._();
if (app.getTask() != null) {
String taskid = MRApps.toString(app.getTask().getID());
nav.
h3("Task").
ul().
li().a(url("task", taskid), "Task Overview")._().
li().a(url("taskcounters", taskid), "Counters")._()._();
}
}
nav.
h3("Tools").
ul().
li().a("/conf", "Configuration")._().
li().a("/logs", "Local logs")._().
li().a("/stacks", "Server stacks")._().
li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
}
}
| 2,674 | 34.197368 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
import org.apache.hadoop.mapreduce.v2.app.webapp.SingleCounterBlock;
import org.apache.hadoop.yarn.webapp.SubView;
/**
 * Render the page for a single counter.
*/
public class HsSingleCounterPage extends HsView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
setActiveNavColumnForTask();
set(DATATABLES_ID, "singleCounter");
set(initID(DATATABLES, "singleCounter"), counterTableInit());
setTableStyles(html, "singleCounter");
}
/**
 * @return The end of a JavaScript map that is the jQuery DataTable
 *         configuration for the single counter table. The table is assumed to
 *         be rendered by the class returned from {@link #content()}
*/
private String counterTableInit() {
return tableInit().
append(", aoColumnDefs:[").
append("{'sType':'title-numeric', 'aTargets': [ 1 ] }").
append("]}").
toString();
}
/**
 * The content of this page is the SingleCounterBlock now.
 * @return SingleCounterBlock.class
*/
@Override protected Class<? extends SubView> content() {
return SingleCounterBlock.class;
}
}
| 2,154 | 32.671875 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
import java.util.Collection;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.common.base.Joiner;
import com.google.inject.Inject;
/**
 * A page that shows the status of a given task.
*/
public class HsTaskPage extends HsView {
/**
* A Block of HTML that will render a given task attempt.
*/
static class AttemptsBlock extends HtmlBlock {
final App app;
@Inject
AttemptsBlock(App ctx) {
app = ctx;
}
@Override
protected void render(Block html) {
if (!isValidRequest()) {
html.
h2($(TITLE));
return;
}
TaskType type = null;
String symbol = $(TASK_TYPE);
if (!symbol.isEmpty()) {
type = MRApps.taskType(symbol);
} else {
type = app.getTask().getType();
}
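      // Reduce attempts get extra shuffle/merge columns, so the task type
      // determines both the header layout and the per-row data below.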
TR<THEAD<TABLE<Hamlet>>> headRow = html.
table("#attempts").
thead().
tr();
headRow.
th(".id", "Attempt").
th(".state", "State").
th(".status", "Status").
th(".node", "Node").
th(".logs", "Logs").
th(".tsh", "Start Time");
if(type == TaskType.REDUCE) {
headRow.th("Shuffle Finish Time");
headRow.th("Merge Finish Time");
}
headRow.th("Finish Time"); //Attempt
if(type == TaskType.REDUCE) {
headRow.th("Elapsed Time Shuffle"); //Attempt
headRow.th("Elapsed Time Merge"); //Attempt
headRow.th("Elapsed Time Reduce"); //Attempt
}
headRow.th("Elapsed Time").
th(".note", "Note");
TBODY<TABLE<Hamlet>> tbody = headRow._()._().tbody();
// Write all the data into a JavaScript array of arrays for JQuery
// DataTables to display
StringBuilder attemptsTableData = new StringBuilder("[\n");
for (TaskAttempt attempt : getTaskAttempts()) {
final TaskAttemptInfo ta = new TaskAttemptInfo(attempt, false);
String taid = ta.getId();
String nodeHttpAddr = ta.getNode();
String containerIdString = ta.getAssignedContainerIdStr();
String nodeIdString = attempt.getAssignedContainerMgrAddress();
String nodeRackName = ta.getRack();
long attemptStartTime = ta.getStartTime();
long shuffleFinishTime = -1;
long sortFinishTime = -1;
long attemptFinishTime = ta.getFinishTime();
long elapsedShuffleTime = -1;
long elapsedSortTime = -1;
long elapsedReduceTime = -1;
if(type == TaskType.REDUCE) {
shuffleFinishTime = attempt.getShuffleFinishTime();
sortFinishTime = attempt.getSortFinishTime();
elapsedShuffleTime =
Times.elapsed(attemptStartTime, shuffleFinishTime, false);
elapsedSortTime =
Times.elapsed(shuffleFinishTime, sortFinishTime, false);
elapsedReduceTime =
Times.elapsed(sortFinishTime, attemptFinishTime, false);
}
long attemptElapsed =
Times.elapsed(attemptStartTime, attemptFinishTime, false);
TaskId taskId = attempt.getID().getTaskId();
attemptsTableData.append("[\"")
.append(getAttemptId(taskId, ta)).append("\",\"")
.append(ta.getState()).append("\",\"")
.append(StringEscapeUtils.escapeJavaScript(
StringEscapeUtils.escapeHtml(ta.getStatus()))).append("\",\"")
.append("<a class='nodelink' href='" + MRWebAppUtil.getYARNWebappScheme() + nodeHttpAddr + "'>")
.append(nodeRackName + "/" + nodeHttpAddr + "</a>\",\"")
.append("<a class='logslink' href='").append(url("logs", nodeIdString
, containerIdString, taid, app.getJob().getUserName()))
.append("'>logs</a>\",\"")
.append(attemptStartTime).append("\",\"");
if(type == TaskType.REDUCE) {
attemptsTableData.append(shuffleFinishTime).append("\",\"")
.append(sortFinishTime).append("\",\"");
}
attemptsTableData.append(attemptFinishTime).append("\",\"");
if(type == TaskType.REDUCE) {
attemptsTableData.append(elapsedShuffleTime).append("\",\"")
.append(elapsedSortTime).append("\",\"")
.append(elapsedReduceTime).append("\",\"");
}
attemptsTableData.append(attemptElapsed).append("\",\"")
.append(StringEscapeUtils.escapeJavaScript(
StringEscapeUtils.escapeHtml(ta.getNote())))
.append("\"],\n");
}
//Remove the last comma and close off the array of arrays
if(attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
attemptsTableData.delete(attemptsTableData.length()-2, attemptsTableData.length()-1);
}
attemptsTableData.append("]");
html.script().$type("text/javascript").
_("var attemptsTableData=" + attemptsTableData)._();
TR<TFOOT<TABLE<Hamlet>>> footRow = tbody._().tfoot().tr();
footRow.
th().input("search_init").$type(InputType.text).
$name("attempt_name").$value("Attempt")._()._().
th().input("search_init").$type(InputType.text).
$name("attempt_state").$value("State")._()._().
th().input("search_init").$type(InputType.text).
$name("attempt_status").$value("Status")._()._().
th().input("search_init").$type(InputType.text).
$name("attempt_node").$value("Node")._()._().
th().input("search_init").$type(InputType.text).
$name("attempt_node").$value("Logs")._()._().
th().input("search_init").$type(InputType.text).
$name("attempt_start_time").$value("Start Time")._()._();
if(type == TaskType.REDUCE) {
footRow.
th().input("search_init").$type(InputType.text).
$name("shuffle_time").$value("Shuffle Time")._()._();
footRow.
th().input("search_init").$type(InputType.text).
$name("merge_time").$value("Merge Time")._()._();
}
footRow.
th().input("search_init").$type(InputType.text).
$name("attempt_finish").$value("Finish Time")._()._();
if(type == TaskType.REDUCE) {
footRow.
th().input("search_init").$type(InputType.text).
$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time")._()._();
footRow.
th().input("search_init").$type(InputType.text).
$name("elapsed_merge_time").$value("Elapsed Merge Time")._()._();
footRow.
th().input("search_init").$type(InputType.text).
$name("elapsed_reduce_time").$value("Elapsed Reduce Time")._()._();
}
footRow.
th().input("search_init").$type(InputType.text).
$name("attempt_elapsed").$value("Elapsed Time")._()._().
th().input("search_init").$type(InputType.text).
$name("note").$value("Note")._()._();
footRow._()._()._();
}
protected String getAttemptId(TaskId taskId, TaskAttemptInfo ta) {
return ta.getId();
}
/**
* @return true if this is a valid request else false.
*/
protected boolean isValidRequest() {
return app.getTask() != null;
}
/**
* @return all of the attempts to render.
*/
protected Collection<TaskAttempt> getTaskAttempts() {
return app.getTask().getAttempts().values();
}
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
    //override the nav config from commonPreHead
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
    //Set up the JavaScript and CSS for the attempts table
set(DATATABLES_ID, "attempts");
set(initID(DATATABLES, "attempts"), attemptsTableInit());
set(postInitID(DATATABLES, "attempts"), attemptsPostTableInit());
setTableStyles(html, "attempts");
}
/**
* The content of this page is the attempts block
* @return AttemptsBlock.class
*/
@Override protected Class<? extends SubView> content() {
return AttemptsBlock.class;
}
/**
* @return The end of the JS map that is the jquery datatable config for the
* attempts table.
*/
private String attemptsTableInit() {
TaskType type = null;
String symbol = $(TASK_TYPE);
if (!symbol.isEmpty()) {
type = MRApps.taskType(symbol);
} else {
TaskId taskID = MRApps.toTaskID($(TASK_ID));
type = taskID.getTaskType();
}
StringBuilder b = tableInit()
.append(", 'aaData': attemptsTableData")
.append(", bDeferRender: true")
.append(", bProcessing: true")
.append("\n,aoColumnDefs:[\n")
      //logs column should not be filterable (it includes container ID which may pollute searches)
.append("\n{'aTargets': [ 4 ]")
.append(", 'bSearchable': false }")
.append("\n, {'sType':'numeric', 'aTargets': [ 0 ]")
.append(", 'mRender': parseHadoopID }")
.append("\n, {'sType':'numeric', 'aTargets': [ 5, 6")
//Column numbers are different for maps and reduces
.append(type == TaskType.REDUCE ? ", 7, 8" : "")
.append(" ], 'mRender': renderHadoopDate }")
.append("\n, {'sType':'numeric', 'aTargets': [")
.append(type == TaskType.REDUCE ? "9, 10, 11, 12" : "7")
.append(" ], 'mRender': renderHadoopElapsedTime }]")
// Sort by id upon page load
.append("\n, aaSorting: [[0, 'asc']]")
.append("}");
return b.toString();
}
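  /**
   * @return JavaScript that wires the footer search inputs of the attempts
   *         table to the DataTables per-column filter and restores the hint
   *         text when an input is left empty.
   */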
private String attemptsPostTableInit() {
return "var asInitVals = new Array();\n" +
"$('tfoot input').keyup( function () \n{"+
" attemptsDataTable.fnFilter( this.value, $('tfoot input').index(this) );\n"+
"} );\n"+
"$('tfoot input').each( function (i) {\n"+
" asInitVals[i] = this.value;\n"+
"} );\n"+
"$('tfoot input').focus( function () {\n"+
" if ( this.className == 'search_init' )\n"+
" {\n"+
" this.className = '';\n"+
" this.value = '';\n"+
" }\n"+
"} );\n"+
"$('tfoot input').blur( function (i) {\n"+
" if ( this.value == '' )\n"+
" {\n"+
" this.className = 'search_init';\n"+
" this.value = asInitVals[$('tfoot input').index(this)];\n"+
" }\n"+
"} );\n";
}
}
| 12,928 | 36.693878 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAttemptsPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.ATTEMPT_STATE;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
import org.apache.hadoop.yarn.webapp.SubView;
import com.google.inject.Inject;
/**
 * Render a page showing the attempts of a given type for a given job.
*/
public class HsAttemptsPage extends HsTaskPage {
static class FewAttemptsBlock extends HsTaskPage.AttemptsBlock {
@Inject
FewAttemptsBlock(App ctx) {
super(ctx);
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsTaskPage.AttemptsBlock#isValidRequest()
* Verify that a job is given.
*/
@Override
protected boolean isValidRequest() {
return app.getJob() != null;
}
@Override
protected String getAttemptId(TaskId taskId, TaskAttemptInfo ta) {
return "<a href='" + url("task", taskId.toString()) +
"'>" + ta.getId() + "</a>";
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsTaskPage.AttemptsBlock#getTaskAttempts()
* @return the attempts that are for a given job and a specific type/state.
*/
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
      List<TaskAttempt> fewTaskAttempts = new ArrayList<TaskAttempt>();
String taskTypeStr = $(TASK_TYPE);
TaskType taskType = MRApps.taskType(taskTypeStr);
String attemptStateStr = $(ATTEMPT_STATE);
TaskAttemptStateUI neededState = MRApps
.taskAttemptState(attemptStateStr);
Job j = app.getJob();
Map<TaskId, Task> tasks = j.getTasks(taskType);
for (Task task : tasks.values()) {
Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
for (TaskAttempt attempt : attempts.values()) {
if (neededState.correspondsTo(attempt.getState())) {
            fewTaskAttempts.add(attempt);
}
}
}
      return fewTaskAttempts;
}
}
/**
* The content will render a different set of task attempts.
* @return FewAttemptsBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return FewAttemptsBlock.class;
}
}
| 3,726 | 34.495238 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptsInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
import java.util.ArrayList;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
@XmlRootElement(name = "jobAttempts")
@XmlAccessorType(XmlAccessType.FIELD)
public class AMAttemptsInfo {
@XmlElement(name = "jobAttempt")
protected ArrayList<AMAttemptInfo> attempt = new ArrayList<AMAttemptInfo>();
public AMAttemptsInfo() {
} // JAXB needs this
public void add(AMAttemptInfo info) {
this.attempt.add(info);
}
public ArrayList<AMAttemptInfo> getAttempts() {
return this.attempt;
}
}
| 1,513 | 31.913043 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/HistoryInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
import org.apache.hadoop.util.VersionInfo;
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class HistoryInfo {
protected long startedOn;
protected String hadoopVersion;
protected String hadoopBuildVersion;
protected String hadoopVersionBuiltOn;
public HistoryInfo() {
this.startedOn = JobHistoryServer.historyServerTimeStamp;
this.hadoopVersion = VersionInfo.getVersion();
this.hadoopBuildVersion = VersionInfo.getBuildVersion();
this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
public String getHadoopVersion() {
return this.hadoopVersion;
}
public String getHadoopBuildVersion() {
return this.hadoopBuildVersion;
}
public String getHadoopVersionBuiltOn() {
return this.hadoopVersionBuiltOn;
}
public long getStartedOn() {
return this.startedOn;
}
}
| 1,906 | 30.262295 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobsInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
import java.util.ArrayList;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
@XmlRootElement(name = "jobs")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobsInfo {
protected ArrayList<JobInfo> job = new ArrayList<JobInfo>();
public JobsInfo() {
} // JAXB needs this
public void add(JobInfo jobInfo) {
this.job.add(jobInfo);
}
public ArrayList<JobInfo> getJobs() {
return this.job;
}
}
| 1,380 | 30.386364 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@XmlRootElement(name = "jobAttempt")
@XmlAccessorType(XmlAccessType.FIELD)
public class AMAttemptInfo {
protected String nodeHttpAddress;
protected String nodeId;
protected int id;
protected long startTime;
protected String containerId;
protected String logsLink;
@XmlTransient
protected String shortLogsLink;
public AMAttemptInfo() {
}
public AMAttemptInfo(AMInfo amInfo, String jobId, String user, String host,
String pathPrefix) {
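    // Default to empty strings so the web UI and JSON output never see nulls
    // when parts of the AM registration information are missing.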
this.nodeHttpAddress = "";
this.nodeId = "";
String nmHost = amInfo.getNodeManagerHost();
int nmHttpPort = amInfo.getNodeManagerHttpPort();
int nmPort = amInfo.getNodeManagerPort();
if (nmHost != null) {
this.nodeHttpAddress = nmHost + ":" + nmHttpPort;
NodeId nodeId = NodeId.newInstance(nmHost, nmPort);
this.nodeId = nodeId.toString();
}
this.id = amInfo.getAppAttemptId().getAttemptId();
this.startTime = amInfo.getStartTime();
this.containerId = "";
this.logsLink = "";
this.shortLogsLink = "";
ContainerId containerId = amInfo.getContainerId();
if (containerId != null) {
this.containerId = containerId.toString();
this.logsLink = join(host, pathPrefix,
ujoin("logs", this.nodeId, this.containerId, jobId, user));
this.shortLogsLink = ujoin("logs", this.nodeId, this.containerId,
jobId, user);
}
}
public String getNodeHttpAddress() {
return this.nodeHttpAddress;
}
public String getNodeId() {
return this.nodeId;
}
public int getAttemptId() {
return this.id;
}
public long getStartTime() {
return this.startTime;
}
public String getContainerId() {
return this.containerId;
}
public String getLogsLink() {
return this.logsLink;
}
public String getShortLogsLink() {
return this.shortLogsLink;
}
}
| 3,196 | 29.160377 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
import org.apache.hadoop.mapreduce.v2.hs.CompletedJob;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
import org.apache.hadoop.security.authorize.AccessControlList;
@XmlRootElement(name = "job")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobInfo {
protected long submitTime;
protected long startTime;
protected long finishTime;
protected String id;
protected String name;
protected String queue;
protected String user;
protected String state;
protected int mapsTotal;
protected int mapsCompleted;
protected int reducesTotal;
protected int reducesCompleted;
protected Boolean uberized;
protected String diagnostics;
protected Long avgMapTime;
protected Long avgReduceTime;
protected Long avgShuffleTime;
protected Long avgMergeTime;
protected Integer failedReduceAttempts;
protected Integer killedReduceAttempts;
protected Integer successfulReduceAttempts;
protected Integer failedMapAttempts;
protected Integer killedMapAttempts;
protected Integer successfulMapAttempts;
protected ArrayList<ConfEntryInfo> acls;
@XmlTransient
protected int numMaps;
@XmlTransient
protected int numReduces;
public JobInfo() {
}
public JobInfo(Job job) {
this.id = MRApps.toString(job.getID());
JobReport report = job.getReport();
this.mapsTotal = job.getTotalMaps();
this.mapsCompleted = job.getCompletedMaps();
this.reducesTotal = job.getTotalReduces();
this.reducesCompleted = job.getCompletedReduces();
this.submitTime = report.getSubmitTime();
this.startTime = report.getStartTime();
this.finishTime = report.getFinishTime();
this.name = job.getName().toString();
this.queue = job.getQueueName();
this.user = job.getUserName();
this.state = job.getState().toString();
this.acls = new ArrayList<ConfEntryInfo>();
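    // Detailed statistics (attempt counts, average times, diagnostics, ACLs)
    // are only populated for completed jobs loaded from the history files.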
if (job instanceof CompletedJob) {
avgMapTime = 0l;
avgReduceTime = 0l;
avgShuffleTime = 0l;
avgMergeTime = 0l;
failedReduceAttempts = 0;
killedReduceAttempts = 0;
successfulReduceAttempts = 0;
failedMapAttempts = 0;
killedMapAttempts = 0;
successfulMapAttempts = 0;
countTasksAndAttempts(job);
this.uberized = job.isUber();
this.diagnostics = "";
List<String> diagnostics = job.getDiagnostics();
if (diagnostics != null && !diagnostics.isEmpty()) {
StringBuffer b = new StringBuffer();
for (String diag : diagnostics) {
b.append(diag);
}
this.diagnostics = b.toString();
}
Map<JobACL, AccessControlList> allacls = job.getJobACLs();
if (allacls != null) {
for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
this.acls.add(new ConfEntryInfo(entry.getKey().getAclName(), entry
.getValue().getAclString()));
}
}
}
}
public long getNumMaps() {
return numMaps;
}
public long getNumReduces() {
return numReduces;
}
public Long getAvgMapTime() {
return avgMapTime;
}
public Long getAvgReduceTime() {
return avgReduceTime;
}
public Long getAvgShuffleTime() {
return avgShuffleTime;
}
public Long getAvgMergeTime() {
return avgMergeTime;
}
public Integer getFailedReduceAttempts() {
return failedReduceAttempts;
}
public Integer getKilledReduceAttempts() {
return killedReduceAttempts;
}
public Integer getSuccessfulReduceAttempts() {
return successfulReduceAttempts;
}
public Integer getFailedMapAttempts() {
return failedMapAttempts;
}
public Integer getKilledMapAttempts() {
return killedMapAttempts;
}
public Integer getSuccessfulMapAttempts() {
return successfulMapAttempts;
}
public ArrayList<ConfEntryInfo> getAcls() {
return acls;
}
public int getReducesCompleted() {
return this.reducesCompleted;
}
public int getReducesTotal() {
return this.reducesTotal;
}
public int getMapsCompleted() {
return this.mapsCompleted;
}
public int getMapsTotal() {
return this.mapsTotal;
}
public String getState() {
return this.state;
}
public String getUserName() {
return this.user;
}
public String getName() {
return this.name;
}
public String getQueueName() {
return this.queue;
}
public String getId() {
return this.id;
}
public long getSubmitTime() {
return this.submitTime;
}
public long getStartTime() {
return this.startTime;
}
public long getFinishTime() {
return this.finishTime;
}
public Boolean isUber() {
return this.uberized;
}
public String getDiagnostics() {
return this.diagnostics;
}
/**
* Go through a job and update the member variables with counts for
* information to output in the page.
*
* @param job
* the job to get counts for.
*/
private void countTasksAndAttempts(Job job) {
numReduces = 0;
numMaps = 0;
final Map<TaskId, Task> tasks = job.getTasks();
if (tasks == null) {
return;
}
for (Task task : tasks.values()) {
// Attempts counts
Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
int successful, failed, killed;
for (TaskAttempt attempt : attempts.values()) {
successful = 0;
failed = 0;
killed = 0;
if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
// Do Nothing
} else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
// Do Nothing
} else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt
.getState())) {
++successful;
} else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
++failed;
} else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
++killed;
}
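        // Accumulate the per-attempt result into the map or reduce totals and
        // sum elapsed times (in milliseconds) for the averages computed below.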
switch (task.getType()) {
case MAP:
successfulMapAttempts += successful;
failedMapAttempts += failed;
killedMapAttempts += killed;
if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
numMaps++;
avgMapTime += (attempt.getFinishTime() - attempt.getLaunchTime());
}
break;
case REDUCE:
successfulReduceAttempts += successful;
failedReduceAttempts += failed;
killedReduceAttempts += killed;
if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
numReduces++;
avgShuffleTime += (attempt.getShuffleFinishTime() - attempt
.getLaunchTime());
avgMergeTime += attempt.getSortFinishTime()
- attempt.getShuffleFinishTime();
avgReduceTime += (attempt.getFinishTime() - attempt
.getSortFinishTime());
}
break;
}
}
}
if (numMaps > 0) {
avgMapTime = avgMapTime / numMaps;
}
if (numReduces > 0) {
avgReduceTime = avgReduceTime / numReduces;
avgShuffleTime = avgShuffleTime / numReduces;
avgMergeTime = avgMergeTime / numReduces;
}
}
}
| 8,880 | 27.015773 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/client/HSAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol;
import org.apache.hadoop.mapreduce.v2.hs.HSProxies;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@Private
public class HSAdmin extends Configured implements Tool {
public HSAdmin() {
super();
}
public HSAdmin(JobConf conf) {
super(conf);
}
@Override
public void setConf(Configuration conf) {
if (conf != null) {
conf = addSecurityConfiguration(conf);
}
super.setConf(conf);
}
private Configuration addSecurityConfiguration(Configuration conf) {
conf = new JobConf(conf);
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(JHAdminConfig.MR_HISTORY_PRINCIPAL, ""));
return conf;
}
/**
* Displays format of commands.
*
* @param cmd
* The command that is being executed.
*/
private static void printUsage(String cmd) {
if ("-refreshUserToGroupsMappings".equals(cmd)) {
System.err
.println("Usage: mapred hsadmin [-refreshUserToGroupsMappings]");
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
System.err
.println("Usage: mapred hsadmin [-refreshSuperUserGroupsConfiguration]");
} else if ("-refreshAdminAcls".equals(cmd)) {
System.err.println("Usage: mapred hsadmin [-refreshAdminAcls]");
} else if ("-refreshLoadedJobCache".equals(cmd)) {
System.err.println("Usage: mapred hsadmin [-refreshLoadedJobCache]");
} else if ("-refreshJobRetentionSettings".equals(cmd)) {
System.err
.println("Usage: mapred hsadmin [-refreshJobRetentionSettings]");
} else if ("-refreshLogRetentionSettings".equals(cmd)) {
System.err
.println("Usage: mapred hsadmin [-refreshLogRetentionSettings]");
} else if ("-getGroups".equals(cmd)) {
System.err.println("Usage: mapred hsadmin" + " [-getGroups [username]]");
} else {
System.err.println("Usage: mapred hsadmin");
System.err.println(" [-refreshUserToGroupsMappings]");
System.err.println(" [-refreshSuperUserGroupsConfiguration]");
System.err.println(" [-refreshAdminAcls]");
System.err.println(" [-refreshLoadedJobCache]");
System.err.println(" [-refreshJobRetentionSettings]");
System.err.println(" [-refreshLogRetentionSettings]");
System.err.println(" [-getGroups [username]]");
System.err.println(" [-help [cmd]]");
System.err.println();
ToolRunner.printGenericCommandUsage(System.err);
}
}
private static void printHelp(String cmd) {
String summary = "hsadmin is the command to execute Job History server administrative commands.\n"
+ "The full syntax is: \n\n"
+ "mapred hsadmin"
+ " [-refreshUserToGroupsMappings]"
+ " [-refreshSuperUserGroupsConfiguration]"
+ " [-refreshAdminAcls]"
+ " [-refreshLoadedJobCache]"
+ " [-refreshLogRetentionSettings]"
+ " [-refreshJobRetentionSettings]"
+ " [-getGroups [username]]" + " [-help [cmd]]\n";
String refreshUserToGroupsMappings = "-refreshUserToGroupsMappings: Refresh user-to-groups mappings\n";
String refreshSuperUserGroupsConfiguration = "-refreshSuperUserGroupsConfiguration: Refresh superuser proxy groups mappings\n";
String refreshAdminAcls = "-refreshAdminAcls: Refresh acls for administration of Job history server\n";
String refreshLoadedJobCache = "-refreshLoadedJobCache: Refresh loaded job cache of Job history server\n";
String refreshJobRetentionSettings = "-refreshJobRetentionSettings:" +
"Refresh job history period,job cleaner settings\n";
String refreshLogRetentionSettings = "-refreshLogRetentionSettings:" +
"Refresh log retention period and log retention check interval\n";
String getGroups = "-getGroups [username]: Get the groups which given user belongs to\n";
String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n"
+ "\t\tis specified.\n";
if ("refreshUserToGroupsMappings".equals(cmd)) {
System.out.println(refreshUserToGroupsMappings);
} else if ("help".equals(cmd)) {
System.out.println(help);
} else if ("refreshSuperUserGroupsConfiguration".equals(cmd)) {
System.out.println(refreshSuperUserGroupsConfiguration);
} else if ("refreshAdminAcls".equals(cmd)) {
System.out.println(refreshAdminAcls);
} else if ("refreshLoadedJobCache".equals(cmd)) {
System.out.println(refreshLoadedJobCache);
} else if ("refreshJobRetentionSettings".equals(cmd)) {
System.out.println(refreshJobRetentionSettings);
} else if ("refreshLogRetentionSettings".equals(cmd)) {
System.out.println(refreshLogRetentionSettings);
} else if ("getGroups".equals(cmd)) {
System.out.println(getGroups);
} else {
System.out.println(summary);
System.out.println(refreshUserToGroupsMappings);
System.out.println(refreshSuperUserGroupsConfiguration);
System.out.println(refreshAdminAcls);
System.out.println(refreshLoadedJobCache);
System.out.println(refreshJobRetentionSettings);
System.out.println(refreshLogRetentionSettings);
System.out.println(getGroups);
System.out.println(help);
System.out.println();
ToolRunner.printGenericCommandUsage(System.out);
}
}
private int getGroups(String[] usernames) throws IOException {
// Get groups users belongs to
if (usernames.length == 0) {
usernames = new String[] { UserGroupInformation.getCurrentUser()
.getUserName() };
}
// Get the current configuration
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
GetUserMappingsProtocol getUserMappingProtocol = HSProxies.createProxy(
conf, address, GetUserMappingsProtocol.class,
UserGroupInformation.getCurrentUser());
for (String username : usernames) {
StringBuilder sb = new StringBuilder();
sb.append(username + " :");
for (String group : getUserMappingProtocol.getGroupsForUser(username)) {
sb.append(" ");
sb.append(group);
}
System.out.println(sb);
}
return 0;
}
private int refreshUserToGroupsMappings() throws IOException {
// Get the current configuration
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
RefreshUserMappingsProtocol refreshProtocol = HSProxies.createProxy(conf,
address, RefreshUserMappingsProtocol.class,
UserGroupInformation.getCurrentUser());
// Refresh the user-to-groups mappings
refreshProtocol.refreshUserToGroupsMappings();
return 0;
}
private int refreshSuperUserGroupsConfiguration() throws IOException {
// Refresh the super-user groups
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
RefreshUserMappingsProtocol refreshProtocol = HSProxies.createProxy(conf,
address, RefreshUserMappingsProtocol.class,
UserGroupInformation.getCurrentUser());
// Refresh the super-user group mappings
refreshProtocol.refreshSuperUserGroupsConfiguration();
return 0;
}
private int refreshAdminAcls() throws IOException {
// Refresh the admin acls
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
HSAdminRefreshProtocol refreshProtocol = HSProxies.createProxy(conf,
address, HSAdminRefreshProtocol.class,
UserGroupInformation.getCurrentUser());
refreshProtocol.refreshAdminAcls();
return 0;
}
private int refreshLoadedJobCache() throws IOException {
// Refresh the loaded job cache
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
HSAdminRefreshProtocol refreshProtocol = HSProxies.createProxy(conf,
address, HSAdminRefreshProtocol.class,
UserGroupInformation.getCurrentUser());
refreshProtocol.refreshLoadedJobCache();
return 0;
}
private int refreshJobRetentionSettings() throws IOException {
// Refresh job retention settings
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
HSAdminRefreshProtocol refreshProtocol = HSProxies.createProxy(conf,
address, HSAdminRefreshProtocol.class,
UserGroupInformation.getCurrentUser());
refreshProtocol.refreshJobRetentionSettings();
return 0;
}
private int refreshLogRetentionSettings() throws IOException {
// Refresh log retention settings
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
HSAdminRefreshProtocol refreshProtocol = HSProxies.createProxy(conf,
address, HSAdminRefreshProtocol.class,
UserGroupInformation.getCurrentUser());
refreshProtocol.refreshLogRetentionSettings();
return 0;
}
@Override
public int run(String[] args) throws Exception {
if (args.length < 1) {
printUsage("");
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = args[i++];
if ("-refreshUserToGroupsMappings".equals(cmd)
|| "-refreshSuperUserGroupsConfiguration".equals(cmd)
|| "-refreshAdminAcls".equals(cmd)
|| "-refreshLoadedJobCache".equals(cmd)
|| "-refreshJobRetentionSettings".equals(cmd)
|| "-refreshLogRetentionSettings".equals(cmd)) {
if (args.length != 1) {
printUsage(cmd);
return exitCode;
}
}
exitCode = 0;
if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings();
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration();
} else if ("-refreshAdminAcls".equals(cmd)) {
exitCode = refreshAdminAcls();
} else if ("-refreshLoadedJobCache".equals(cmd)) {
exitCode = refreshLoadedJobCache();
} else if ("-refreshJobRetentionSettings".equals(cmd)) {
exitCode = refreshJobRetentionSettings();
} else if ("-refreshLogRetentionSettings".equals(cmd)) {
exitCode = refreshLogRetentionSettings();
} else if ("-getGroups".equals(cmd)) {
String[] usernames = Arrays.copyOfRange(args, i, args.length);
exitCode = getGroups(usernames);
} else if ("-help".equals(cmd)) {
if (i < args.length) {
printHelp(args[i]);
} else {
printHelp("");
}
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("");
}
return exitCode;
}
public static void main(String[] args) throws Exception {
JobConf conf = new JobConf();
int result = ToolRunner.run(new HSAdmin(conf), args);
System.exit(result);
}
}
| 13,301 | 36.365169 | 131 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.server;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.WritableRpcEngine;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
import org.apache.hadoop.mapreduce.v2.api.HSAdminProtocol;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocolPB;
import org.apache.hadoop.mapreduce.v2.app.security.authorize.ClientHSPolicyProvider;
import org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger;
import org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger.AuditConstants;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.HSAdminRefreshProtocolService;
import org.apache.hadoop.mapreduce.v2.hs.protocolPB.HSAdminRefreshProtocolServerSideTranslatorPB;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
@Private
public class HSAdminServer extends AbstractService implements HSAdminProtocol {
private static final Log LOG = LogFactory.getLog(HSAdminServer.class);
private AccessControlList adminAcl;
private AggregatedLogDeletionService aggLogDelService = null;
/** The RPC server that listens to requests from clients */
protected RPC.Server clientRpcServer;
protected InetSocketAddress clientRpcAddress;
private static final String HISTORY_ADMIN_SERVER = "HSAdminServer";
private JobHistory jobHistoryService = null;
private UserGroupInformation loginUGI;
public HSAdminServer(AggregatedLogDeletionService aggLogDelService,
JobHistory jobHistoryService) {
super(HSAdminServer.class.getName());
this.aggLogDelService = aggLogDelService;
this.jobHistoryService = jobHistoryService;
}
@Override
public void serviceInit(Configuration conf) throws Exception {
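    // Wrap each admin-facing protocol in its protobuf translator so that the
    // user-to-group refresh, group lookup and HS refresh services can all be
    // served from the single client RPC server built below.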
RPC.setProtocolEngine(conf, RefreshUserMappingsProtocolPB.class,
ProtobufRpcEngine.class);
RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB(
this);
BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService
.newReflectiveBlockingService(refreshUserMappingXlator);
GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = new GetUserMappingsProtocolServerSideTranslatorPB(
this);
BlockingService getUserMappingService = GetUserMappingsProtocolService
.newReflectiveBlockingService(getUserMappingXlator);
HSAdminRefreshProtocolServerSideTranslatorPB refreshHSAdminProtocolXlator = new HSAdminRefreshProtocolServerSideTranslatorPB(
this);
BlockingService refreshHSAdminProtocolService = HSAdminRefreshProtocolService
.newReflectiveBlockingService(refreshHSAdminProtocolXlator);
WritableRpcEngine.ensureInitialized();
clientRpcAddress = conf.getSocketAddr(
JHAdminConfig.MR_HISTORY_BIND_HOST,
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
clientRpcServer = new RPC.Builder(conf)
.setProtocol(RefreshUserMappingsProtocolPB.class)
.setInstance(refreshUserMappingService)
.setBindAddress(clientRpcAddress.getHostName())
.setPort(clientRpcAddress.getPort()).setVerbose(false).build();
addProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService);
addProtocol(conf, HSAdminRefreshProtocolPB.class,
refreshHSAdminProtocolService);
// Enable service authorization?
if (conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
clientRpcServer.refreshServiceAcl(conf, new ClientHSPolicyProvider());
}
adminAcl = new AccessControlList(conf.get(JHAdminConfig.JHS_ADMIN_ACL,
JHAdminConfig.DEFAULT_JHS_ADMIN_ACL));
}
@Override
protected void serviceStart() throws Exception {
if (UserGroupInformation.isSecurityEnabled()) {
loginUGI = UserGroupInformation.getLoginUser();
} else {
loginUGI = UserGroupInformation.getCurrentUser();
}
clientRpcServer.start();
}
@VisibleForTesting
UserGroupInformation getLoginUGI() {
return loginUGI;
}
@VisibleForTesting
void setLoginUGI(UserGroupInformation ugi) {
loginUGI = ugi;
}
@Override
protected void serviceStop() throws Exception {
if (clientRpcServer != null) {
clientRpcServer.stop();
}
}
private void addProtocol(Configuration conf, Class<?> protocol,
BlockingService blockingService) throws IOException {
RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
clientRpcServer.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol,
blockingService);
}
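  /**
   * Verify that the calling user is allowed by the configured admin ACL.
   * Rejected calls are audit-logged and fail with an AccessControlException.
   */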
private UserGroupInformation checkAcls(String method) throws IOException {
UserGroupInformation user;
try {
user = UserGroupInformation.getCurrentUser();
} catch (IOException ioe) {
LOG.warn("Couldn't get current user", ioe);
HSAuditLogger.logFailure("UNKNOWN", method, adminAcl.toString(),
HISTORY_ADMIN_SERVER, "Couldn't get current user");
throw ioe;
}
if (!adminAcl.isUserAllowed(user)) {
LOG.warn("User " + user.getShortUserName() + " doesn't have permission"
+ " to call '" + method + "'");
HSAuditLogger.logFailure(user.getShortUserName(), method,
adminAcl.toString(), HISTORY_ADMIN_SERVER,
AuditConstants.UNAUTHORIZED_USER);
throw new AccessControlException("User " + user.getShortUserName()
+ " doesn't have permission" + " to call '" + method + "'");
}
LOG.info("HS Admin: " + method + " invoked by user "
+ user.getShortUserName());
return user;
}
@Override
public String[] getGroupsForUser(String user) throws IOException {
return UserGroupInformation.createRemoteUser(user).getGroupNames();
}
@Override
public void refreshUserToGroupsMappings() throws IOException {
UserGroupInformation user = checkAcls("refreshUserToGroupsMappings");
Groups.getUserToGroupsMappingService().refresh();
HSAuditLogger.logSuccess(user.getShortUserName(),
"refreshUserToGroupsMappings", HISTORY_ADMIN_SERVER);
}
@Override
public void refreshSuperUserGroupsConfiguration() throws IOException {
UserGroupInformation user = checkAcls("refreshSuperUserGroupsConfiguration");
ProxyUsers.refreshSuperUserGroupsConfiguration(createConf());
HSAuditLogger.logSuccess(user.getShortUserName(),
"refreshSuperUserGroupsConfiguration", HISTORY_ADMIN_SERVER);
}
protected Configuration createConf() {
return new Configuration();
}
@Override
public void refreshAdminAcls() throws IOException {
UserGroupInformation user = checkAcls("refreshAdminAcls");
Configuration conf = createConf();
adminAcl = new AccessControlList(conf.get(JHAdminConfig.JHS_ADMIN_ACL,
JHAdminConfig.DEFAULT_JHS_ADMIN_ACL));
HSAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls",
HISTORY_ADMIN_SERVER);
}
@Override
public void refreshLoadedJobCache() throws IOException {
UserGroupInformation user = checkAcls("refreshLoadedJobCache");
try {
jobHistoryService.refreshLoadedJobCache();
} catch (UnsupportedOperationException e) {
HSAuditLogger.logFailure(user.getShortUserName(),
"refreshLoadedJobCache", adminAcl.toString(), HISTORY_ADMIN_SERVER,
e.getMessage());
throw e;
}
HSAuditLogger.logSuccess(user.getShortUserName(), "refreshLoadedJobCache",
HISTORY_ADMIN_SERVER);
}
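  // The two retention refreshes below run inside loginUGI.doAs(...) so that
  // the underlying services are invoked with the history server's login
  // identity (the Kerberos login user when security is enabled, otherwise the
  // current user, as set up in serviceStart above).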
@Override
public void refreshLogRetentionSettings() throws IOException {
UserGroupInformation user = checkAcls("refreshLogRetentionSettings");
try {
loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
aggLogDelService.refreshLogRetentionSettings();
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
HSAuditLogger.logSuccess(user.getShortUserName(),
"refreshLogRetentionSettings", "HSAdminServer");
}
@Override
public void refreshJobRetentionSettings() throws IOException {
UserGroupInformation user = checkAcls("refreshJobRetentionSettings");
try {
loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
jobHistoryService.refreshJobRetentionSettings();
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
HSAuditLogger.logSuccess(user.getShortUserName(),
"refreshJobRetentionSettings", HISTORY_ADMIN_SERVER);
}
}
| 10,962 | 36.67354 | 135 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.StringReader;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.WriterAppender;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Test to make sure that command line output for
* job monitoring is correct and prints 100% for map and reduce before
* successful completion.
*/
public class TestJobMonitorAndPrint extends TestCase {
private Job job;
private Configuration conf;
private ClientProtocol clientProtocol;
@Before
public void setUp() throws IOException {
conf = new Configuration();
clientProtocol = mock(ClientProtocol.class);
Cluster cluster = mock(Cluster.class);
when(cluster.getConf()).thenReturn(conf);
when(cluster.getClient()).thenReturn(clientProtocol);
JobStatus jobStatus = new JobStatus(new JobID("job_000", 1), 0f, 0f, 0f, 0f,
State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
"tmp-jobfile", "tmp-url");
job = Job.getInstance(cluster, jobStatus, conf);
job = spy(job);
}
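  // The test below feeds two JobStatus snapshots (running, then succeeded,
  // both reporting uber mode) through the mocked ClientProtocol, attaches a
  // WriterAppender to Job's log4j logger to capture the console output of
  // monitorAndPrintJob(), and then asserts that the "map 100% reduce 100%",
  // "completed successfully" and "uber mode : true" lines were printed.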
@Test
public void testJobMonitorAndPrint() throws Exception {
JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
"tmp-queue", "tmp-jobfile", "tmp-url", true);
JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
"tmp-queue", "tmp-jobfile", "tmp-url", true);
doAnswer(
new Answer<TaskCompletionEvent[]>() {
@Override
public TaskCompletionEvent[] answer(InvocationOnMock invocation)
throws Throwable {
return new TaskCompletionEvent[0];
}
}
).when(job).getTaskCompletionEvents(anyInt(), anyInt());
doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
// setup the logger to capture all logs
Layout layout =
Logger.getRootLogger().getAppender("stdout").getLayout();
ByteArrayOutputStream os = new ByteArrayOutputStream();
WriterAppender appender = new WriterAppender(layout, os);
appender.setThreshold(Level.ALL);
Logger qlogger = Logger.getLogger(Job.class);
qlogger.addAppender(appender);
job.monitorAndPrintJob();
qlogger.removeAppender(appender);
LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
String line;
boolean foundHundred = false;
boolean foundComplete = false;
boolean foundUber = false;
String uberModeMatch = "uber mode : true";
String progressMatch = "map 100% reduce 100%";
String completionMatch = "completed successfully";
while ((line = r.readLine()) != null) {
if (line.contains(uberModeMatch)) {
foundUber = true;
}
foundHundred = line.contains(progressMatch);
if (foundHundred)
break;
}
line = r.readLine();
foundComplete = line.contains(completionMatch);
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
System.out.println("The output of job.toString() is : \n" + job.toString());
assertTrue(job.toString().contains("Number of maps: 5\n"));
assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
}
| 5,038 | 36.325926 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.mapreduce.task.reduce.Shuffle;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.ReduceTask;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapred.ShuffleConsumerPlugin;
import org.apache.hadoop.mapred.RawKeyValueIterator;
/**
 * A JUnit test verifying the availability and accessibility of the
 * shuffle-related API. It is needed for maintaining compatibility with
 * external sub-classes of ShuffleConsumerPlugin and AuxiliaryService(s)
 * like ShuffleHandler.
 *
 * This test is important for preserving the API contract with 3rd party
 * plugins.
*/
public class TestShufflePlugin<K, V> {
static class TestShuffleConsumerPlugin<K, V> implements ShuffleConsumerPlugin<K, V> {
@Override
public void init(ShuffleConsumerPlugin.Context<K, V> context) {
// just verify that Context has kept its public interface
context.getReduceId();
context.getJobConf();
context.getLocalFS();
context.getUmbilical();
context.getLocalDirAllocator();
context.getReporter();
context.getCodec();
context.getCombinerClass();
context.getCombineCollector();
context.getSpilledRecordsCounter();
context.getReduceCombineInputCounter();
context.getShuffledMapsCounter();
context.getReduceShuffleBytes();
context.getFailedShuffleCounter();
context.getMergedMapOutputsCounter();
context.getStatus();
context.getCopyPhase();
context.getMergePhase();
context.getReduceTask();
context.getMapOutputFile();
}
@Override
public void close(){
}
@Override
public RawKeyValueIterator run() throws java.io.IOException, java.lang.InterruptedException{
return null;
}
}
@Test
/**
   * A testing method instructing core Hadoop to load an external ShuffleConsumerPlugin
* as if it came from a 3rd party.
*/
public void testPluginAbility() {
try{
// create JobConf with mapreduce.job.shuffle.consumer.plugin=TestShuffleConsumerPlugin
JobConf jobConf = new JobConf();
jobConf.setClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,
TestShufflePlugin.TestShuffleConsumerPlugin.class,
ShuffleConsumerPlugin.class);
ShuffleConsumerPlugin shuffleConsumerPlugin = null;
Class<? extends ShuffleConsumerPlugin> clazz =
jobConf.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN, Shuffle.class, ShuffleConsumerPlugin.class);
assertNotNull("Unable to get " + MRConfig.SHUFFLE_CONSUMER_PLUGIN, clazz);
// load 3rd party plugin through core's factory method
shuffleConsumerPlugin = ReflectionUtils.newInstance(clazz, jobConf);
assertNotNull("Unable to load " + MRConfig.SHUFFLE_CONSUMER_PLUGIN, shuffleConsumerPlugin);
}
catch (Exception e) {
assertTrue("Threw exception:" + e, false);
}
}
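  // For reference, a third-party deployment wires its plugin the same way the
  // test above does; a minimal, hypothetical sketch (MyShufflePlugin is
  // illustrative only, not part of Hadoop):
  //
  //   JobConf jobConf = new JobConf();
  //   jobConf.setClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,
  //       MyShufflePlugin.class, ShuffleConsumerPlugin.class);
  //   Class<? extends ShuffleConsumerPlugin> clazz =
  //       jobConf.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN, Shuffle.class,
  //           ShuffleConsumerPlugin.class);
  //   ShuffleConsumerPlugin plugin = ReflectionUtils.newInstance(clazz, jobConf);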
@Test
/**
   * A testing method verifying the availability and accessibility of the API
   * that is needed for sub-classes of ShuffleConsumerPlugin.
*/
public void testConsumerApi() {
JobConf jobConf = new JobConf();
ShuffleConsumerPlugin<K, V> shuffleConsumerPlugin = new TestShuffleConsumerPlugin<K, V>();
//mock creation
ReduceTask mockReduceTask = mock(ReduceTask.class);
TaskUmbilicalProtocol mockUmbilical = mock(TaskUmbilicalProtocol.class);
Reporter mockReporter = mock(Reporter.class);
FileSystem mockFileSystem = mock(FileSystem.class);
Class<? extends org.apache.hadoop.mapred.Reducer> combinerClass = jobConf.getCombinerClass();
@SuppressWarnings("unchecked") // needed for mock with generic
CombineOutputCollector<K, V> mockCombineOutputCollector =
(CombineOutputCollector<K, V>) mock(CombineOutputCollector.class);
org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID =
mock(org.apache.hadoop.mapreduce.TaskAttemptID.class);
LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
CompressionCodec mockCompressionCodec = mock(CompressionCodec.class);
Counter mockCounter = mock(Counter.class);
TaskStatus mockTaskStatus = mock(TaskStatus.class);
Progress mockProgress = mock(Progress.class);
MapOutputFile mockMapOutputFile = mock(MapOutputFile.class);
Task mockTask = mock(Task.class);
try {
String [] dirs = jobConf.getLocalDirs();
// verify that these APIs are available through super class handler
ShuffleConsumerPlugin.Context<K, V> context =
new ShuffleConsumerPlugin.Context<K, V>(mockTaskAttemptID, jobConf, mockFileSystem,
mockUmbilical, mockLocalDirAllocator,
mockReporter, mockCompressionCodec,
combinerClass, mockCombineOutputCollector,
mockCounter, mockCounter, mockCounter,
mockCounter, mockCounter, mockCounter,
mockTaskStatus, mockProgress, mockProgress,
mockTask, mockMapOutputFile, null);
shuffleConsumerPlugin.init(context);
shuffleConsumerPlugin.run();
shuffleConsumerPlugin.close();
}
catch (Exception e) {
assertTrue("Threw exception:" + e, false);
}
// verify that these APIs are available for 3rd party plugins
mockReduceTask.getTaskID();
mockReduceTask.getJobID();
mockReduceTask.getNumMaps();
mockReduceTask.getPartition();
mockReporter.progress();
}
@Test
/**
   * A testing method verifying the availability and accessibility of the API
   * needed by AuxiliaryService(s) which are "Shuffle-Providers" (ShuffleHandler
   * and 3rd party plugins).
*/
public void testProviderApi() {
LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
JobConf mockJobConf = mock(JobConf.class);
try {
mockLocalDirAllocator.getLocalPathToRead("", mockJobConf);
}
catch (Exception e) {
assertTrue("Threw exception:" + e, false);
}
}
}
| 7,613 | 38.863874 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
public class TestJob {
@Test
public void testJobToString() throws IOException, InterruptedException {
Cluster cluster = mock(Cluster.class);
ClientProtocol client = mock(ClientProtocol.class);
when(cluster.getClient()).thenReturn(client);
JobID jobid = new JobID("1014873536921", 6);
JobStatus status = new JobStatus(jobid, 0.0f, 0.0f, 0.0f, 0.0f,
State.FAILED, JobPriority.NORMAL, "root", "TestJobToString",
"job file", "tracking url");
when(client.getJobStatus(jobid)).thenReturn(status);
when(client.getTaskReports(jobid, TaskType.MAP)).thenReturn(
new TaskReport[0]);
when(client.getTaskReports(jobid, TaskType.REDUCE)).thenReturn(
new TaskReport[0]);
when(client.getTaskCompletionEvents(jobid, 0, 10)).thenReturn(
new TaskCompletionEvent[0]);
Job job = Job.getInstance(cluster, status, new JobConf());
Assert.assertNotNull(job.toString());
}
@Test
  public void testUGICredentialsPropagation() throws Exception {
Credentials creds = new Credentials();
Token<?> token = mock(Token.class);
Text tokenService = new Text("service");
Text secretName = new Text("secret");
byte secret[] = new byte[]{};
creds.addToken(tokenService, token);
creds.addSecretKey(secretName, secret);
UserGroupInformation.getLoginUser().addCredentials(creds);
JobConf jobConf = new JobConf();
Job job = new Job(jobConf);
assertSame(token, job.getCredentials().getToken(tokenService));
assertSame(secret, job.getCredentials().getSecretKey(secretName));
}
}
| 2,926 | 37.513158 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestContextFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestContextFactory {
JobID jobId;
Configuration conf;
JobContext jobContext;
@Before
public void setUp() throws Exception {
conf = new Configuration();
jobId = new JobID("test", 1);
jobContext = new JobContextImpl(conf, jobId);
}
@Test
public void testCloneContext() throws Exception {
ContextFactory.cloneContext(jobContext, conf);
}
@Test
public void testCloneMapContext() throws Exception {
TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
TaskAttemptID taskAttemptid = new TaskAttemptID(taskId, 0);
MapContext<IntWritable, IntWritable, IntWritable, IntWritable> mapContext =
new MapContextImpl<IntWritable, IntWritable, IntWritable, IntWritable>(
conf, taskAttemptid, null, null, null, null, null);
Mapper<IntWritable, IntWritable, IntWritable, IntWritable>.Context mapperContext =
new WrappedMapper<IntWritable, IntWritable, IntWritable, IntWritable>().getMapContext(
mapContext);
ContextFactory.cloneMapContext(mapperContext, conf, null, null);
}
  @After
public void tearDown() throws Exception {
}
}
| 2,282 | 34.671875 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.filecache;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestClientDistributedCacheManager {
private static final Log LOG = LogFactory.getLog(
TestClientDistributedCacheManager.class);
private static final String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data", "/tmp")).toURI()
.toString().replace(' ', '+');
private static final String TEST_VISIBILITY_DIR =
new File(TEST_ROOT_DIR, "TestCacheVisibility").toURI()
.toString().replace(' ', '+');
private FileSystem fs;
private Path firstCacheFile;
private Path secondCacheFile;
private Path thirdCacheFile;
private Configuration conf;
@Before
public void setup() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile");
secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile");
thirdCacheFile = new Path(TEST_VISIBILITY_DIR,"thirdCachefile");
createTempFile(firstCacheFile, conf);
createTempFile(secondCacheFile, conf);
createTempFile(thirdCacheFile, conf);
}
@After
public void tearDown() throws IOException {
if (!fs.delete(firstCacheFile, false)) {
LOG.warn("Failed to delete firstcachefile");
}
if (!fs.delete(secondCacheFile, false)) {
LOG.warn("Failed to delete secondcachefile");
}
if (!fs.delete(thirdCacheFile, false)) {
LOG.warn("Failed to delete thirdCachefile");
}
}
@Test
public void testDetermineTimestamps() throws IOException {
Job job = Job.getInstance(conf);
job.addCacheFile(firstCacheFile.toUri());
job.addCacheFile(secondCacheFile.toUri());
Configuration jobConf = job.getConfiguration();
Map<URI, FileStatus> statCache = new HashMap<URI, FileStatus>();
ClientDistributedCacheManager.determineTimestamps(jobConf, statCache);
FileStatus firstStatus = statCache.get(firstCacheFile.toUri());
FileStatus secondStatus = statCache.get(secondCacheFile.toUri());
Assert.assertNotNull(firstStatus);
Assert.assertNotNull(secondStatus);
Assert.assertEquals(2, statCache.size());
String expected = firstStatus.getModificationTime() + ","
+ secondStatus.getModificationTime();
Assert.assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
}
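  // The visibility test below makes the working directory world-accessible
  // (0777) but restricts its parent TEST_ROOT_DIR to 0700. Because an
  // ancestor of the cached file is not world-accessible, the file cannot be
  // treated as public, so CACHE_FILE_VISIBILITIES is expected to be false.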
@Test
public void testDetermineCacheVisibilities() throws IOException {
Path workingdir = new Path(TEST_VISIBILITY_DIR);
fs.setWorkingDirectory(workingdir);
fs.setPermission(workingdir, new FsPermission((short)00777));
fs.setPermission(new Path(TEST_ROOT_DIR), new FsPermission((short)00700));
Job job = Job.getInstance(conf);
Path relativePath = new Path("thirdCachefile");
job.addCacheFile(relativePath.toUri());
Configuration jobConf = job.getConfiguration();
Map<URI, FileStatus> statCache = new HashMap<URI, FileStatus>();
ClientDistributedCacheManager.
determineCacheVisibilities(jobConf, statCache);
Assert.assertFalse(jobConf.
getBoolean(MRJobConfig.CACHE_FILE_VISIBILITIES,true));
}
@SuppressWarnings("deprecation")
void createTempFile(Path p, Configuration conf) throws IOException {
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(fs, conf, p,
Text.class, Text.class,
CompressionType.NONE);
writer.append(new Text("text"), new Text("moretext"));
} catch(Exception e) {
throw new IOException(e.getLocalizedMessage());
} finally {
if (writer != null) {
writer.close();
}
writer = null;
}
LOG.info("created: " + p);
}
}
| 5,279 | 35.923077 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/tools/TestCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.tools;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.junit.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.doReturn;
public class TestCLI {
private static String jobIdStr = "job_1015298225799_0015";
@Test
public void testListAttemptIdsWithValidInput() throws Exception {
JobID jobId = JobID.forName(jobIdStr);
Cluster mockCluster = mock(Cluster.class);
Job job = mock(Job.class);
CLI cli = spy(new CLI());
doReturn(mockCluster).when(cli).createCluster();
when(job.getTaskReports(TaskType.MAP)).thenReturn(
getTaskReports(jobId, TaskType.MAP));
when(job.getTaskReports(TaskType.REDUCE)).thenReturn(
getTaskReports(jobId, TaskType.REDUCE));
when(mockCluster.getJob(jobId)).thenReturn(job);
int retCode_MAP = cli.run(new String[] { "-list-attempt-ids", jobIdStr,
"MAP", "running" });
// testing case insensitive behavior
int retCode_map = cli.run(new String[] { "-list-attempt-ids", jobIdStr,
"map", "running" });
int retCode_REDUCE = cli.run(new String[] { "-list-attempt-ids", jobIdStr,
"REDUCE", "running" });
int retCode_completed = cli.run(new String[] { "-list-attempt-ids",
jobIdStr, "REDUCE", "completed" });
assertEquals("MAP is a valid input,exit code should be 0", 0, retCode_MAP);
assertEquals("map is a valid input,exit code should be 0", 0, retCode_map);
assertEquals("REDUCE is a valid input,exit code should be 0", 0,
retCode_REDUCE);
assertEquals(
"REDUCE and completed are a valid inputs to -list-attempt-ids,exit code should be 0",
0, retCode_completed);
verify(job, times(2)).getTaskReports(TaskType.MAP);
verify(job, times(2)).getTaskReports(TaskType.REDUCE);
}
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
JobID jobId = JobID.forName(jobIdStr);
Cluster mockCluster = mock(Cluster.class);
Job job = mock(Job.class);
CLI cli = spy(new CLI());
doReturn(mockCluster).when(cli).createCluster();
when(mockCluster.getJob(jobId)).thenReturn(job);
int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
jobIdStr, "JOB_SETUP", "running" });
int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
jobIdStr, "JOB_CLEANUP", "running" });
int retCode_invalidTaskState = cli.run(new String[] { "-list-attempt-ids",
jobIdStr, "REDUCE", "complete" });
assertEquals("JOB_SETUP is an invalid input,exit code should be -1", -1,
retCode_JOB_SETUP);
assertEquals("JOB_CLEANUP is an invalid input,exit code should be -1", -1,
retCode_JOB_CLEANUP);
assertEquals("complete is an invalid input,exit code should be -1", -1,
retCode_invalidTaskState);
}
private TaskReport[] getTaskReports(JobID jobId, TaskType type) {
return new TaskReport[] { new TaskReport(), new TaskReport() };
}
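  // The kill test below checks the expected state handling of "-kill":
  // RUNNING and PREP jobs are killed (exit code 0, killJob() invoked once),
  // while jobs that are already KILLED or FAILED return -1 and killJob() is
  // never invoked.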
@Test
  public void testJobKill() throws Exception {
Cluster mockCluster = mock(Cluster.class);
CLI cli = spy(new CLI());
doReturn(mockCluster).when(cli).createCluster();
String jobId1 = "job_1234654654_001";
String jobId2 = "job_1234654654_002";
String jobId3 = "job_1234654654_003";
String jobId4 = "job_1234654654_004";
Job mockJob1 = mockJob(mockCluster, jobId1, State.RUNNING);
Job mockJob2 = mockJob(mockCluster, jobId2, State.KILLED);
Job mockJob3 = mockJob(mockCluster, jobId3, State.FAILED);
Job mockJob4 = mockJob(mockCluster, jobId4, State.PREP);
int exitCode1 = cli.run(new String[] { "-kill", jobId1 });
assertEquals(0, exitCode1);
verify(mockJob1, times(1)).killJob();
int exitCode2 = cli.run(new String[] { "-kill", jobId2 });
assertEquals(-1, exitCode2);
verify(mockJob2, times(0)).killJob();
int exitCode3 = cli.run(new String[] { "-kill", jobId3 });
assertEquals(-1, exitCode3);
verify(mockJob3, times(0)).killJob();
int exitCode4 = cli.run(new String[] { "-kill", jobId4 });
assertEquals(0, exitCode4);
verify(mockJob4, times(1)).killJob();
}
private Job mockJob(Cluster mockCluster, String jobId, State jobState)
throws IOException, InterruptedException {
Job mockJob = mock(Job.class);
when(mockCluster.getJob(JobID.forName(jobId))).thenReturn(mockJob);
JobStatus status = new JobStatus(null, 0, 0, 0, 0, jobState,
JobPriority.HIGH, null, null, null, null);
when(mockJob.getStatus()).thenReturn(status);
return mockJob;
}
}
| 5,932 | 37.777778 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import static org.junit.Assert.*;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper.MockFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Master;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestTokenCache {
private static Configuration conf;
private static String renewer;
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
renewer = Master.getMasterPrincipal(conf);
}
@Test
public void testObtainTokens() throws Exception {
Credentials credentials = new Credentials();
FileSystem fs = mock(FileSystem.class);
TokenCache.obtainTokensForNamenodesInternal(fs, credentials, conf);
verify(fs).addDelegationTokens(eq(renewer), eq(credentials));
}
@Test
@SuppressWarnings("deprecation")
public void testBinaryCredentialsWithoutScheme() throws Exception {
testBinaryCredentials(false);
}
@Test
@SuppressWarnings("deprecation")
public void testBinaryCredentialsWithScheme() throws Exception {
testBinaryCredentials(true);
}
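  // testBinaryCredentials writes tokens for two mock filesystems into a
  // binary credentials file, points MAPREDUCE_JOB_CREDENTIALS_BINARY at it,
  // and then verifies that obtainTokensForNamenodesInternal merges the
  // file-based tokens into the live Credentials without replacing a newer
  // token that is already present, and only adds a token for a third
  // filesystem when that filesystem is actually requested.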
private void testBinaryCredentials(boolean hasScheme) throws Exception {
Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","test/build/data"));
// ick, but need fq path minus file:/
String binaryTokenFile = hasScheme
? FileSystem.getLocal(conf).makeQualified(
new Path(TEST_ROOT_DIR, "tokenFile")).toString()
: FileSystem.getLocal(conf).makeQualified(
new Path(TEST_ROOT_DIR, "tokenFile")).toUri().getPath();
MockFileSystem fs1 = createFileSystemForServiceName("service1");
MockFileSystem fs2 = createFileSystemForServiceName("service2");
MockFileSystem fs3 = createFileSystemForServiceName("service3");
// get the tokens for fs1 & fs2 and write out to binary creds file
Credentials creds = new Credentials();
Token<?> token1 = fs1.getDelegationToken(renewer);
Token<?> token2 = fs2.getDelegationToken(renewer);
creds.addToken(token1.getService(), token1);
creds.addToken(token2.getService(), token2);
// wait to set, else the obtain tokens call above will fail with FNF
conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
// re-init creds and add a newer token for fs1
creds = new Credentials();
Token<?> newerToken1 = fs1.getDelegationToken(renewer);
assertNotSame(newerToken1, token1);
creds.addToken(newerToken1.getService(), newerToken1);
checkToken(creds, newerToken1);
// get token for fs1, see that fs2's token was loaded
TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
checkToken(creds, newerToken1, token2);
// get token for fs2, nothing should change since already present
TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
checkToken(creds, newerToken1, token2);
// get token for fs3, should only add token for fs3
TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
Token<?> token3 = creds.getToken(new Text(fs3.getCanonicalServiceName()));
assertTrue(token3 != null);
checkToken(creds, newerToken1, token2, token3);
// be paranoid, check one last time that nothing changes
TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
checkToken(creds, newerToken1, token2, token3);
}
private void checkToken(Credentials creds, Token<?> ... tokens) {
assertEquals(tokens.length, creds.getAllTokens().size());
for (Token<?> token : tokens) {
Token<?> credsToken = creds.getToken(token.getService());
assertTrue(credsToken != null);
assertEquals(token, credsToken);
}
}
private MockFileSystem createFileSystemForServiceName(final String service)
throws IOException {
MockFileSystem mockFs = new MockFileSystem();
when(mockFs.getCanonicalServiceName()).thenReturn(service);
when(mockFs.getDelegationToken(any(String.class))).thenAnswer(
new Answer<Token<?>>() {
int unique = 0;
@Override
public Token<?> answer(InvocationOnMock invocation) throws Throwable {
Token<?> token = new Token<TokenIdentifier>();
token.setService(new Text(service));
// use unique value so when we restore from token storage, we can
// tell if it's really the same token
token.setKind(new Text("token" + unique++));
return token;
}
});
return mockFs;
}
@Test
public void testSingleTokenFetch() throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
String renewer = Master.getMasterPrincipal(conf);
Credentials credentials = new Credentials();
final MockFileSystem fs = new MockFileSystem();
final MockFileSystem mockFs = (MockFileSystem) fs.getRawFileSystem();
when(mockFs.getCanonicalServiceName()).thenReturn("host:0");
when(mockFs.getUri()).thenReturn(new URI("mockfs://host:0"));
Path mockPath = mock(Path.class);
when(mockPath.getFileSystem(conf)).thenReturn(mockFs);
Path[] paths = new Path[]{ mockPath, mockPath };
when(mockFs.addDelegationTokens("me", credentials)).thenReturn(null);
TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
verify(mockFs, times(1)).addDelegationTokens(renewer, credentials);
}
@Test
public void testCleanUpTokenReferral() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, "foo");
TokenCache.cleanUpTokenReferral(conf);
assertNull(conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY));
}
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException,
URISyntaxException {
Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data", "test/build/data"));
// ick, but need fq path minus file:/
String binaryTokenFile =
FileSystem.getLocal(conf)
.makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
.getPath();
MockFileSystem fs1 = createFileSystemForServiceName("service1");
Credentials creds = new Credentials();
Token<?> token1 = fs1.getDelegationToken(renewer);
creds.addToken(token1.getService(), token1);
// wait to set, else the obtain tokens call above will fail with FNF
conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
String fs_addr = fs1.getCanonicalServiceName();
Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
assertNotNull("Token for nn is null", nnt);
}
}
| 8,579 | 39.857143 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/split/TestJobSplitWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.split;
import static org.junit.Assert.assertEquals;
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.junit.Test;
public class TestJobSplitWriter {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestJobSplitWriter");
@Test
public void testMaxBlockLocationsNewSplits() throws Exception {
TEST_DIR.mkdirs();
try {
Configuration conf = new Configuration();
conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
Path submitDir = new Path(TEST_DIR.getAbsolutePath());
FileSystem fs = FileSystem.getLocal(conf);
FileSplit split = new FileSplit(new Path("/some/path"), 0, 1,
new String[] { "loc1", "loc2", "loc3", "loc4", "loc5" });
JobSplitWriter.createSplitFiles(submitDir, conf, fs,
new FileSplit[] { split });
JobSplit.TaskSplitMetaInfo[] infos =
SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf,
submitDir);
assertEquals("unexpected number of splits", 1, infos.length);
assertEquals("unexpected number of split locations",
4, infos[0].getLocations().length);
} finally {
FileUtil.fullyDelete(TEST_DIR);
}
}
@Test
public void testMaxBlockLocationsOldSplits() throws Exception {
TEST_DIR.mkdirs();
try {
Configuration conf = new Configuration();
conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
Path submitDir = new Path(TEST_DIR.getAbsolutePath());
FileSystem fs = FileSystem.getLocal(conf);
org.apache.hadoop.mapred.FileSplit split =
new org.apache.hadoop.mapred.FileSplit(new Path("/some/path"), 0, 1,
new String[] { "loc1", "loc2", "loc3", "loc4", "loc5" });
JobSplitWriter.createSplitFiles(submitDir, conf, fs,
new org.apache.hadoop.mapred.InputSplit[] { split });
JobSplit.TaskSplitMetaInfo[] infos =
SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf,
submitDir);
assertEquals("unexpected number of splits", 1, infos.length);
assertEquals("unexpected number of split locations",
4, infos[0].getLocations().length);
} finally {
FileUtil.fullyDelete(TEST_DIR);
}
}
}
| 3,400 | 38.091954 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import java.io.FilterInputStream;
import java.lang.Void;
import java.net.HttpURLConnection;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskID;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import static org.junit.Assert.*;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.util.ArrayList;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.IFileInputStream;
import org.apache.hadoop.mapred.IFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.nimbusds.jose.util.StringUtils;
/**
* Test that the Fetcher does what we expect it to.
*/
public class TestFetcher {
private static final Log LOG = LogFactory.getLog(TestFetcher.class);
JobConf job = null;
JobConf jobWithRetry = null;
TaskAttemptID id = null;
ShuffleSchedulerImpl<Text, Text> ss = null;
MergeManagerImpl<Text, Text> mm = null;
Reporter r = null;
ShuffleClientMetrics metrics = null;
ExceptionReporter except = null;
SecretKey key = null;
HttpURLConnection connection = null;
Counters.Counter allErrs = null;
final String encHash = "vFE234EIFCiBgYs2tCXY/SjT8Kg=";
final MapHost host = new MapHost("localhost", "http://localhost:8080/");
final TaskAttemptID map1ID = TaskAttemptID.forName("attempt_0_1_m_1_1");
final TaskAttemptID map2ID = TaskAttemptID.forName("attempt_0_1_m_2_1");
FileSystem fs = null;
@Rule public TestName name = new TestName();
@Before
@SuppressWarnings("unchecked") // mocked generics
public void setup() {
LOG.info(">>>> " + name.getMethodName());
job = new JobConf();
job.setBoolean(MRJobConfig.SHUFFLE_FETCH_RETRY_ENABLED, false);
jobWithRetry = new JobConf();
jobWithRetry.setBoolean(MRJobConfig.SHUFFLE_FETCH_RETRY_ENABLED, true);
id = TaskAttemptID.forName("attempt_0_1_r_1_1");
ss = mock(ShuffleSchedulerImpl.class);
mm = mock(MergeManagerImpl.class);
r = mock(Reporter.class);
metrics = mock(ShuffleClientMetrics.class);
except = mock(ExceptionReporter.class);
key = JobTokenSecretManager.createSecretKey(new byte[]{0,0,0,0});
connection = mock(HttpURLConnection.class);
allErrs = mock(Counters.Counter.class);
when(r.getCounter(anyString(), anyString())).thenReturn(allErrs);
ArrayList<TaskAttemptID> maps = new ArrayList<TaskAttemptID>(1);
maps.add(map1ID);
maps.add(map2ID);
when(ss.getMapsForHost(host)).thenReturn(maps);
}
@After
public void teardown() throws IllegalArgumentException, IOException {
LOG.info("<<<< " + name.getMethodName());
if (fs != null) {
fs.delete(new Path(name.getMethodName()),true);
}
}
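  // Most tests below share the same wiring: the mocked HttpURLConnection is
  // stubbed with the shuffle HTTP header name/version, a reply hash generated
  // from the job token secret, and an input stream carrying a serialized
  // ShuffleHeader; copyFromHost(host) is then driven against it and the
  // interactions with the ShuffleScheduler mock are verified.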
@Test
public void testReduceOutOfDiskSpace() throws Throwable {
LOG.info("testReduceOutOfDiskSpace");
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
when(connection.getInputStream()).thenReturn(in);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenThrow(new DiskErrorException("No disk space available"));
underTest.copyFromHost(host);
verify(ss).reportLocalError(any(IOException.class));
}
@Test(timeout=30000)
public void testCopyFromHostConnectionTimeout() throws Exception {
when(connection.getInputStream()).thenThrow(
new SocketTimeoutException("This is a fake timeout :)"));
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
underTest.copyFromHost(host);
verify(connection).addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
verify(allErrs).increment(1);
verify(ss).copyFailed(map1ID, host, false, false);
verify(ss).copyFailed(map2ID, host, false, false);
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
@Test
public void testCopyFromHostBogusHeader() throws Exception {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ByteArrayInputStream in = new ByteArrayInputStream(
"\u00010 BOGUS DATA\nBOGUS DATA\nBOGUS DATA\n".getBytes());
when(connection.getInputStream()).thenReturn(in);
underTest.copyFromHost(host);
verify(connection).addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
verify(allErrs).increment(1);
verify(ss).copyFailed(map1ID, host, true, false);
verify(ss).copyFailed(map2ID, host, true, false);
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
@Test
public void testCopyFromHostIncompatibleShuffleVersion() throws Exception {
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn("mapreduce").thenReturn("other").thenReturn("other");
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn("1.0.1").thenReturn("1.0.0").thenReturn("1.0.1");
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ByteArrayInputStream in = new ByteArrayInputStream(new byte[0]);
when(connection.getInputStream()).thenReturn(in);
for (int i = 0; i < 3; ++i) {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
underTest.copyFromHost(host);
}
verify(connection, times(3)).addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
verify(allErrs, times(3)).increment(1);
verify(ss, times(3)).copyFailed(map1ID, host, false, false);
verify(ss, times(3)).copyFailed(map2ID, host, false, false);
verify(ss, times(3)).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss, times(3)).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
@Test
public void testCopyFromHostIncompatibleShuffleVersionWithRetry()
throws Exception {
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn("mapreduce").thenReturn("other").thenReturn("other");
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn("1.0.1").thenReturn("1.0.0").thenReturn("1.0.1");
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ByteArrayInputStream in = new ByteArrayInputStream(new byte[0]);
when(connection.getInputStream()).thenReturn(in);
for (int i = 0; i < 3; ++i) {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(jobWithRetry,
id, ss, mm, r, metrics, except, key, connection);
underTest.copyFromHost(host);
}
verify(connection, times(3)).addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
verify(allErrs, times(3)).increment(1);
verify(ss, times(3)).copyFailed(map1ID, host, false, false);
verify(ss, times(3)).copyFailed(map2ID, host, false, false);
verify(ss, times(3)).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss, times(3)).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
@Test
public void testCopyFromHostWait() throws Exception {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
    // Defaults to null, which is what we want to test
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(null);
underTest.copyFromHost(host);
verify(connection)
.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH,
encHash);
verify(allErrs, never()).increment(1);
verify(ss, never()).copyFailed(map1ID, host, true, false);
verify(ss, never()).copyFailed(map2ID, host, true, false);
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testCopyFromHostCompressFailure() throws Exception {
InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
doThrow(new java.lang.InternalError()).when(immo)
.shuffle(any(MapHost.class), any(InputStream.class), anyLong(),
anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));
underTest.copyFromHost(host);
verify(connection)
.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH,
encHash);
verify(ss, times(1)).copyFailed(map1ID, host, true, false);
}
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testCopyFromHostWithRetry() throws Exception {
InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
ss = mock(ShuffleSchedulerImpl.class);
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(jobWithRetry,
id, ss, mm, r, metrics, except, key, connection, true);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
final long retryTime = Time.monotonicNow();
doAnswer(new Answer<Void>() {
public Void answer(InvocationOnMock ignore) throws IOException {
// Emulate host down for 3 seconds.
if ((Time.monotonicNow() - retryTime) <= 3000) {
throw new java.lang.InternalError();
}
return null;
}
}).when(immo).shuffle(any(MapHost.class), any(InputStream.class), anyLong(),
anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));
underTest.copyFromHost(host);
verify(ss, never()).copyFailed(any(TaskAttemptID.class),any(MapHost.class),
anyBoolean(), anyBoolean());
}
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testCopyFromHostWithRetryThenTimeout() throws Exception {
InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(jobWithRetry,
id, ss, mm, r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200)
.thenThrow(new SocketTimeoutException("forced timeout"));
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
doThrow(new IOException("forced error")).when(immo).shuffle(
any(MapHost.class), any(InputStream.class), anyLong(),
anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));
underTest.copyFromHost(host);
verify(allErrs).increment(1);
verify(ss).copyFailed(map1ID, host, false, false);
}
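// Verifies that extra bytes beyond the reserved map output size are
// detected: the first map output is reported as failed and both map
// outputs are put back for re-fetching.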
@Test
public void testCopyFromHostExtraBytes() throws Exception {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 14, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bout);
IFileOutputStream ios = new IFileOutputStream(dos);
header.write(dos);
ios.write("MAPDATA123".getBytes());
ios.finish();
ShuffleHeader header2 = new ShuffleHeader(map2ID.toString(), 14, 10, 1);
IFileOutputStream ios2 = new IFileOutputStream(dos);
header2.write(dos);
ios2.write("MAPDATA456".getBytes());
ios2.finish();
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
// 8 < 10 therefore there appear to be extra bytes in the IFileInputStream
IFileWrappedMapOutput<Text,Text> mapOut = new InMemoryMapOutput<Text, Text>(
job, map1ID, mm, 8, null, true );
IFileWrappedMapOutput<Text,Text> mapOut2 = new InMemoryMapOutput<Text, Text>(
job, map2ID, mm, 10, null, true );
when(mm.reserve(eq(map1ID), anyLong(), anyInt())).thenReturn(mapOut);
when(mm.reserve(eq(map2ID), anyLong(), anyInt())).thenReturn(mapOut2);
underTest.copyFromHost(host);
verify(allErrs).increment(1);
verify(ss).copyFailed(map1ID, host, true, false);
verify(ss, never()).copyFailed(map2ID, host, true, false);
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
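// Verifies that OnDiskMapOutput.shuffle() detects a corrupted IFile via
// a ChecksumException and that the previously shuffled on-disk file
// remains readable.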
@Test
public void testCorruptedIFile() throws Exception {
final int fetcher = 7;
Path onDiskMapOutputPath = new Path(name.getMethodName() + "/foo");
Path shuffledToDisk =
OnDiskMapOutput.getTempPath(onDiskMapOutputPath, fetcher);
fs = FileSystem.getLocal(job).getRaw();
IFileWrappedMapOutput<Text,Text> odmo =
new OnDiskMapOutput<Text,Text>(map1ID, mm, 100L, job, fetcher, true,
fs, onDiskMapOutputPath);
String mapData = "MAPDATA12345678901234567890";
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 14, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bout);
IFileOutputStream ios = new IFileOutputStream(dos);
header.write(dos);
int headerSize = dos.size();
try {
ios.write(mapData.getBytes());
} finally {
ios.close();
}
int dataSize = bout.size() - headerSize;
// Ensure that the OnDiskMapOutput shuffler can successfully read the data.
MapHost host = new MapHost("TestHost", "http://test/url");
ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
try {
// Read past the shuffle header.
bin.read(new byte[headerSize], 0, headerSize);
odmo.shuffle(host, bin, dataSize, dataSize, metrics, Reporter.NULL);
} finally {
bin.close();
}
// Now corrupt the IFile data.
byte[] corrupted = bout.toByteArray();
corrupted[headerSize + (dataSize / 2)] = 0x0;
try {
bin = new ByteArrayInputStream(corrupted);
// Read past the shuffle header.
bin.read(new byte[headerSize], 0, headerSize);
odmo.shuffle(host, bin, dataSize, dataSize, metrics, Reporter.NULL);
fail("OnDiskMapOutput.shuffle didn't detect the corrupted map partition file");
} catch(ChecksumException e) {
LOG.info("The expected checksum exception was thrown.", e);
} finally {
bin.close();
}
// Ensure that the shuffled file can be read.
IFileInputStream iFin = new IFileInputStream(fs.open(shuffledToDisk), dataSize, job);
try {
iFin.read(new byte[dataSize], 0, dataSize);
} finally {
iFin.close();
}
}
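// Verifies that shutting down a fetcher blocked on an in-memory shuffle
// closes the input stream and aborts the reserved map output.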
@Test(timeout=10000)
public void testInterruptInMemory() throws Exception {
final int FETCHER = 2;
IFileWrappedMapOutput<Text,Text> immo = spy(new InMemoryMapOutput<Text,Text>(
job, id, mm, 100, null, true));
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
final StuckInputStream in =
new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
doAnswer(new Answer<Void>() {
public Void answer(InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}).when(connection).disconnect();
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection, FETCHER);
underTest.start();
// wait for read in inputstream
in.waitForFetcher();
underTest.shutDown();
underTest.join(); // rely on test timeout to kill if stuck
assertTrue(in.wasClosedProperly());
verify(immo).abort();
}
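// Verifies that shutting down a fetcher blocked on an on-disk shuffle
// closes the stream, deletes the temporary output file and aborts the
// reserved map output.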
@Test(timeout=10000)
public void testInterruptOnDisk() throws Exception {
final int FETCHER = 7;
Path p = new Path("file:///tmp/foo");
Path pTmp = OnDiskMapOutput.getTempPath(p, FETCHER);
FileSystem mFs = mock(FileSystem.class, RETURNS_DEEP_STUBS);
IFileWrappedMapOutput<Text,Text> odmo =
spy(new OnDiskMapOutput<Text,Text>(map1ID, mm, 100L, job,
FETCHER, true, mFs, p));
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(odmo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
final StuckInputStream in =
new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
doAnswer(new Answer<Void>() {
public Void answer(InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}).when(connection).disconnect();
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection, FETCHER);
underTest.start();
// wait for read in inputstream
in.waitForFetcher();
underTest.shutDown();
underTest.join(); // rely on test timeout to kill if stuck
assertTrue(in.wasClosedProperly());
verify(mFs).create(eq(pTmp));
verify(mFs).delete(eq(pTmp), eq(false));
verify(odmo).abort();
}
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testCopyFromHostWithRetryUnreserve() throws Exception {
InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(jobWithRetry,
id, ss, mm, r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
// Verify that unreserve occurs if an exception happens after shuffle
// buffer is reserved.
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
doThrow(new IOException("forced error")).when(immo).shuffle(
any(MapHost.class), any(InputStream.class), anyLong(),
anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));
underTest.copyFromHost(host);
verify(immo).abort();
}
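// Fetcher subclass for testing that uses the supplied (mocked)
// HttpURLConnection instead of opening a real one; the connection can
// optionally be reopened when renewConnection is set.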
public static class FakeFetcher<K,V> extends Fetcher<K,V> {
// Whether the connection needs to be reopened.
private boolean renewConnection = false;
public FakeFetcher(JobConf job, TaskAttemptID reduceId,
ShuffleSchedulerImpl<K,V> scheduler, MergeManagerImpl<K,V> merger,
Reporter reporter, ShuffleClientMetrics metrics,
ExceptionReporter exceptionReporter, SecretKey jobTokenSecret,
HttpURLConnection connection) {
super(job, reduceId, scheduler, merger, reporter, metrics,
exceptionReporter, jobTokenSecret);
this.connection = connection;
}
public FakeFetcher(JobConf job, TaskAttemptID reduceId,
ShuffleSchedulerImpl<K,V> scheduler, MergeManagerImpl<K,V> merger,
Reporter reporter, ShuffleClientMetrics metrics,
ExceptionReporter exceptionReporter, SecretKey jobTokenSecret,
HttpURLConnection connection, boolean renewConnection) {
super(job, reduceId, scheduler, merger, reporter, metrics,
exceptionReporter, jobTokenSecret);
this.connection = connection;
this.renewConnection = renewConnection;
}
public FakeFetcher(JobConf job, TaskAttemptID reduceId,
ShuffleSchedulerImpl<K,V> scheduler, MergeManagerImpl<K,V> merger,
Reporter reporter, ShuffleClientMetrics metrics,
ExceptionReporter exceptionReporter, SecretKey jobTokenSecret,
HttpURLConnection connection, int id) {
super(job, reduceId, scheduler, merger, reporter, metrics,
exceptionReporter, jobTokenSecret, id);
this.connection = connection;
}
@Override
protected void openConnection(URL url) throws IOException {
if (null == connection || renewConnection) {
super.openConnection(url);
}
// already 'opened' the mocked connection
return;
}
}
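// FilterInputStream that blocks (busy-waits) once the wrapped stream is
// exhausted, until the reading thread is interrupted or the stream is
// closed; used to simulate a fetcher stuck on a read.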
static class StuckInputStream extends FilterInputStream {
boolean stuck = false;
volatile boolean closed = false;
StuckInputStream(InputStream inner) {
super(inner);
}
int freeze() throws IOException {
synchronized (this) {
stuck = true;
notify();
}
// connection doesn't throw InterruptedException, but may return some
// bytes geq 0 or throw an exception
while (!Thread.currentThread().isInterrupted() || closed) {
// spin
if (closed) {
throw new IOException("underlying stream closed, triggered an error");
}
}
return 0;
}
@Override
public int read() throws IOException {
int ret = super.read();
if (ret != -1) {
return ret;
}
return freeze();
}
@Override
public int read(byte[] b) throws IOException {
int ret = super.read(b);
if (ret != -1) {
return ret;
}
return freeze();
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
int ret = super.read(b, off, len);
if (ret != -1) {
return ret;
}
return freeze();
}
@Override
public void close() throws IOException {
closed = true;
}
public synchronized void waitForFetcher() throws InterruptedException {
while (!stuck) {
wait();
}
}
public boolean wasClosedProperly() {
return closed;
}
}
}
| 31,284 | 39.212082 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestEventFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Test;
import org.mockito.InOrder;
public class TestEventFetcher {
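// Verifies that the event fetcher requests map completion events in
// consecutive MAX_EVENTS_TO_FETCH-sized batches and resolves every
// returned event with the shuffle scheduler.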
@Test
public void testConsecutiveFetch()
throws IOException, InterruptedException {
final int MAX_EVENTS_TO_FETCH = 100;
TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 1);
TaskUmbilicalProtocol umbilical = mock(TaskUmbilicalProtocol.class);
when(umbilical.getMapCompletionEvents(any(JobID.class),
anyInt(), anyInt(), any(TaskAttemptID.class)))
.thenReturn(getMockedCompletionEventsUpdate(0, 0));
when(umbilical.getMapCompletionEvents(any(JobID.class),
eq(0), eq(MAX_EVENTS_TO_FETCH), eq(tid)))
.thenReturn(getMockedCompletionEventsUpdate(0, MAX_EVENTS_TO_FETCH));
when(umbilical.getMapCompletionEvents(any(JobID.class),
eq(MAX_EVENTS_TO_FETCH), eq(MAX_EVENTS_TO_FETCH), eq(tid)))
.thenReturn(getMockedCompletionEventsUpdate(MAX_EVENTS_TO_FETCH,
MAX_EVENTS_TO_FETCH));
when(umbilical.getMapCompletionEvents(any(JobID.class),
eq(MAX_EVENTS_TO_FETCH*2), eq(MAX_EVENTS_TO_FETCH), eq(tid)))
.thenReturn(getMockedCompletionEventsUpdate(MAX_EVENTS_TO_FETCH*2, 3));
@SuppressWarnings("unchecked")
ShuffleScheduler<String,String> scheduler =
mock(ShuffleScheduler.class);
ExceptionReporter reporter = mock(ExceptionReporter.class);
EventFetcherForTest<String,String> ef =
new EventFetcherForTest<String,String>(tid, umbilical, scheduler,
reporter, MAX_EVENTS_TO_FETCH);
ef.getMapCompletionEvents();
verify(reporter, never()).reportException(any(Throwable.class));
InOrder inOrder = inOrder(umbilical);
inOrder.verify(umbilical).getMapCompletionEvents(any(JobID.class),
eq(0), eq(MAX_EVENTS_TO_FETCH), eq(tid));
inOrder.verify(umbilical).getMapCompletionEvents(any(JobID.class),
eq(MAX_EVENTS_TO_FETCH), eq(MAX_EVENTS_TO_FETCH), eq(tid));
inOrder.verify(umbilical).getMapCompletionEvents(any(JobID.class),
eq(MAX_EVENTS_TO_FETCH*2), eq(MAX_EVENTS_TO_FETCH), eq(tid));
verify(scheduler, times(MAX_EVENTS_TO_FETCH*2 + 3)).resolve(
any(TaskCompletionEvent.class));
}
private MapTaskCompletionEventsUpdate getMockedCompletionEventsUpdate(
int startIdx, int numEvents) {
ArrayList<TaskCompletionEvent> tceList =
new ArrayList<TaskCompletionEvent>(numEvents);
for (int i = 0; i < numEvents; ++i) {
int eventIdx = startIdx + i;
TaskCompletionEvent tce = new TaskCompletionEvent(eventIdx,
new TaskAttemptID("12345", 1, TaskType.MAP, eventIdx, 0),
eventIdx, true, TaskCompletionEvent.Status.SUCCEEDED,
"http://somehost:8888");
tceList.add(tce);
}
TaskCompletionEvent[] events = {};
return new MapTaskCompletionEventsUpdate(tceList.toArray(events), false);
}
private static class EventFetcherForTest<K,V> extends EventFetcher<K,V> {
public EventFetcherForTest(TaskAttemptID reduce,
TaskUmbilicalProtocol umbilical, ShuffleScheduler<K,V> scheduler,
ExceptionReporter reporter, int maxEventsToFetch) {
super(reduce, umbilical, scheduler, reporter, maxEventsToFetch);
}
@Override
public int getMapCompletionEvents()
throws IOException, InterruptedException {
return super.getMapCompletionEvents();
}
}
}
| 4,980 | 40.508333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.IFile;
import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MROutputFiles;
import org.apache.hadoop.mapred.Merger;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestMerger {
private Configuration conf;
private JobConf jobConf;
private FileSystem fs;
@Before
public void setup() throws IOException {
conf = new Configuration();
jobConf = new JobConf();
fs = FileSystem.getLocal(conf);
}
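// Runs the in-memory/on-disk merge scenario with encrypted intermediate
// data enabled.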
@Test
public void testEncryptedMerger() throws Throwable {
jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
TokenCache.setEncryptedSpillKey(new byte[16], credentials);
UserGroupInformation.getCurrentUser().addCredentials(credentials);
testInMemoryAndOnDiskMerger();
}
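// Performs two in-memory merges to produce two on-disk outputs, verifies
// their merged key/value ordering, then merges the on-disk outputs and
// verifies the final ordering and cleanup.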
@Test
public void testInMemoryAndOnDiskMerger() throws Throwable {
JobID jobId = new JobID("a", 0);
TaskAttemptID reduceId1 = new TaskAttemptID(
new TaskID(jobId, TaskType.REDUCE, 0), 0);
TaskAttemptID mapId1 = new TaskAttemptID(
new TaskID(jobId, TaskType.MAP, 1), 0);
TaskAttemptID mapId2 = new TaskAttemptID(
new TaskID(jobId, TaskType.MAP, 2), 0);
LocalDirAllocator lda = new LocalDirAllocator(MRConfig.LOCAL_DIR);
MergeManagerImpl<Text, Text> mergeManager = new MergeManagerImpl<Text, Text>(
reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null,
null, null, new Progress(), new MROutputFiles());
// write map outputs
Map<String, String> map1 = new TreeMap<String, String>();
map1.put("apple", "disgusting");
map1.put("carrot", "delicious");
Map<String, String> map2 = new TreeMap<String, String>();
map1.put("banana", "pretty good");
byte[] mapOutputBytes1 = writeMapOutput(conf, map1);
byte[] mapOutputBytes2 = writeMapOutput(conf, map2);
InMemoryMapOutput<Text, Text> mapOutput1 = new InMemoryMapOutput<Text, Text>(
conf, mapId1, mergeManager, mapOutputBytes1.length, null, true);
InMemoryMapOutput<Text, Text> mapOutput2 = new InMemoryMapOutput<Text, Text>(
conf, mapId2, mergeManager, mapOutputBytes2.length, null, true);
System.arraycopy(mapOutputBytes1, 0, mapOutput1.getMemory(), 0,
mapOutputBytes1.length);
System.arraycopy(mapOutputBytes2, 0, mapOutput2.getMemory(), 0,
mapOutputBytes2.length);
// create merger and run merge
MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> inMemoryMerger =
mergeManager.createInMemoryMerger();
List<InMemoryMapOutput<Text, Text>> mapOutputs1 =
new ArrayList<InMemoryMapOutput<Text, Text>>();
mapOutputs1.add(mapOutput1);
mapOutputs1.add(mapOutput2);
inMemoryMerger.merge(mapOutputs1);
Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size());
TaskAttemptID reduceId2 = new TaskAttemptID(
new TaskID(jobId, TaskType.REDUCE, 3), 0);
TaskAttemptID mapId3 = new TaskAttemptID(
new TaskID(jobId, TaskType.MAP, 4), 0);
TaskAttemptID mapId4 = new TaskAttemptID(
new TaskID(jobId, TaskType.MAP, 5), 0);
// write map outputs
Map<String, String> map3 = new TreeMap<String, String>();
map3.put("apple", "awesome");
map3.put("carrot", "amazing");
Map<String, String> map4 = new TreeMap<String, String>();
map4.put("banana", "bla");
byte[] mapOutputBytes3 = writeMapOutput(conf, map3);
byte[] mapOutputBytes4 = writeMapOutput(conf, map4);
InMemoryMapOutput<Text, Text> mapOutput3 = new InMemoryMapOutput<Text, Text>(
conf, mapId3, mergeManager, mapOutputBytes3.length, null, true);
InMemoryMapOutput<Text, Text> mapOutput4 = new InMemoryMapOutput<Text, Text>(
conf, mapId4, mergeManager, mapOutputBytes4.length, null, true);
System.arraycopy(mapOutputBytes3, 0, mapOutput3.getMemory(), 0,
mapOutputBytes3.length);
System.arraycopy(mapOutputBytes4, 0, mapOutput4.getMemory(), 0,
mapOutputBytes4.length);
// create merger and run merge
MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> inMemoryMerger2 =
mergeManager.createInMemoryMerger();
List<InMemoryMapOutput<Text, Text>> mapOutputs2 =
new ArrayList<InMemoryMapOutput<Text, Text>>();
mapOutputs2.add(mapOutput3);
mapOutputs2.add(mapOutput4);
inMemoryMerger2.merge(mapOutputs2);
Assert.assertEquals(2, mergeManager.onDiskMapOutputs.size());
List<CompressAwarePath> paths = new ArrayList<CompressAwarePath>();
Iterator<CompressAwarePath> iterator = mergeManager.onDiskMapOutputs.iterator();
List<String> keys = new ArrayList<String>();
List<String> values = new ArrayList<String>();
while (iterator.hasNext()) {
CompressAwarePath next = iterator.next();
readOnDiskMapOutput(conf, fs, next, keys, values);
paths.add(next);
}
Assert.assertEquals(keys, Arrays.asList("apple", "banana", "carrot", "apple", "banana", "carrot"));
Assert.assertEquals(values, Arrays.asList("awesome", "bla", "amazing", "disgusting", "pretty good", "delicious"));
mergeManager.close();
mergeManager = new MergeManagerImpl<Text, Text>(
reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null,
null, null, new Progress(), new MROutputFiles());
MergeThread<CompressAwarePath,Text,Text> onDiskMerger = mergeManager.createOnDiskMerger();
onDiskMerger.merge(paths);
Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size());
keys = new ArrayList<String>();
values = new ArrayList<String>();
readOnDiskMapOutput(conf, fs, mergeManager.onDiskMapOutputs.iterator().next(), keys, values);
Assert.assertEquals(keys, Arrays.asList("apple", "apple", "banana", "banana", "carrot", "carrot"));
Assert.assertEquals(values, Arrays.asList("awesome", "disgusting", "pretty good", "bla", "amazing", "delicious"));
mergeManager.close();
Assert.assertEquals(0, mergeManager.inMemoryMapOutputs.size());
Assert.assertEquals(0, mergeManager.inMemoryMergedMapOutputs.size());
Assert.assertEquals(0, mergeManager.onDiskMapOutputs.size());
}
private byte[] writeMapOutput(Configuration conf, Map<String, String> keysToValues)
throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
FSDataOutputStream fsdos = new FSDataOutputStream(baos, null);
IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(conf, fsdos,
Text.class, Text.class, null, null);
for (String key : keysToValues.keySet()) {
String value = keysToValues.get(key);
writer.append(new Text(key), new Text(value));
}
writer.close();
return baos.toByteArray();
}
private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
List<String> keys, List<String> values) throws IOException {
FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));
IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
fs.getFileStatus(path).getLen(), null, null);
DataInputBuffer keyBuff = new DataInputBuffer();
DataInputBuffer valueBuff = new DataInputBuffer();
Text key = new Text();
Text value = new Text();
while (reader.nextRawKey(keyBuff)) {
key.readFields(keyBuff);
keys.add(key.toString());
reader.nextRawValue(valueBuff);
value.readFields(valueBuff);
values.add(value.toString());
}
}
@Test
public void testCompressed() throws IOException {
testMergeShouldReturnProperProgress(getCompressedSegments());
}
@Test
public void testUncompressed() throws IOException {
testMergeShouldReturnProperProgress(getUncompressedSegments());
}
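// Merges two mocked three-key segments and checks that the reported
// merge progress advances by one sixth per key read.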
@SuppressWarnings( { "unchecked" })
public void testMergeShouldReturnProperProgress(
List<Segment<Text, Text>> segments) throws IOException {
Path tmpDir = new Path("localpath");
Class<Text> keyClass = (Class<Text>) jobConf.getMapOutputKeyClass();
Class<Text> valueClass = (Class<Text>) jobConf.getMapOutputValueClass();
RawComparator<Text> comparator = jobConf.getOutputKeyComparator();
Counter readsCounter = new Counter();
Counter writesCounter = new Counter();
Progress mergePhase = new Progress();
RawKeyValueIterator mergeQueue = Merger.merge(conf, fs, keyClass,
valueClass, segments, 2, tmpDir, comparator, getReporter(),
readsCounter, writesCounter, mergePhase);
final float epsilon = 0.00001f;
// Reading 6 keys total, 3 each in 2 segments, so each key read moves the
// progress forward 1/6th of the way. Initially the first keys from each
// segment have been read as part of the merge setup, so progress = 2/6.
Assert.assertEquals(2/6.0f, mergeQueue.getProgress().get(), epsilon);
// The first next() returns one of the keys already read during merge setup
Assert.assertTrue(mergeQueue.next());
Assert.assertEquals(2/6.0f, mergeQueue.getProgress().get(), epsilon);
// Subsequent next() calls should read one key and move progress
Assert.assertTrue(mergeQueue.next());
Assert.assertEquals(3/6.0f, mergeQueue.getProgress().get(), epsilon);
Assert.assertTrue(mergeQueue.next());
Assert.assertEquals(4/6.0f, mergeQueue.getProgress().get(), epsilon);
// At this point we've exhausted all of the keys in one segment
// so getting the next key will return the already cached key from the
// other segment
Assert.assertTrue(mergeQueue.next());
Assert.assertEquals(4/6.0f, mergeQueue.getProgress().get(), epsilon);
// Subsequent next() calls should read one key and move progress
Assert.assertTrue(mergeQueue.next());
Assert.assertEquals(5/6.0f, mergeQueue.getProgress().get(), epsilon);
Assert.assertTrue(mergeQueue.next());
Assert.assertEquals(1.0f, mergeQueue.getProgress().get(), epsilon);
// Now there should be no more input
Assert.assertFalse(mergeQueue.next());
Assert.assertEquals(1.0f, mergeQueue.getProgress().get(), epsilon);
Assert.assertTrue(mergeQueue.getKey() == null);
Assert.assertEquals(0, mergeQueue.getValue().getData().length);
}
private Progressable getReporter() {
Progressable reporter = new Progressable() {
@Override
public void progress() {
}
};
return reporter;
}
private List<Segment<Text, Text>> getUncompressedSegments() throws IOException {
List<Segment<Text, Text>> segments = new ArrayList<Segment<Text, Text>>();
for (int i = 0; i < 2; i++) {
segments.add(getUncompressedSegment(i));
}
return segments;
}
private List<Segment<Text, Text>> getCompressedSegments() throws IOException {
List<Segment<Text, Text>> segments = new ArrayList<Segment<Text, Text>>();
for (int i = 0; i < 2; i++) {
segments.add(getCompressedSegment(i));
}
return segments;
}
private Segment<Text, Text> getUncompressedSegment(int i) throws IOException {
return new Segment<Text, Text>(getReader(i, false), false);
}
private Segment<Text, Text> getCompressedSegment(int i) throws IOException {
return new Segment<Text, Text>(getReader(i, true), false, 3000l);
}
@SuppressWarnings("unchecked")
private Reader<Text, Text> getReader(int i, boolean isCompressedInput)
throws IOException {
Reader<Text, Text> readerMock = mock(Reader.class);
when(readerMock.getLength()).thenReturn(30l);
when(readerMock.getPosition()).thenReturn(0l).thenReturn(10l).thenReturn(
20l);
when(
readerMock.nextRawKey(any(DataInputBuffer.class)))
.thenAnswer(getKeyAnswer("Segment" + i, isCompressedInput));
doAnswer(getValueAnswer("Segment" + i)).when(readerMock).nextRawValue(
any(DataInputBuffer.class));
return readerMock;
}
private Answer<?> getKeyAnswer(final String segmentName,
final boolean isCompressedInput) {
return new Answer<Object>() {
int i = 0;
@SuppressWarnings("unchecked")
public Boolean answer(InvocationOnMock invocation) {
if (i++ == 3) {
return false;
}
Reader<Text,Text> mock = (Reader<Text,Text>) invocation.getMock();
int multiplier = isCompressedInput ? 100 : 1;
mock.bytesRead += 10 * multiplier;
Object[] args = invocation.getArguments();
DataInputBuffer key = (DataInputBuffer) args[0];
key.reset(("Segment Key " + segmentName + i).getBytes(), 20);
return true;
}
};
}
private Answer<?> getValueAnswer(final String segmentName) {
return new Answer<Void>() {
int i = 0;
public Void answer(InvocationOnMock invocation) {
Object[] args = invocation.getArguments();
DataInputBuffer key = (DataInputBuffer) args[0];
key.reset(("Segment Value " + segmentName + i).getBytes(), 20);
return null;
}
};
}
}
| 15,635 | 39.71875 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MROutputFiles;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
public class TestMergeManager {
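// Verifies in-memory merge triggering: reservations beyond the merge
// threshold must wait, committed outputs trigger merges, and two merges
// complete without reported exceptions.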
@Test(timeout=10000)
public void testMemoryMerge() throws Exception {
final int TOTAL_MEM_BYTES = 10000;
final int OUTPUT_SIZE = 7950;
JobConf conf = new JobConf();
conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 1.0f);
conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES, TOTAL_MEM_BYTES);
conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, 0.8f);
conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 0.9f);
TestExceptionReporter reporter = new TestExceptionReporter();
CyclicBarrier mergeStart = new CyclicBarrier(2);
CyclicBarrier mergeComplete = new CyclicBarrier(2);
StubbedMergeManager mgr = new StubbedMergeManager(conf, reporter,
mergeStart, mergeComplete);
// reserve enough map output to cause a merge when it is committed
MapOutput<Text, Text> out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
Assert.assertTrue("Should be a memory merge",
(out1 instanceof InMemoryMapOutput));
InMemoryMapOutput<Text, Text> mout1 = (InMemoryMapOutput<Text, Text>)out1;
fillOutput(mout1);
MapOutput<Text, Text> out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
Assert.assertTrue("Should be a memory merge",
(out2 instanceof InMemoryMapOutput));
InMemoryMapOutput<Text, Text> mout2 = (InMemoryMapOutput<Text, Text>)out2;
fillOutput(mout2);
// next reservation should be a WAIT
MapOutput<Text, Text> out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
Assert.assertEquals("Should be told to wait", null, out3);
// trigger the first merge and wait for merge thread to start merging
// and free enough output to reserve more
mout1.commit();
mout2.commit();
mergeStart.await();
Assert.assertEquals(1, mgr.getNumMerges());
// reserve enough map output to cause another merge when committed
out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
Assert.assertTrue("Should be a memory merge",
(out1 instanceof InMemoryMapOutput));
mout1 = (InMemoryMapOutput<Text, Text>)out1;
fillOutput(mout1);
out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
Assert.assertTrue("Should be a memory merge",
(out2 instanceof InMemoryMapOutput));
mout2 = (InMemoryMapOutput<Text, Text>)out2;
fillOutput(mout2);
// next reservation should be null
out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
Assert.assertEquals("Should be told to wait", null, out3);
// commit output *before* merge thread completes
mout1.commit();
mout2.commit();
// allow the first merge to complete
mergeComplete.await();
// start the second merge and verify
mergeStart.await();
Assert.assertEquals(2, mgr.getNumMerges());
// trigger the end of the second merge
mergeComplete.await();
Assert.assertEquals(2, mgr.getNumMerges());
Assert.assertEquals("exception reporter invoked",
0, reporter.getNumExceptions());
}
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
BoundedByteArrayOutputStream stream = output.getArrayStream();
int count = stream.getLimit();
for (int i=0; i < count; ++i) {
stream.write(i);
}
}
private static class StubbedMergeManager extends MergeManagerImpl<Text, Text> {
private TestMergeThread mergeThread;
public StubbedMergeManager(JobConf conf, ExceptionReporter reporter,
CyclicBarrier mergeStart, CyclicBarrier mergeComplete) {
super(null, conf, mock(LocalFileSystem.class), null, null, null, null,
null, null, null, null, reporter, null, mock(MapOutputFile.class));
mergeThread.setSyncBarriers(mergeStart, mergeComplete);
}
@Override
protected MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> createInMemoryMerger() {
mergeThread = new TestMergeThread(this, getExceptionReporter());
return mergeThread;
}
public int getNumMerges() {
return mergeThread.getNumMerges();
}
}
private static class TestMergeThread
extends MergeThread<InMemoryMapOutput<Text,Text>, Text, Text> {
private AtomicInteger numMerges;
private CyclicBarrier mergeStart;
private CyclicBarrier mergeComplete;
public TestMergeThread(MergeManagerImpl<Text, Text> mergeManager,
ExceptionReporter reporter) {
super(mergeManager, Integer.MAX_VALUE, reporter);
numMerges = new AtomicInteger(0);
}
public synchronized void setSyncBarriers(
CyclicBarrier mergeStart, CyclicBarrier mergeComplete) {
this.mergeStart = mergeStart;
this.mergeComplete = mergeComplete;
}
public int getNumMerges() {
return numMerges.get();
}
@Override
public void merge(List<InMemoryMapOutput<Text, Text>> inputs)
throws IOException {
synchronized (this) {
numMerges.incrementAndGet();
for (InMemoryMapOutput<Text, Text> input : inputs) {
manager.unreserve(input.getSize());
}
}
try {
mergeStart.await();
mergeComplete.await();
} catch (InterruptedException e) {
} catch (BrokenBarrierException e) {
}
}
}
private static class TestExceptionReporter implements ExceptionReporter {
private List<Throwable> exceptions = new ArrayList<Throwable>();
@Override
public void reportException(Throwable t) {
exceptions.add(t);
t.printStackTrace();
}
public int getNumExceptions() {
return exceptions.size();
}
}
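// Verifies that the on-disk merger honors io.sort.factor and that each
// pending merge batch is sorted by compressed size.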
@SuppressWarnings({ "unchecked", "deprecation" })
@Test(timeout=10000)
public void testOnDiskMerger() throws IOException, URISyntaxException,
InterruptedException {
JobConf jobConf = new JobConf();
final int SORT_FACTOR = 5;
jobConf.setInt(MRJobConfig.IO_SORT_FACTOR, SORT_FACTOR);
MapOutputFile mapOutputFile = new MROutputFiles();
FileSystem fs = FileSystem.getLocal(jobConf);
MergeManagerImpl<IntWritable, IntWritable> manager =
new MergeManagerImpl<IntWritable, IntWritable>(null, jobConf, fs, null
, null, null, null, null, null, null, null, null, null, mapOutputFile);
MergeThread<MapOutput<IntWritable, IntWritable>, IntWritable, IntWritable>
onDiskMerger = (MergeThread<MapOutput<IntWritable, IntWritable>,
IntWritable, IntWritable>) Whitebox.getInternalState(manager,
"onDiskMerger");
int mergeFactor = (Integer) Whitebox.getInternalState(onDiskMerger,
"mergeFactor");
// make sure the io.sort.factor is set properly
assertEquals(mergeFactor, SORT_FACTOR);
// Stop the onDiskMerger thread so that we can intercept the list of files
// waiting to be merged.
onDiskMerger.suspend();
//Send the list of fake files waiting to be merged
Random rand = new Random();
for(int i = 0; i < 2*SORT_FACTOR; ++i) {
Path path = new Path("somePath");
CompressAwarePath cap = new CompressAwarePath(path, 1l, rand.nextInt());
manager.closeOnDiskFile(cap);
}
//Check that the files pending to be merged are in sorted order.
LinkedList<List<CompressAwarePath>> pendingToBeMerged =
(LinkedList<List<CompressAwarePath>>) Whitebox.getInternalState(
onDiskMerger, "pendingToBeMerged");
assertTrue("No inputs were added to list pending to merge",
pendingToBeMerged.size() > 0);
for(int i = 0; i < pendingToBeMerged.size(); ++i) {
List<CompressAwarePath> inputs = pendingToBeMerged.get(i);
for(int j = 1; j < inputs.size(); ++j) {
assertTrue("Not enough / too many inputs were going to be merged",
inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
assertTrue("Inputs to be merged were not sorted according to size: ",
inputs.get(j).getCompressedSize()
>= inputs.get(j-1).getCompressedSize());
}
}
}
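// Verifies that shuffle memory limits larger than Integer.MAX_VALUE are
// usable when the reducer heap is large.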
@Test
public void testLargeMemoryLimits() throws Exception {
final JobConf conf = new JobConf();
// Xmx in production
conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
8L * 1024 * 1024 * 1024);
// M1 = Xmx fraction for map outputs
conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 1.0f);
// M2 = max M1 fraction for a single map output
conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, 0.95f);
// M3 = M1 fraction at which in memory merge is triggered
conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 1.0f);
// M4 = M1 fraction of map outputs remaining in memory for a reduce
conf.setFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 1.0f);
final MergeManagerImpl<Text, Text> mgr = new MergeManagerImpl<Text, Text>(
null, conf, mock(LocalFileSystem.class), null, null, null, null, null,
null, null, null, null, null, new MROutputFiles());
assertTrue("Large shuffle area unusable: " + mgr.memoryLimit,
mgr.memoryLimit > Integer.MAX_VALUE);
final long maxInMemReduce = mgr.getMaxInMemReduceLimit();
assertTrue("Large in-memory reduce area unusable: " + maxInMemReduce,
maxInMemReduce > Integer.MAX_VALUE);
}
}
| 11,026 | 36.634812 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.task.reduce;
import static org.mockito.Mockito.mock;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.ShuffleConsumerPlugin;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.util.Progress;
import org.junit.Assert;
import org.junit.Test;
public class TestShuffleScheduler {
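// Verifies that failed TIPs advance the shuffle progress and that
// waitUntilDone only returns true once all inputs are accounted for.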
@SuppressWarnings("rawtypes")
@Test
public void testTipFailed() throws Exception {
JobConf job = new JobConf();
job.setNumMapTasks(2);
TaskStatus status = new TaskStatus() {
@Override
public boolean getIsMap() {
return false;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
}
};
Progress progress = new Progress();
TaskAttemptID reduceId = new TaskAttemptID("314159", 0, TaskType.REDUCE,
0, 0);
ShuffleSchedulerImpl scheduler = new ShuffleSchedulerImpl(job, status,
reduceId, null, progress, null, null, null);
JobID jobId = new JobID();
TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
scheduler.tipFailed(taskId1);
Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
0.0f);
Assert.assertFalse(scheduler.waitUntilDone(1));
TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
scheduler.tipFailed(taskId0);
Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
0.0f);
Assert.assertTrue(scheduler.waitUntilDone(1));
}
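// Feeds copySucceeded calls with overlapping transfer intervals and
// checks the aggregated copy rate reported in the progress status.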
@SuppressWarnings("rawtypes")
@Test
public <K, V> void TestAggregatedTransferRate() throws Exception {
JobConf job = new JobConf();
job.setNumMapTasks(10);
//mock creation
TaskUmbilicalProtocol mockUmbilical = mock(TaskUmbilicalProtocol.class);
Reporter mockReporter = mock(Reporter.class);
FileSystem mockFileSystem = mock(FileSystem.class);
Class<? extends org.apache.hadoop.mapred.Reducer> combinerClass = job.getCombinerClass();
@SuppressWarnings("unchecked") // needed for mock with generic
CombineOutputCollector<K, V> mockCombineOutputCollector =
(CombineOutputCollector<K, V>) mock(CombineOutputCollector.class);
org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID =
mock(org.apache.hadoop.mapreduce.TaskAttemptID.class);
LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
CompressionCodec mockCompressionCodec = mock(CompressionCodec.class);
Counter mockCounter = mock(Counter.class);
TaskStatus mockTaskStatus = mock(TaskStatus.class);
Progress mockProgress = mock(Progress.class);
MapOutputFile mockMapOutputFile = mock(MapOutputFile.class);
Task mockTask = mock(Task.class);
@SuppressWarnings("unchecked")
MapOutput<K, V> output = mock(MapOutput.class);
ShuffleConsumerPlugin.Context<K, V> context =
new ShuffleConsumerPlugin.Context<K, V>(mockTaskAttemptID, job, mockFileSystem,
mockUmbilical, mockLocalDirAllocator,
mockReporter, mockCompressionCodec,
combinerClass, mockCombineOutputCollector,
mockCounter, mockCounter, mockCounter,
mockCounter, mockCounter, mockCounter,
mockTaskStatus, mockProgress, mockProgress,
mockTask, mockMapOutputFile, null);
TaskStatus status = new TaskStatus() {
@Override
public boolean getIsMap() {
return false;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
}
};
Progress progress = new Progress();
ShuffleSchedulerImpl<K, V> scheduler = new ShuffleSchedulerImpl<K, V>(job, status, null,
null, progress, context.getShuffledMapsCounter(),
context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
TaskAttemptID attemptID0 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 0), 0);
//adding the 1st interval, 40MB from 60s to 100s
long bytes = (long)40 * 1024 * 1024;
scheduler.copySucceeded(attemptID0, new MapHost(null, null), bytes, 60000, 100000, output);
Assert.assertEquals(copyMessage(1, 1, 1), progress.toString());
TaskAttemptID attemptID1 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 1), 1);
//adding the 2nd interval before the 1st interval, 50MB from 0s to 50s
bytes = (long)50 * 1024 * 1024;
scheduler.copySucceeded(attemptID1, new MapHost(null, null), bytes, 0, 50000, output);
Assert.assertEquals(copyMessage(2, 1, 1), progress.toString());
TaskAttemptID attemptID2 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 2), 2);
//adding the 3rd interval overlapping with the 1st and the 2nd interval
//110MB from 25s to 80s
bytes = (long)110 * 1024 * 1024;
scheduler.copySucceeded(attemptID2, new MapHost(null, null), bytes, 25000, 80000, output);
Assert.assertEquals(copyMessage(3, 2, 2), progress.toString());
TaskAttemptID attemptID3 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 3), 3);
//adding the 4th interval just after the 1st interval, 100MB from 100s to 300s
bytes = (long)100 * 1024 * 1024;
scheduler.copySucceeded(attemptID3, new MapHost(null, null), bytes, 100000, 300000, output);
Assert.assertEquals(copyMessage(4, 0.5, 1), progress.toString());
TaskAttemptID attemptID4 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 4), 4);
//adding the 5th interval after the 4th, 50MB from 350s to 400s
bytes = (long)50 * 1024 * 1024;
scheduler.copySucceeded(attemptID4, new MapHost(null, null), bytes, 350000, 400000, output);
Assert.assertEquals(copyMessage(5, 1, 1), progress.toString());
TaskAttemptID attemptID5 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 5), 5);
//adding the 6th interval after the 5th, 50MB from 450s to 500s
bytes = (long)50 * 1024 * 1024;
scheduler.copySucceeded(attemptID5, new MapHost(null, null), bytes, 450000, 500000, output);
Assert.assertEquals(copyMessage(6, 1, 1), progress.toString());
TaskAttemptID attemptID6 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 6), 6);
//adding the 7th interval between the 4th and 5th intervals, 20MB from 320s to 340s
bytes = (long)20 * 1024 * 1024;
scheduler.copySucceeded(attemptID6, new MapHost(null, null), bytes, 320000, 340000, output);
Assert.assertEquals(copyMessage(7, 1, 1), progress.toString());
TaskAttemptID attemptID7 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 7), 7);
//adding the 8th interval overlapping with the 4th, 5th, and 7th, 30MB from 290s to 350s
bytes = (long)30 * 1024 * 1024;
scheduler.copySucceeded(attemptID7, new MapHost(null, null), bytes, 290000, 350000, output);
Assert.assertEquals(copyMessage(8, 0.5, 1), progress.toString());
TaskAttemptID attemptID8 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 8), 8);
//adding the 9th interval overlapping with 5th and 6th, 50MB from 400s to 450s
bytes = (long)50 * 1024 * 1024;
scheduler.copySucceeded(attemptID8, new MapHost(null, null), bytes, 400000, 450000, output);
Assert.assertEquals(copyMessage(9, 1, 1), progress.toString());
TaskAttemptID attemptID9 = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 9), 9);
//adding the 10th interval overlapping with all intervals, 500MB from 0s to 500s
bytes = (long)500 * 1024 * 1024;
scheduler.copySucceeded(attemptID9, new MapHost(null, null), bytes, 0, 500000, output);
Assert.assertEquals(copyMessage(10, 1, 2), progress.toString());
}
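// Regression test for MAPREDUCE-6361: a host failure followed by a
// successful copy and then a failed copy must not trigger an NPE.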
@SuppressWarnings("rawtypes")
@Test
public <K, V> void TestSucceedAndFailedCopyMap() throws Exception {
JobConf job = new JobConf();
job.setNumMapTasks(2);
//mock creation
TaskUmbilicalProtocol mockUmbilical = mock(TaskUmbilicalProtocol.class);
Reporter mockReporter = mock(Reporter.class);
FileSystem mockFileSystem = mock(FileSystem.class);
Class<? extends org.apache.hadoop.mapred.Reducer> combinerClass = job.getCombinerClass();
@SuppressWarnings("unchecked") // needed for mock with generic
CombineOutputCollector<K, V> mockCombineOutputCollector =
(CombineOutputCollector<K, V>) mock(CombineOutputCollector.class);
org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID =
mock(org.apache.hadoop.mapreduce.TaskAttemptID.class);
LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
CompressionCodec mockCompressionCodec = mock(CompressionCodec.class);
Counter mockCounter = mock(Counter.class);
TaskStatus mockTaskStatus = mock(TaskStatus.class);
Progress mockProgress = mock(Progress.class);
MapOutputFile mockMapOutputFile = mock(MapOutputFile.class);
Task mockTask = mock(Task.class);
@SuppressWarnings("unchecked")
MapOutput<K, V> output = mock(MapOutput.class);
ShuffleConsumerPlugin.Context<K, V> context =
new ShuffleConsumerPlugin.Context<K, V>(
mockTaskAttemptID, job, mockFileSystem,
mockUmbilical, mockLocalDirAllocator,
mockReporter, mockCompressionCodec,
combinerClass, mockCombineOutputCollector,
mockCounter, mockCounter, mockCounter,
mockCounter, mockCounter, mockCounter,
mockTaskStatus, mockProgress, mockProgress,
mockTask, mockMapOutputFile, null);
TaskStatus status = new TaskStatus() {
@Override
public boolean getIsMap() {
return false;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
}
};
Progress progress = new Progress();
ShuffleSchedulerImpl<K, V> scheduler = new ShuffleSchedulerImpl<K, V>(job,
status, null, null, progress, context.getShuffledMapsCounter(),
context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
MapHost host1 = new MapHost("host1", null);
TaskAttemptID failedAttemptID = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 0), 0);
TaskAttemptID succeedAttemptID = new TaskAttemptID(
new org.apache.hadoop.mapred.TaskID(
new JobID("test",0), TaskType.MAP, 1), 1);
// handle output fetch failure for failedAttemptID, part I
scheduler.hostFailed(host1.getHostName());
// handle output fetch succeed for succeedAttemptID
long bytes = (long)500 * 1024 * 1024;
scheduler.copySucceeded(succeedAttemptID, host1, bytes, 0, 500000, output);
// handle output fetch failure for failedAttemptID, part II
// for MAPREDUCE-6361: verify that no NPE is thrown
scheduler.copyFailed(failedAttemptID, host1, true, false);
}
private static String copyMessage(int attemptNo, double rate1, double rate2) {
int attemptZero = attemptNo - 1;
return String.format("copy task(attempt_test_0000_m_%06d_%d succeeded at %1.2f MB/s)"
+ " Aggregated copy rate(%d of 10 at %1.2f MB/s)", attemptZero
, attemptZero, rate1, attemptNo, rate2);
}
}
| 13,261 | 44.262799 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPreemptableFileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.mapreduce.task.annotation.Checkpointable;
import org.junit.Test;
import static org.mockito.Mockito.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
public class TestPreemptableFileOutputCommitter {
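// Verifies that cleaning up partial output removes only the temporary
// directories of earlier attempts of the same task.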
@Test
public void testPartialOutputCleanup()
throws FileNotFoundException, IllegalArgumentException, IOException {
Configuration conf = new Configuration(false);
conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
TaskAttemptID tid0 =
new TaskAttemptID("1363718006656", 1, TaskType.REDUCE, 14, 3);
Path p = spy(new Path("/user/hadoop/out"));
Path a = new Path("hdfs://user/hadoop/out");
Path p0 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000014_0");
Path p1 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000014_1");
Path p2 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000013_0");
// (p3 does not exist)
Path p3 = new Path(a, "_temporary/1/attempt_1363718006656_0001_r_000014_2");
FileStatus[] fsa = new FileStatus[3];
fsa[0] = new FileStatus();
fsa[0].setPath(p0);
fsa[1] = new FileStatus();
fsa[1].setPath(p1);
fsa[2] = new FileStatus();
fsa[2].setPath(p2);
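    // Mock a file system in which the outputs of attempts 0 and 1 of task 14
    // and attempt 0 of task 13 exist, while attempt 2 of task 14 (p3) does not.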
final FileSystem fs = mock(FileSystem.class);
when(fs.exists(eq(p0))).thenReturn(true);
when(fs.exists(eq(p1))).thenReturn(true);
when(fs.exists(eq(p2))).thenReturn(true);
when(fs.exists(eq(p3))).thenReturn(false);
when(fs.delete(eq(p0), eq(true))).thenReturn(true);
when(fs.delete(eq(p1), eq(true))).thenReturn(true);
doReturn(fs).when(p).getFileSystem(any(Configuration.class));
when(fs.makeQualified(eq(p))).thenReturn(a);
TaskAttemptContext context = mock(TaskAttemptContext.class);
when(context.getTaskAttemptID()).thenReturn(tid0);
when(context.getConfiguration()).thenReturn(conf);
PartialFileOutputCommitter foc = new TestPFOC(p, context, fs);
foc.cleanUpPartialOutputForTask(context);
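    // Cleanup should remove only the other attempts of the same task (p0, p1);
    // it must not touch another task's output (p2) or the non-existent p3.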
verify(fs).delete(eq(p0), eq(true));
verify(fs).delete(eq(p1), eq(true));
verify(fs, never()).delete(eq(p3), eq(true));
verify(fs, never()).delete(eq(p2), eq(true));
}
@Checkpointable
static class TestPFOC extends PartialFileOutputCommitter {
final FileSystem fs;
TestPFOC(Path outputPath, TaskAttemptContext ctxt, FileSystem fs)
throws IOException {
super(outputPath, ctxt);
this.fs = fs;
}
@Override
FileSystem fsFor(Path p, Configuration conf) {
return fs;
}
}
}
| 3,741 | 35.686275 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
public class TestFileOutputFormat extends TestCase {
public void testSetOutputPathException() throws Exception {
Job job = Job.getInstance();
try {
// Give it an invalid filesystem so it'll throw an exception
FileOutputFormat.setOutputPath(job, new Path("foo:///bar"));
fail("Should have thrown a RuntimeException with an IOException inside");
}
catch (RuntimeException re) {
assertTrue(re.getCause() instanceof IOException);
}
}
public void testCheckOutputSpecsException() throws Exception {
Job job = Job.getInstance();
Path outDir = new Path(System.getProperty("test.build.data", "/tmp"),
"output");
FileSystem fs = outDir.getFileSystem(new Configuration());
// Create the output dir so it already exists and set it for the job
fs.mkdirs(outDir);
FileOutputFormat.setOutputPath(job, outDir);
// We don't need a "full" implementation of FileOutputFormat for this test
FileOutputFormat fof = new FileOutputFormat() {
@Override
public RecordWriter getRecordWriter(TaskAttemptContext job)
throws IOException, InterruptedException {
return null;
}
};
try {
try {
// This should throw a FileAlreadyExistsException because the outputDir
// already exists
fof.checkOutputSpecs(job);
fail("Should have thrown a FileAlreadyExistsException");
}
catch (FileAlreadyExistsException re) {
// correct behavior
}
}
finally {
// Cleanup
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
}
}
}
| 2,876 | 34.9625 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.output;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
@SuppressWarnings("unchecked")
public class TestFileOutputCommitter extends TestCase {
private static final Path outDir = new Path(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
TestFileOutputCommitter.class.getName());
private final static String SUB_DIR = "SUB_DIR";
private final static Path OUT_SUB_DIR = new Path(outDir, SUB_DIR);
private static final Log LOG =
LogFactory.getLog(TestFileOutputCommitter.class);
// A random task attempt id for testing.
private static final String attempt = "attempt_200707121733_0001_m_000000_0";
private static final String partFile = "part-m-00000";
private static final TaskAttemptID taskID = TaskAttemptID.forName(attempt);
private static final String attempt1 = "attempt_200707121733_0001_m_000001_0";
private static final TaskAttemptID taskID1 = TaskAttemptID.forName(attempt1);
private Text key1 = new Text("key1");
private Text key2 = new Text("key2");
private Text val1 = new Text("val1");
private Text val2 = new Text("val2");
private static void cleanup() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = outDir.getFileSystem(conf);
fs.delete(outDir, true);
}
@Override
public void setUp() throws IOException {
cleanup();
}
@Override
public void tearDown() throws IOException {
cleanup();
}
private void writeOutput(RecordWriter theRecordWriter,
TaskAttemptContext context) throws IOException, InterruptedException {
NullWritable nullWritable = NullWritable.get();
try {
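      // TextOutputFormat drops null/NullWritable keys and values: a record with
      // both parts null writes nothing, otherwise only the non-null part is
      // written. validateContent() below expects exactly this output.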
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(context);
}
}
private void writeMapFileOutput(RecordWriter theRecordWriter,
TaskAttemptContext context) throws IOException, InterruptedException {
try {
int key = 0;
for (int i = 0 ; i < 10; ++i) {
key = i;
Text val = (i%2 == 1) ? val1 : val2;
theRecordWriter.write(new LongWritable(key),
val);
}
} finally {
theRecordWriter.close(context);
}
}
private void testRecoveryInternal(int commitVersion, int recoveryVersion)
throws Exception {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
commitVersion);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do commit
committer.commitTask(tContext);
Path jobTempDir1 = committer.getCommittedTaskPath(tContext);
File jtd = new File(jobTempDir1.toUri().getPath());
if (commitVersion == 1) {
assertTrue("Version 1 commits to temporary dir " + jtd, jtd.exists());
validateContent(jtd);
} else {
assertFalse("Version 2 commits to output dir " + jtd, jtd.exists());
}
//now while running the second app attempt,
//recover the task output from first attempt
Configuration conf2 = job.getConfiguration();
conf2.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf2.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 2);
conf2.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
recoveryVersion);
JobContext jContext2 = new JobContextImpl(conf2, taskID.getJobID());
TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2, taskID);
FileOutputCommitter committer2 = new FileOutputCommitter(outDir, tContext2);
committer2.setupJob(tContext2);
Path jobTempDir2 = committer2.getCommittedTaskPath(tContext2);
File jtd2 = new File(jobTempDir2.toUri().getPath());
committer2.recoverTask(tContext2);
if (recoveryVersion == 1) {
assertTrue("Version 1 recovers to " + jtd2, jtd2.exists());
validateContent(jtd2);
} else {
assertFalse("Version 2 commits to output dir " + jtd2, jtd2.exists());
if (commitVersion == 1) {
assertTrue("Version 2 recovery moves to output dir from "
+ jtd , jtd.list().length == 0);
}
}
committer2.commitJob(jContext2);
validateContent(outDir);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testRecoveryV1() throws Exception {
testRecoveryInternal(1, 1);
}
public void testRecoveryV2() throws Exception {
testRecoveryInternal(2, 2);
}
public void testRecoveryUpgradeV1V2() throws Exception {
testRecoveryInternal(1, 2);
}
private void validateContent(Path dir) throws IOException {
validateContent(new File(dir.toUri().getPath()));
}
private void validateContent(File dir) throws IOException {
File expectedFile = new File(dir, partFile);
assertTrue("Could not find "+expectedFile, expectedFile.exists());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append('\t').append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append('\t').append(val2).append("\n");
String output = slurp(expectedFile);
    assertEquals(expectedOutput.toString(), output);
}
private void validateMapFileOutputContent(
FileSystem fs, Path dir) throws IOException {
// map output is a directory with index and data files
Path expectedMapDir = new Path(dir, partFile);
assert(fs.getFileStatus(expectedMapDir).isDirectory());
FileStatus[] files = fs.listStatus(expectedMapDir);
int fileCount = 0;
boolean dataFileFound = false;
boolean indexFileFound = false;
for (FileStatus f : files) {
if (f.isFile()) {
++fileCount;
if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
indexFileFound = true;
}
else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
dataFileFound = true;
}
}
}
assert(fileCount > 0);
assert(dataFileFound && indexFileFound);
}
private void testCommitterInternal(int version) throws Exception {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do commit
committer.commitTask(tContext);
committer.commitJob(jContext);
// validate output
validateContent(outDir);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testCommitterV1() throws Exception {
testCommitterInternal(1);
}
public void testCommitterV2() throws Exception {
testCommitterInternal(2);
}
private void testMapFileOutputCommitterInternal(int version)
throws Exception {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
writeMapFileOutput(theRecordWriter, tContext);
// do commit
committer.commitTask(tContext);
committer.commitJob(jContext);
// Ensure getReaders call works and also ignores
// hidden filenames (_ or . prefixes)
try {
MapFileOutputFormat.getReaders(outDir, conf);
} catch (Exception e) {
fail("Fail to read from MapFileOutputFormat: " + e);
e.printStackTrace();
}
// validate output
validateMapFileOutputContent(FileSystem.get(job.getConfiguration()), outDir);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testMapFileOutputCommitterV1() throws Exception {
testMapFileOutputCommitterInternal(1);
}
public void testMapFileOutputCommitterV2() throws Exception {
testMapFileOutputCommitterInternal(2);
}
public void testInvalidVersionNumber() throws IOException {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 3);
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
try {
new FileOutputCommitter(outDir, tContext);
fail("should've thrown an exception!");
} catch (IOException e) {
//test passed
}
}
private void testAbortInternal(int version)
throws IOException, InterruptedException {
Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do abort
committer.abortTask(tContext);
File expectedFile = new File(new Path(committer.getWorkPath(), partFile)
.toString());
assertFalse("task temp dir still exists", expectedFile.exists());
committer.abortJob(jContext, JobStatus.State.FAILED);
expectedFile = new File(new Path(outDir, FileOutputCommitter.PENDING_DIR_NAME)
.toString());
assertFalse("job temp dir still exists", expectedFile.exists());
assertEquals("Output directory not empty", 0, new File(outDir.toString())
.listFiles().length);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testAbortV1() throws IOException, InterruptedException {
testAbortInternal(1);
}
public void testAbortV2() throws IOException, InterruptedException {
testAbortInternal(2);
}
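  /**
   * A RawLocalFileSystem whose delete() always throws, used to exercise the
   * abort error-handling paths.
   */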
public static class FakeFileSystem extends RawLocalFileSystem {
public FakeFileSystem() {
super();
}
public URI getUri() {
return URI.create("faildel:///");
}
@Override
public boolean delete(Path p, boolean recursive) throws IOException {
throw new IOException("fake delete failed");
}
}
private void testFailAbortInternal(int version)
throws IOException, InterruptedException {
Job job = Job.getInstance();
Configuration conf = job.getConfiguration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
version);
FileOutputFormat.setOutputPath(job, outDir);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
RecordWriter<?, ?> theRecordWriter = theOutputFormat
.getRecordWriter(tContext);
writeOutput(theRecordWriter, tContext);
// do abort
Throwable th = null;
try {
committer.abortTask(tContext);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
Path jtd = committer.getJobAttemptPath(jContext);
File jobTmpDir = new File(jtd.toUri().getPath());
Path ttd = committer.getTaskAttemptPath(tContext);
File taskTmpDir = new File(ttd.toUri().getPath());
File expectedFile = new File(taskTmpDir, partFile);
    assertTrue(expectedFile + " does not exist", expectedFile.exists());
th = null;
try {
committer.abortJob(jContext, JobStatus.State.FAILED);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
assertTrue("job temp dir does not exists", jobTmpDir.exists());
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testFailAbortV1() throws Exception {
testFailAbortInternal(1);
}
public void testFailAbortV2() throws Exception {
testFailAbortInternal(2);
}
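  /**
   * A RawLocalFileSystem that pretends the output sub-directory does not exist
   * the first time each thread asks for it, simulating two tasks racing to
   * commit into the same sub-directory.
   */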
static class RLFS extends RawLocalFileSystem {
private final ThreadLocal<Boolean> needNull = new ThreadLocal<Boolean>() {
@Override
protected Boolean initialValue() {
return true;
}
};
public RLFS() {
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
if (needNull.get() &&
OUT_SUB_DIR.toUri().getPath().equals(f.toUri().getPath())) {
needNull.set(false); // lie once per thread
return null;
}
return super.getFileStatus(f);
}
}
private void testConcurrentCommitTaskWithSubDir(int version)
throws Exception {
final Job job = Job.getInstance();
FileOutputFormat.setOutputPath(job, outDir);
final Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
version);
conf.setClass("fs.file.impl", RLFS.class, FileSystem.class);
FileSystem.closeAll();
final JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
final FileOutputCommitter amCommitter =
new FileOutputCommitter(outDir, jContext);
amCommitter.setupJob(jContext);
final TaskAttemptContext[] taCtx = new TaskAttemptContextImpl[2];
taCtx[0] = new TaskAttemptContextImpl(conf, taskID);
taCtx[1] = new TaskAttemptContextImpl(conf, taskID1);
final TextOutputFormat[] tof = new TextOutputFormat[2];
for (int i = 0; i < tof.length; i++) {
tof[i] = new TextOutputFormat() {
@Override
public Path getDefaultWorkFile(TaskAttemptContext context,
String extension) throws IOException {
final FileOutputCommitter foc = (FileOutputCommitter)
getOutputCommitter(context);
return new Path(new Path(foc.getWorkPath(), SUB_DIR),
getUniqueFile(context, getOutputName(context), extension));
}
};
}
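    // Commit both task attempts in parallel; each writes its part file under
    // SUB_DIR inside its own work path.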
final ExecutorService executor = Executors.newFixedThreadPool(2);
try {
for (int i = 0; i < taCtx.length; i++) {
final int taskIdx = i;
executor.submit(new Callable<Void>() {
@Override
public Void call() throws IOException, InterruptedException {
final OutputCommitter outputCommitter =
tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
outputCommitter.setupTask(taCtx[taskIdx]);
final RecordWriter rw =
tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
writeOutput(rw, taCtx[taskIdx]);
outputCommitter.commitTask(taCtx[taskIdx]);
return null;
}
});
}
} finally {
executor.shutdown();
while (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
LOG.info("Awaiting thread termination!");
}
}
amCommitter.commitJob(jContext);
final RawLocalFileSystem lfs = new RawLocalFileSystem();
lfs.setConf(conf);
assertFalse("Must not end up with sub_dir/sub_dir",
lfs.exists(new Path(OUT_SUB_DIR, SUB_DIR)));
// validate output
validateContent(OUT_SUB_DIR);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testConcurrentCommitTaskWithSubDirV1() throws Exception {
testConcurrentCommitTaskWithSubDir(1);
}
public void testConcurrentCommitTaskWithSubDirV2() throws Exception {
testConcurrentCommitTaskWithSubDir(2);
}
public static String slurp(File f) throws IOException {
int len = (int) f.length();
byte[] buf = new byte[len];
FileInputStream in = new FileInputStream(f);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
}
| 20,740 | 34.27381 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/db/DriverForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.Driver;
import java.sql.DriverPropertyInfo;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.Statement;
import java.util.Properties;
import java.util.logging.Logger;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
/**
 * Driver implementation that emulates a JDBC connection to a database for tests.
 */
public class DriverForTest implements Driver {
public static Connection getConnection() {
Connection connection = mock(FakeConnection.class);
try {
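      // Stub a connection: plain statements return 15 for getLong(1), and
      // prepared queries return an empty result set.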
Statement statement = mock(Statement.class);
ResultSet results = mock(ResultSet.class);
when(results.getLong(1)).thenReturn(15L);
when(statement.executeQuery(any(String.class))).thenReturn(results);
when(connection.createStatement()).thenReturn(statement);
DatabaseMetaData metadata = mock(DatabaseMetaData.class);
when(metadata.getDatabaseProductName()).thenReturn("Test");
when(connection.getMetaData()).thenReturn(metadata);
      PreparedStatement preparedStatement0 = mock(PreparedStatement.class);
      when(connection.prepareStatement(anyString())).thenReturn(
          preparedStatement0);
PreparedStatement preparedStatement = mock(PreparedStatement.class);
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.next()).thenReturn(false);
when(preparedStatement.executeQuery()).thenReturn(resultSet);
when(connection.prepareStatement(anyString(), anyInt(), anyInt()))
.thenReturn(preparedStatement);
    } catch (SQLException e) {
      // the mocked objects never throw; nothing to do here
    }
return connection;
}
@Override
public boolean acceptsURL(String arg0) throws SQLException {
return "testUrl".equals(arg0);
}
@Override
public Connection connect(String arg0, Properties arg1) throws SQLException {
return getConnection();
}
@Override
public int getMajorVersion() {
return 1;
}
@Override
public int getMinorVersion() {
return 1;
}
@Override
public DriverPropertyInfo[] getPropertyInfo(String arg0, Properties arg1)
throws SQLException {
return null;
}
@Override
public boolean jdbcCompliant() {
return true;
}
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
throw new SQLFeatureNotSupportedException();
}
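  // Connection sub-interface that also declares the Oracle-specific
  // setSessionTimeZone method, so the method exists on the mocked connection.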
private interface FakeConnection extends Connection{
public void setSessionTimeZone(String arg);
}
}
| 3,399 | 29.088496 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestDbClasses.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import java.sql.Connection;
import java.sql.Types;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable;
import org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TestDbClasses {
  /**
   * Test the splitters returned by DataDrivenDBInputFormat. Different data
   * types may map to different splitter implementations.
   */
@Test(timeout = 10000)
public void testDataDrivenDBInputFormatSplitter() {
DataDrivenDBInputFormat<NullDBWritable> format = new DataDrivenDBInputFormat<NullDBWritable>();
testCommonSplitterTypes(format);
assertEquals(DateSplitter.class, format.getSplitter(Types.TIMESTAMP)
.getClass());
assertEquals(DateSplitter.class, format.getSplitter(Types.DATE).getClass());
assertEquals(DateSplitter.class, format.getSplitter(Types.TIME).getClass());
}
@Test(timeout = 10000)
public void testDataDrivenDBInputFormat() throws Exception {
JobContext jobContext = mock(JobContext.class);
Configuration configuration = new Configuration();
configuration.setInt(MRJobConfig.NUM_MAPS, 1);
when(jobContext.getConfiguration()).thenReturn(configuration);
DataDrivenDBInputFormat<NullDBWritable> format = new DataDrivenDBInputFormat<NullDBWritable>();
List<InputSplit> splits = format.getSplits(jobContext);
assertEquals(1, splits.size());
DataDrivenDBInputSplit split = (DataDrivenDBInputSplit) splits.get(0);
assertEquals("1=1", split.getLowerClause());
assertEquals("1=1", split.getUpperClause());
    // now with two map tasks and an explicit bounding query
configuration.setInt(MRJobConfig.NUM_MAPS, 2);
DataDrivenDBInputFormat.setBoundingQuery(configuration, "query");
assertEquals("query",
configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY));
Job job = mock(Job.class);
when(job.getConfiguration()).thenReturn(configuration);
DataDrivenDBInputFormat.setInput(job, NullDBWritable.class, "query",
"Bounding Query");
assertEquals("Bounding Query",
configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY));
}
@Test(timeout = 10000)
public void testOracleDataDrivenDBInputFormat() throws Exception {
OracleDataDrivenDBInputFormat<NullDBWritable> format =
new OracleDataDrivenDBInputFormatForTest();
testCommonSplitterTypes(format);
assertEquals(OracleDateSplitter.class, format.getSplitter(Types.TIMESTAMP)
.getClass());
assertEquals(OracleDateSplitter.class,
format.getSplitter(Types.DATE).getClass());
assertEquals(OracleDateSplitter.class,
format.getSplitter(Types.TIME).getClass());
}
  /**
   * Test the SQL query generated by OracleDBRecordReader.
   */
@Test(timeout = 20000)
public void testOracleDBRecordReader() throws Exception {
DBInputSplit splitter = new DBInputSplit(1, 10);
Configuration configuration = new Configuration();
Connection connect = DriverForTest.getConnection();
DBConfiguration dbConfiguration = new DBConfiguration(configuration);
dbConfiguration.setInputOrderBy("Order");
String[] fields = { "f1", "f2" };
OracleDBRecordReader<NullDBWritable> recorder = new OracleDBRecordReader<NullDBWritable>(
splitter, NullDBWritable.class, configuration, connect,
dbConfiguration, "condition", fields, "table");
assertEquals(
"SELECT * FROM (SELECT a.*,ROWNUM dbif_rno FROM ( SELECT f1, f2 FROM table WHERE condition ORDER BY Order ) a WHERE rownum <= 10 ) WHERE dbif_rno > 1",
recorder.getSelectQuery());
}
private void testCommonSplitterTypes(
DataDrivenDBInputFormat<NullDBWritable> format) {
assertEquals(BigDecimalSplitter.class, format.getSplitter(Types.DECIMAL)
.getClass());
assertEquals(BigDecimalSplitter.class, format.getSplitter(Types.NUMERIC)
.getClass());
assertEquals(BooleanSplitter.class, format.getSplitter(Types.BOOLEAN)
.getClass());
assertEquals(BooleanSplitter.class, format.getSplitter(Types.BIT)
.getClass());
assertEquals(IntegerSplitter.class, format.getSplitter(Types.BIGINT)
.getClass());
assertEquals(IntegerSplitter.class, format.getSplitter(Types.TINYINT)
.getClass());
assertEquals(IntegerSplitter.class, format.getSplitter(Types.SMALLINT)
.getClass());
assertEquals(IntegerSplitter.class, format.getSplitter(Types.INTEGER)
.getClass());
assertEquals(FloatSplitter.class, format.getSplitter(Types.DOUBLE)
.getClass());
assertEquals(FloatSplitter.class, format.getSplitter(Types.REAL).getClass());
assertEquals(FloatSplitter.class, format.getSplitter(Types.FLOAT)
.getClass());
assertEquals(TextSplitter.class, format.getSplitter(Types.LONGVARCHAR)
.getClass());
assertEquals(TextSplitter.class, format.getSplitter(Types.CHAR).getClass());
assertEquals(TextSplitter.class, format.getSplitter(Types.VARCHAR)
.getClass());
    // for an unknown data type the splitter is null
assertNull(format.getSplitter(Types.BINARY));
}
private class OracleDataDrivenDBInputFormatForTest extends
OracleDataDrivenDBInputFormat<NullDBWritable> {
@Override
public DBConfiguration getDBConf() {
String[] names = { "field1", "field2" };
DBConfiguration result = mock(DBConfiguration.class);
when(result.getInputConditions()).thenReturn("conditions");
when(result.getInputFieldNames()).thenReturn(names);
when(result.getInputTableName()).thenReturn("table");
return result;
}
@Override
public Connection getConnection() {
return DriverForTest.getConnection();
}
}
}
| 6,875 | 39.210526 | 159 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/db/TestSplitters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.db;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.ResultSet;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit;
import org.junit.Before;
import org.junit.Test;
/**
 * Test splitters. Each splitter should build the SQL clauses that bound its
 * split results.
 */
public class TestSplitters {
private Configuration configuration;
@Before
public void setup() {
configuration = new Configuration();
configuration.setInt(MRJobConfig.NUM_MAPS, 2);
}
@Test(timeout=2000)
public void testBooleanSplitter() throws Exception{
BooleanSplitter splitter = new BooleanSplitter();
ResultSet result = mock(ResultSet.class);
when(result.getString(1)).thenReturn("result1");
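    // getString(2) is left un-stubbed and returns null, so the splitter emits
    // the FALSE/FALSE split plus an IS NULL split.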
List<InputSplit> splits=splitter.split(configuration, result, "column");
assertSplits(new String[] {"column = FALSE column = FALSE",
"column IS NULL column IS NULL"}, splits);
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
when(result.getBoolean(1)).thenReturn(true);
when(result.getBoolean(2)).thenReturn(false);
splits=splitter.split(configuration, result, "column");
assertEquals(0, splits.size());
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
when(result.getBoolean(1)).thenReturn(false);
when(result.getBoolean(2)).thenReturn(true);
splits = splitter.split(configuration, result, "column");
assertSplits(new String[] {
"column = FALSE column = FALSE", ".*column = TRUE"}, splits);
}
@Test(timeout=2000)
public void testFloatSplitter() throws Exception{
FloatSplitter splitter = new FloatSplitter();
ResultSet results = mock(ResultSet.class);
List<InputSplit> splits = splitter.split(configuration, results, "column");
assertSplits(new String[] {".*column IS NULL"}, splits);
when(results.getString(1)).thenReturn("result1");
when(results.getString(2)).thenReturn("result2");
when(results.getDouble(1)).thenReturn(5.0);
when(results.getDouble(2)).thenReturn(7.0);
splits = splitter.split(configuration, results, "column1");
assertSplits(new String[] {"column1 >= 5.0 column1 < 6.0",
"column1 >= 6.0 column1 <= 7.0"}, splits);
}
@Test(timeout=2000)
public void testBigDecimalSplitter() throws Exception{
BigDecimalSplitter splitter = new BigDecimalSplitter();
ResultSet result = mock(ResultSet.class);
List<InputSplit> splits = splitter.split(configuration, result, "column");
assertSplits(new String[] {".*column IS NULL"}, splits);
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
when(result.getBigDecimal(1)).thenReturn(new BigDecimal(10));
when(result.getBigDecimal(2)).thenReturn(new BigDecimal(12));
splits = splitter.split(configuration, result, "column1");
assertSplits(new String[] {"column1 >= 10 column1 < 11",
"column1 >= 11 column1 <= 12"}, splits);
}
@Test(timeout=2000)
public void testIntegerSplitter() throws Exception{
IntegerSplitter splitter = new IntegerSplitter();
ResultSet result = mock(ResultSet.class);
List<InputSplit> splits = splitter.split(configuration, result, "column");
assertSplits(new String[] {".*column IS NULL"}, splits);
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
when(result.getLong(1)).thenReturn(8L);
when(result.getLong(2)).thenReturn(19L);
splits = splitter.split(configuration, result, "column1");
assertSplits(new String[] {"column1 >= 8 column1 < 13",
"column1 >= 13 column1 < 18", "column1 >= 18 column1 <= 19"}, splits);
}
@Test(timeout=2000)
public void testTextSplitter() throws Exception{
TextSplitter splitter = new TextSplitter();
ResultSet result = mock(ResultSet.class);
List<InputSplit> splits = splitter.split(configuration, result, "column");
assertSplits(new String[] {"column IS NULL column IS NULL"}, splits);
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
splits = splitter.split(configuration, result, "column1");
assertSplits(new String[] {"column1 >= 'result1' column1 < 'result1.'",
"column1 >= 'result1' column1 <= 'result2'"}, splits);
}
private void assertSplits(String[] expectedSplitRE,
List<InputSplit> splits) throws IOException {
assertEquals(expectedSplitRE.length, splits.size());
for (int i = 0; i < expectedSplitRE.length; i++) {
DataDrivenDBInputSplit split = (DataDrivenDBInputSplit) splits.get(i);
String actualExpr = split.getLowerClause() + " " + split.getUpperClause();
assertTrue("Split #" + (i+1) + " expression is wrong."
+ " Expected " + expectedSplitRE[i]
+ " Actual " + actualExpr,
Pattern.matches(expectedSplitRE[i], actualExpr));
}
}
}
| 6,270 | 37.006061 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import javax.annotation.Nullable;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapred.SplitLocationInfo;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
@RunWith(value = Parameterized.class)
public class TestFileInputFormat {
private static final Log LOG = LogFactory.getLog(TestFileInputFormat.class);
private static String testTmpDir = System.getProperty("test.build.data", "/tmp");
private static final Path TEST_ROOT_DIR = new Path(testTmpDir, "TestFIF");
private static FileSystem localFs;
private int numThreads;
public TestFileInputFormat(int numThreads) {
this.numThreads = numThreads;
LOG.info("Running with numThreads: " + numThreads);
}
@Parameters
public static Collection<Object[]> data() {
Object[][] data = new Object[][] { { 1 }, { 5 }};
return Arrays.asList(data);
}
@Before
public void setup() throws IOException {
LOG.info("Using Test Dir: " + TEST_ROOT_DIR);
localFs = FileSystem.getLocal(new Configuration());
localFs.delete(TEST_ROOT_DIR, true);
localFs.mkdirs(TEST_ROOT_DIR);
}
@After
public void cleanup() throws IOException {
localFs.delete(TEST_ROOT_DIR, true);
}
@Test
public void testNumInputFilesRecursively() throws Exception {
Configuration conf = getConfiguration();
conf.set(FileInputFormat.INPUT_DIR_RECURSIVE, "true");
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
List<InputSplit> splits = fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct", 3, splits.size());
verifySplits(Lists.newArrayList("test:/a1/a2/file2", "test:/a1/a2/file3",
"test:/a1/file1"), splits);
// Using the deprecated configuration
conf = getConfiguration();
conf.set("mapred.input.dir.recursive", "true");
job = Job.getInstance(conf);
splits = fileInputFormat.getSplits(job);
verifySplits(Lists.newArrayList("test:/a1/a2/file2", "test:/a1/a2/file3",
"test:/a1/file1"), splits);
}
@Test
public void testNumInputFilesWithoutRecursively() throws Exception {
Configuration conf = getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
List<InputSplit> splits = fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct", 2, splits.size());
verifySplits(Lists.newArrayList("test:/a1/a2", "test:/a1/file1"), splits);
}
@Test
public void testListLocatedStatus() throws Exception {
Configuration conf = getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
conf.setBoolean("fs.test.impl.disable.cache", false);
conf.set(FileInputFormat.INPUT_DIR, "test:///a1/a2");
MockFileSystem mockFs =
(MockFileSystem) new Path("test:///").getFileSystem(conf);
Assert.assertEquals("listLocatedStatus already called",
0, mockFs.numListLocatedStatusCalls);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
List<InputSplit> splits = fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct", 2, splits.size());
Assert.assertEquals("listLocatedStatuss calls",
1, mockFs.numListLocatedStatusCalls);
FileSystem.closeAll();
}
@Test
public void testSplitLocationInfo() throws Exception {
Configuration conf = getConfiguration();
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
"test:///a1/a2");
Job job = Job.getInstance(conf);
TextInputFormat fileInputFormat = new TextInputFormat();
List<InputSplit> splits = fileInputFormat.getSplits(job);
String[] locations = splits.get(0).getLocations();
Assert.assertEquals(2, locations.length);
SplitLocationInfo[] locationInfo = splits.get(0).getLocationInfo();
Assert.assertEquals(2, locationInfo.length);
SplitLocationInfo localhostInfo = locations[0].equals("localhost") ?
locationInfo[0] : locationInfo[1];
SplitLocationInfo otherhostInfo = locations[0].equals("otherhost") ?
locationInfo[0] : locationInfo[1];
Assert.assertTrue(localhostInfo.isOnDisk());
Assert.assertTrue(localhostInfo.isInMemory());
Assert.assertTrue(otherhostInfo.isOnDisk());
Assert.assertFalse(otherhostInfo.isInMemory());
}
@Test
public void testListStatusSimple() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = configureTestSimple(conf, localFs);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fif = new TextInputFormat();
List<FileStatus> statuses = fif.listStatus(job);
verifyFileStatuses(expectedPaths, statuses, localFs);
}
@Test
public void testListStatusNestedRecursive() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = configureTestNestedRecursive(conf, localFs);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fif = new TextInputFormat();
List<FileStatus> statuses = fif.listStatus(job);
verifyFileStatuses(expectedPaths, statuses, localFs);
}
@Test
public void testListStatusNestedNonRecursive() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = configureTestNestedNonRecursive(conf, localFs);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fif = new TextInputFormat();
List<FileStatus> statuses = fif.listStatus(job);
verifyFileStatuses(expectedPaths, statuses, localFs);
}
@Test
public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
configureTestErrorOnNonExistantDir(conf, localFs);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fif = new TextInputFormat();
try {
fif.listStatus(job);
Assert.fail("Expecting an IOException for a missing Input path");
} catch (IOException e) {
Path expectedExceptionPath = new Path(TEST_ROOT_DIR, "input2");
expectedExceptionPath = localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals(
"Input path does not exist: " + expectedExceptionPath.toString(),
e.getMessage());
}
}
public static List<Path> configureTestSimple(Configuration conf, FileSystem localFs)
throws IOException {
Path base1 = new Path(TEST_ROOT_DIR, "input1");
Path base2 = new Path(TEST_ROOT_DIR, "input2");
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
localFs.mkdirs(base1);
localFs.mkdirs(base2);
Path in1File1 = new Path(base1, "file1");
Path in1File2 = new Path(base1, "file2");
localFs.createNewFile(in1File1);
localFs.createNewFile(in1File2);
Path in2File1 = new Path(base2, "file1");
Path in2File2 = new Path(base2, "file2");
localFs.createNewFile(in2File1);
localFs.createNewFile(in2File2);
List<Path> expectedPaths = Lists.newArrayList(in1File1, in1File2, in2File1,
in2File2);
return expectedPaths;
}
public static List<Path> configureTestNestedRecursive(Configuration conf,
FileSystem localFs) throws IOException {
Path base1 = new Path(TEST_ROOT_DIR, "input1");
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
localFs.makeQualified(base1).toString());
conf.setBoolean(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
true);
localFs.mkdirs(base1);
Path inDir1 = new Path(base1, "dir1");
Path inDir2 = new Path(base1, "dir2");
Path inFile1 = new Path(base1, "file1");
Path dir1File1 = new Path(inDir1, "file1");
Path dir1File2 = new Path(inDir1, "file2");
Path dir2File1 = new Path(inDir2, "file1");
Path dir2File2 = new Path(inDir2, "file2");
localFs.mkdirs(inDir1);
localFs.mkdirs(inDir2);
localFs.createNewFile(inFile1);
localFs.createNewFile(dir1File1);
localFs.createNewFile(dir1File2);
localFs.createNewFile(dir2File1);
localFs.createNewFile(dir2File2);
List<Path> expectedPaths = Lists.newArrayList(inFile1, dir1File1,
dir1File2, dir2File1, dir2File2);
return expectedPaths;
}
public static List<Path> configureTestNestedNonRecursive(Configuration conf,
FileSystem localFs) throws IOException {
Path base1 = new Path(TEST_ROOT_DIR, "input1");
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
localFs.makeQualified(base1).toString());
conf.setBoolean(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
false);
localFs.mkdirs(base1);
Path inDir1 = new Path(base1, "dir1");
Path inDir2 = new Path(base1, "dir2");
Path inFile1 = new Path(base1, "file1");
Path dir1File1 = new Path(inDir1, "file1");
Path dir1File2 = new Path(inDir1, "file2");
Path dir2File1 = new Path(inDir2, "file1");
Path dir2File2 = new Path(inDir2, "file2");
localFs.mkdirs(inDir1);
localFs.mkdirs(inDir2);
localFs.createNewFile(inFile1);
localFs.createNewFile(dir1File1);
localFs.createNewFile(dir1File2);
localFs.createNewFile(dir2File1);
localFs.createNewFile(dir2File2);
List<Path> expectedPaths = Lists.newArrayList(inFile1, inDir1, inDir2);
return expectedPaths;
}
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
FileSystem localFs) throws IOException {
Path base1 = new Path(TEST_ROOT_DIR, "input1");
Path base2 = new Path(TEST_ROOT_DIR, "input2");
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
conf.setBoolean(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
true);
localFs.mkdirs(base1);
Path inFile1 = new Path(base1, "file1");
Path inFile2 = new Path(base1, "file2");
localFs.createNewFile(inFile1);
localFs.createNewFile(inFile2);
List<Path> expectedPaths = Lists.newArrayList();
return expectedPaths;
}
public static void verifyFileStatuses(List<Path> expectedPaths,
List<FileStatus> fetchedStatuses, final FileSystem localFs) {
Assert.assertEquals(expectedPaths.size(), fetchedStatuses.size());
Iterable<Path> fqExpectedPaths = Iterables.transform(expectedPaths,
new Function<Path, Path>() {
@Override
public Path apply(Path input) {
return localFs.makeQualified(input);
}
});
Set<Path> expectedPathSet = Sets.newHashSet(fqExpectedPaths);
for (FileStatus fileStatus : fetchedStatuses) {
if (!expectedPathSet.remove(localFs.makeQualified(fileStatus.getPath()))) {
Assert.fail("Found extra fetched status: " + fileStatus.getPath());
}
}
Assert.assertEquals(
"Not all expectedPaths matched: " + expectedPathSet.toString(), 0,
expectedPathSet.size());
}
private void verifySplits(List<String> expected, List<InputSplit> splits) {
Iterable<String> pathsFromSplits = Iterables.transform(splits,
new Function<InputSplit, String>() {
@Override
public String apply(@Nullable InputSplit input) {
return ((FileSplit) input).getPath().toString();
}
});
Set<String> expectedSet = Sets.newHashSet(expected);
for (String splitPathString : pathsFromSplits) {
if (!expectedSet.remove(splitPathString)) {
Assert.fail("Found extra split: " + splitPathString);
}
}
Assert.assertEquals(
"Not all expectedPaths matched: " + expectedSet.toString(), 0,
expectedSet.size());
}
private Configuration getConfiguration() {
Configuration conf = new Configuration();
conf.set("fs.test.impl.disable.cache", "true");
conf.setClass("fs.test.impl", MockFileSystem.class, FileSystem.class);
conf.set(FileInputFormat.INPUT_DIR, "test:///a1");
return conf;
}
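  /**
   * A mock file system exposing a fixed tree: test:/a1 contains file1 and the
   * directory a2, which in turn contains file2 and file3. Block locations
   * report replicas on localhost and otherhost, with localhost also cached
   * in memory.
   */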
static class MockFileSystem extends RawLocalFileSystem {
int numListLocatedStatusCalls = 0;
@Override
public FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException {
if (f.toString().equals("test:/a1")) {
return new FileStatus[] {
new FileStatus(0, true, 1, 150, 150, new Path("test:/a1/a2")),
new FileStatus(10, false, 1, 150, 150, new Path("test:/a1/file1")) };
} else if (f.toString().equals("test:/a1/a2")) {
return new FileStatus[] {
new FileStatus(10, false, 1, 150, 150,
new Path("test:/a1/a2/file2")),
new FileStatus(10, false, 1, 151, 150,
new Path("test:/a1/a2/file3")) };
}
return new FileStatus[0];
}
@Override
public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
throws IOException {
return new FileStatus[] { new FileStatus(10, true, 1, 150, 150,
pathPattern) };
}
@Override
public FileStatus[] listStatus(Path f, PathFilter filter)
throws FileNotFoundException, IOException {
return this.listStatus(f);
}
@Override
public BlockLocation[] getFileBlockLocations(Path p, long start, long len)
throws IOException {
return new BlockLocation[] {
new BlockLocation(new String[] { "localhost:50010", "otherhost:50010" },
new String[] { "localhost", "otherhost" }, new String[] { "localhost" },
              new String[0], 0, len, false) };
    }
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f,
PathFilter filter) throws FileNotFoundException, IOException {
++numListLocatedStatusCalls;
return super.listLocatedStatus(f, filter);
}
}
}
| 16,355 | 36.172727 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.FileWriter;
import java.io.IOException;
import java.io.File;
import org.junit.Assert;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapred.Task.TaskReporter;
import org.mockito.Mockito;
import org.junit.Test;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class TestCombineFileRecordReader {
private static Path outDir = new Path(System.getProperty("test.build.data",
"/tmp"), TestCombineFileRecordReader.class.getName());
private static class TextRecordReaderWrapper
extends CombineFileRecordReaderWrapper<LongWritable,Text> {
// this constructor signature is required by CombineFileRecordReader
public TextRecordReaderWrapper(org.apache.hadoop.mapreduce.lib.input.CombineFileSplit split,
TaskAttemptContext context, Integer idx)
throws IOException, InterruptedException {
super(new TextInputFormat(), split, context, idx);
}
}
@SuppressWarnings("unchecked")
@Test
public void testProgressIsReportedIfInputASeriesOfEmptyFiles() throws IOException, InterruptedException {
JobConf conf = new JobConf();
Path[] paths = new Path[3];
File[] files = new File[3];
long[] fileLength = new long[3];
try {
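      // Create three empty files on disk; the split records nominal lengths of
      // 0, 1 and 2 for them.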
      for (int i = 0; i < 3; i++) {
File dir = new File(outDir.toString());
dir.mkdir();
files[i] = new File(dir,"testfile"+i);
FileWriter fileWriter = new FileWriter(files[i]);
fileWriter.flush();
fileWriter.close();
fileLength[i] = i;
paths[i] = new Path(outDir+"/testfile"+i);
}
CombineFileSplit combineFileSplit = new CombineFileSplit(paths, fileLength);
TaskAttemptID taskAttemptID = Mockito.mock(TaskAttemptID.class);
TaskReporter reporter = Mockito.mock(TaskReporter.class);
TaskAttemptContextImpl taskAttemptContext =
new TaskAttemptContextImpl(conf, taskAttemptID,reporter);
CombineFileRecordReader cfrr = new CombineFileRecordReader(combineFileSplit,
taskAttemptContext, TextRecordReaderWrapper.class);
cfrr.initialize(combineFileSplit,taskAttemptContext);
verify(reporter).progress();
Assert.assertFalse(cfrr.nextKeyValue());
verify(reporter, times(3)).progress();
} finally {
FileUtil.fullyDelete(new File(outDir.toString()));
}
}
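  // A minimal usage sketch (not part of the original suite): it shows the
  // reader yielding actual records when the split references a non-empty file,
  // complementing the empty-file progress test above. The file name and the
  // expected record count are illustrative assumptions only.
  @SuppressWarnings("unchecked")
  @Test
  public void testReadsRecordsFromNonEmptyFile()
      throws IOException, InterruptedException {
    JobConf conf = new JobConf();
    try {
      File dir = new File(outDir.toString());
      dir.mkdirs();
      File file = new File(dir, "sketchfile");
      FileWriter fileWriter = new FileWriter(file);
      fileWriter.write("first line\nsecond line\n");
      fileWriter.close();
      Path[] paths = new Path[] { new Path(outDir + "/sketchfile") };
      long[] lengths = new long[] { file.length() };
      CombineFileSplit combineFileSplit = new CombineFileSplit(paths, lengths);
      TaskAttemptID taskAttemptID = Mockito.mock(TaskAttemptID.class);
      TaskReporter reporter = Mockito.mock(TaskReporter.class);
      TaskAttemptContextImpl taskAttemptContext =
          new TaskAttemptContextImpl(conf, taskAttemptID, reporter);
      CombineFileRecordReader cfrr = new CombineFileRecordReader(combineFileSplit,
          taskAttemptContext, TextRecordReaderWrapper.class);
      cfrr.initialize(combineFileSplit, taskAttemptContext);
      int records = 0;
      while (cfrr.nextKeyValue()) {
        ++records;
      }
      // two newline-terminated lines were written, so two records are expected
      Assert.assertEquals(2, records);
    } finally {
      FileUtil.fullyDelete(new File(outDir.toString()));
    }
  }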
}
| 3,534 | 35.443299 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.junit.Test;
public class TestLineRecordReader {
private static Path workDir = new Path(new Path(System.getProperty(
"test.build.data", "target"), "data"), "TestTextInputFormat");
private static Path inputDir = new Path(workDir, "input");
private void testSplitRecords(String testFileName, long firstSplitLength)
throws IOException {
URL testFileUrl = getClass().getClassLoader().getResource(testFileName);
assertNotNull("Cannot find " + testFileName, testFileUrl);
File testFile = new File(testFileUrl.getFile());
long testFileSize = testFile.length();
Path testFilePath = new Path(testFile.getAbsolutePath());
Configuration conf = new Configuration();
testSplitRecordsForFile(conf, firstSplitLength, testFileSize, testFilePath);
}
private void testSplitRecordsForFile(Configuration conf,
long firstSplitLength, long testFileSize, Path testFilePath)
throws IOException {
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
assertTrue("unexpected test data at " + testFilePath,
testFileSize > firstSplitLength);
String delimiter = conf.get("textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter) {
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
}
TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
// read the data without splitting to count the records
FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
(String[])null);
LineRecordReader reader = new LineRecordReader(recordDelimiterBytes);
reader.initialize(split, context);
int numRecordsNoSplits = 0;
while (reader.nextKeyValue()) {
++numRecordsNoSplits;
}
reader.close();
// count the records in the first split
split = new FileSplit(testFilePath, 0, firstSplitLength, (String[])null);
reader = new LineRecordReader(recordDelimiterBytes);
reader.initialize(split, context);
int numRecordsFirstSplit = 0;
while (reader.nextKeyValue()) {
++numRecordsFirstSplit;
}
reader.close();
// count the records in the second split
split = new FileSplit(testFilePath, firstSplitLength,
testFileSize - firstSplitLength, (String[])null);
reader = new LineRecordReader(recordDelimiterBytes);
reader.initialize(split, context);
int numRecordsRemainingSplits = 0;
while (reader.nextKeyValue()) {
++numRecordsRemainingSplits;
}
reader.close();
assertEquals("Unexpected number of records in split ", numRecordsNoSplits,
numRecordsFirstSplit + numRecordsRemainingSplits);
}
@Test
public void testBzip2SplitEndsAtCR() throws IOException {
// the test data contains a carriage-return at the end of the first
// split which ends at compressed offset 136498 and the next
// character is not a linefeed
testSplitRecords("blockEndingInCR.txt.bz2", 136498);
}
@Test
public void testBzip2SplitEndsAtCRThenLF() throws IOException {
// the test data contains a carriage-return at the end of the first
// split which ends at compressed offset 136498 and the next
// character is a linefeed
testSplitRecords("blockEndingInCRThenLF.txt.bz2", 136498);
}
@Test(expected=IOException.class)
public void testSafeguardSplittingUnsplittableFiles() throws IOException {
// The LineRecordReader must fail when trying to read a file that
// was compressed using an unsplittable file format
testSplitRecords("TestSafeguardSplittingUnsplittableFiles.txt.gz", 2);
}
// Use the LineRecordReader to read records from the file
public ArrayList<String> readRecords(URL testFileUrl, int splitSize)
throws IOException {
// Set up context
File testFile = new File(testFileUrl.getFile());
long testFileSize = testFile.length();
Path testFilePath = new Path(testFile.getAbsolutePath());
Configuration conf = new Configuration();
conf.setInt("io.file.buffer.size", 1);
TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
// Gather the records returned by the record reader
ArrayList<String> records = new ArrayList<String>();
long offset = 0;
while (offset < testFileSize) {
FileSplit split = new FileSplit(testFilePath, offset, splitSize, null);
LineRecordReader reader = new LineRecordReader();
reader.initialize(split, context);
while (reader.nextKeyValue()) {
records.add(reader.getCurrentValue().toString());
}
offset += splitSize;
}
return records;
}
// Gather the records by just splitting on new lines
public String[] readRecordsDirectly(URL testFileUrl, boolean bzip)
throws IOException {
int MAX_DATA_SIZE = 1024 * 1024;
byte[] data = new byte[MAX_DATA_SIZE];
FileInputStream fis = new FileInputStream(testFileUrl.getFile());
int count;
if (bzip) {
BZip2CompressorInputStream bzIn = new BZip2CompressorInputStream(fis);
count = bzIn.read(data);
bzIn.close();
} else {
count = fis.read(data);
}
fis.close();
assertTrue("Test file data too big for buffer", count < data.length);
return new String(data, 0, count, "UTF-8").split("\n");
}
public void checkRecordSpanningMultipleSplits(String testFile,
int splitSize,
boolean bzip)
throws IOException {
URL testFileUrl = getClass().getClassLoader().getResource(testFile);
ArrayList<String> records = readRecords(testFileUrl, splitSize);
String[] actuals = readRecordsDirectly(testFileUrl, bzip);
assertEquals("Wrong number of records", actuals.length, records.size());
boolean hasLargeRecord = false;
for (int i = 0; i < actuals.length; ++i) {
assertEquals(actuals[i], records.get(i));
if (actuals[i].length() > 2 * splitSize) {
hasLargeRecord = true;
}
}
assertTrue("Invalid test data. Doesn't have a large enough record",
hasLargeRecord);
}
@Test
public void testRecordSpanningMultipleSplits()
throws IOException {
checkRecordSpanningMultipleSplits("recordSpanningMultipleSplits.txt",
10,
false);
}
@Test
public void testRecordSpanningMultipleSplitsCompressed()
throws IOException {
// The file is generated with bz2 block size of 100k. The split size
// needs to be larger than that for the CompressedSplitLineReader to
// work.
checkRecordSpanningMultipleSplits("recordSpanningMultipleSplits.txt.bz2",
200 * 1000,
true);
}
@Test
public void testStripBOM() throws IOException {
// the test data contains a BOM at the start of the file
// confirm the BOM is skipped by LineRecordReader
String UTF8_BOM = "\uFEFF";
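    // "\uFEFF" is the Unicode BOM; in UTF-8 it is the three-byte sequence
    // EF BB BF at the very start of the file, which the reader should drop
    // from the first record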
URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
assertNotNull("Cannot find testBOM.txt", testFileUrl);
File testFile = new File(testFileUrl.getFile());
Path testFilePath = new Path(testFile.getAbsolutePath());
long testFileSize = testFile.length();
Configuration conf = new Configuration();
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
// read the data and check whether BOM is skipped
FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
(String[])null);
LineRecordReader reader = new LineRecordReader();
reader.initialize(split, context);
int numRecords = 0;
boolean firstLine = true;
boolean skipBOM = true;
while (reader.nextKeyValue()) {
if (firstLine) {
firstLine = false;
if (reader.getCurrentValue().toString().startsWith(UTF8_BOM)) {
skipBOM = false;
}
}
++numRecords;
}
reader.close();
assertTrue("BOM is not skipped", skipBOM);
}
@Test
public void testMultipleClose() throws IOException {
URL testFileUrl = getClass().getClassLoader().
getResource("recordSpanningMultipleSplits.txt.bz2");
assertNotNull("Cannot find recordSpanningMultipleSplits.txt.bz2",
testFileUrl);
File testFile = new File(testFileUrl.getFile());
Path testFilePath = new Path(testFile.getAbsolutePath());
long testFileSize = testFile.length();
Configuration conf = new Configuration();
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    // read all of the records, then close the reader twice
FileSplit split = new FileSplit(testFilePath, 0, testFileSize, null);
LineRecordReader reader = new LineRecordReader();
reader.initialize(split, context);
//noinspection StatementWithEmptyBody
while (reader.nextKeyValue()) ;
reader.close();
reader.close();
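    // closing twice must be harmless: the reader's decompressor should be
    // returned to the codec pool only once, which the pool check below verifies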
BZip2Codec codec = new BZip2Codec();
codec.setConf(conf);
Set<Decompressor> decompressors = new HashSet<Decompressor>();
for (int i = 0; i < 10; ++i) {
decompressors.add(CodecPool.getDecompressor(codec));
}
assertEquals(10, decompressors.size());
}
/**
* Writes the input test file
*
   * @param conf the configuration used to obtain the local file system
   * @param data the content to write to the file
* @return Path of the file created
* @throws IOException
*/
private Path createInputFile(Configuration conf, String data)
throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path file = new Path(inputDir, "test.txt");
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
writer.write(data);
} finally {
writer.close();
}
return file;
}
@Test
public void testUncompressedInput() throws Exception {
Configuration conf = new Configuration();
String inputData = "abc+++def+++ghi+++"
+ "jkl+++mno+++pqr+++stu+++vw +++xyz";
Path inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+++");
for(int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for(int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
}
@Test
public void testUncompressedInputContainingCRLF() throws Exception {
Configuration conf = new Configuration();
String inputData = "a\r\nb\rc\nd\r\n";
Path inputFile = createInputFile(conf, inputData);
for(int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for(int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
}
}
| 12,978 | 36.62029 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
public class TestJobEndNotifier extends TestCase {
HttpServer2 server;
URL baseUrl;
@SuppressWarnings("serial")
public static class JobEndServlet extends HttpServlet {
public static volatile int calledTimes = 0;
public static URI requestUri;
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
InputStreamReader in = new InputStreamReader(request.getInputStream());
PrintStream out = new PrintStream(response.getOutputStream());
calledTimes++;
try {
requestUri = new URI(null, null,
request.getRequestURI(), request.getQueryString(), null);
} catch (URISyntaxException e) {
}
in.close();
out.close();
}
}
// Servlet that delays requests for a long time
@SuppressWarnings("serial")
public static class DelayServlet extends HttpServlet {
public static volatile int calledTimes = 0;
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
boolean timedOut = false;
calledTimes++;
try {
// Sleep for a long time
Thread.sleep(1000000);
} catch (InterruptedException e) {
timedOut = true;
}
assertTrue("DelayServlet should be interrupted", timedOut);
}
}
// Servlet that fails all requests into it
@SuppressWarnings("serial")
public static class FailServlet extends HttpServlet {
public static volatile int calledTimes = 0;
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
calledTimes++;
throw new IOException("I am failing!");
}
}
public void setUp() throws Exception {
new File(System.getProperty("build.webapps", "build/webapps") + "/test"
).mkdirs();
server = new HttpServer2.Builder().setName("test")
.addEndpoint(URI.create("http://localhost:0"))
.setFindPort(true).build();
server.addServlet("delay", "/delay", DelayServlet.class);
server.addServlet("jobend", "/jobend", JobEndServlet.class);
server.addServlet("fail", "/fail", FailServlet.class);
server.start();
int port = server.getConnectorAddress(0).getPort();
baseUrl = new URL("http://localhost:" + port + "/");
JobEndServlet.calledTimes = 0;
JobEndServlet.requestUri = null;
DelayServlet.calledTimes = 0;
FailServlet.calledTimes = 0;
}
public void tearDown() throws Exception {
server.stop();
}
/**
* Basic validation for localRunnerNotification.
*/
public void testLocalJobRunnerUriSubstitution() throws InterruptedException {
JobStatus jobStatus = createTestJobStatus(
"job_20130313155005308_0001", JobStatus.SUCCEEDED);
JobConf jobConf = createTestJobConf(
new Configuration(), 0,
baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobEndNotifier.localRunnerNotification(jobConf, jobStatus);
// No need to wait for the notification to go thru since calls are
// synchronous
// Validate params
assertEquals(1, JobEndServlet.calledTimes);
assertEquals("jobid=job_20130313155005308_0001&status=SUCCEEDED",
JobEndServlet.requestUri.getQuery());
}
/**
* Validate job.end.retry.attempts for the localJobRunner.
*/
public void testLocalJobRunnerRetryCount() throws InterruptedException {
int retryAttempts = 3;
JobStatus jobStatus = createTestJobStatus(
"job_20130313155005308_0001", JobStatus.SUCCEEDED);
JobConf jobConf = createTestJobConf(
new Configuration(), retryAttempts, baseUrl + "fail");
JobEndNotifier.localRunnerNotification(jobConf, jobStatus);
// Validate params
assertEquals(retryAttempts + 1, FailServlet.calledTimes);
}
/**
* Validate that the notification times out after reaching
* mapreduce.job.end-notification.timeout.
*/
public void testNotificationTimeout() throws InterruptedException {
Configuration conf = new Configuration();
// Reduce the timeout to 1 second
conf.setInt("mapreduce.job.end-notification.timeout", 1000);
JobStatus jobStatus = createTestJobStatus(
"job_20130313155005308_0001", JobStatus.SUCCEEDED);
JobConf jobConf = createTestJobConf(
conf, 0,
baseUrl + "delay");
long startTime = System.currentTimeMillis();
JobEndNotifier.localRunnerNotification(jobConf, jobStatus);
long elapsedTime = System.currentTimeMillis() - startTime;
// Validate params
assertEquals(1, DelayServlet.calledTimes);
// Make sure we timed out with time slightly above 1 second
// (default timeout is in terms of minutes, so we'll catch the problem)
assertTrue(elapsedTime < 2000);
}
private static JobStatus createTestJobStatus(String jobId, int state) {
return new JobStatus(
JobID.forName(jobId), 0.5f, 0.0f,
state, "root", "TestJobEndNotifier", null, null);
}
private static JobConf createTestJobConf(
Configuration conf, int retryAttempts, String notificationUri) {
JobConf jobConf = new JobConf(conf);
jobConf.setInt("job.end.retry.attempts", retryAttempts);
jobConf.set("job.end.retry.interval", "0");
jobConf.setJobEndNotificationURI(notificationUri);
return jobConf;
}
}
| 6,834 | 33.175 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * test class JobInfo
 *
*/
public class TestJobInfo {
@Test (timeout=5000)
public void testJobInfo() throws IOException {
JobID jid = new JobID("001", 1);
Text user = new Text("User");
Path path = new Path("/tmp/test");
JobInfo info = new JobInfo(jid, user, path);
ByteArrayOutputStream out = new ByteArrayOutputStream();
info.write(new DataOutputStream(out));
JobInfo copyinfo = new JobInfo();
copyinfo.readFields(new DataInputStream(new ByteArrayInputStream(out
.toByteArray())));
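    // JobInfo is a Writable, so a write()/readFields() round trip through a
    // byte stream must preserve the job id, submit dir and user checked below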
assertEquals(info.getJobID().toString(), copyinfo.getJobID().toString());
assertEquals(info.getJobSubmitDir().getName(), copyinfo.getJobSubmitDir()
.getName());
assertEquals(info.getUser().toString(), copyinfo.getUser().toString());
}
@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
JobID jobid = new JobID("1014873536921", 6);
TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
org.apache.hadoop.mapred.TaskID tid1 =
org.apache.hadoop.mapred.TaskID.downgrade(tid);
org.apache.hadoop.mapred.TaskReport treport =
new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
new org.apache.hadoop.mapred.Counters());
Assert
.assertEquals(treport.getTaskId(), "task_1014873536921_0006_m_000000");
Assert.assertEquals(treport.getTaskID().toString(),
"task_1014873536921_0006_m_000000");
}
}
| 2,865 | 35.74359 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.Lists;
@RunWith(value = Parameterized.class)
public class TestFileInputFormat {
private static final Log LOG = LogFactory.getLog(TestFileInputFormat.class);
private static String testTmpDir = System.getProperty("test.build.data", "/tmp");
private static final Path TEST_ROOT_DIR = new Path(testTmpDir, "TestFIF");
private static FileSystem localFs;
private int numThreads;
public TestFileInputFormat(int numThreads) {
this.numThreads = numThreads;
LOG.info("Running with numThreads: " + numThreads);
}
@Parameters
public static Collection<Object[]> data() {
Object[][] data = new Object[][] { { 1 }, { 5 }};
return Arrays.asList(data);
}
@Before
public void setup() throws IOException {
LOG.info("Using Test Dir: " + TEST_ROOT_DIR);
localFs = FileSystem.getLocal(new Configuration());
localFs.delete(TEST_ROOT_DIR, true);
localFs.mkdirs(TEST_ROOT_DIR);
}
@After
public void cleanup() throws IOException {
localFs.delete(TEST_ROOT_DIR, true);
}
@Test
public void testListLocatedStatus() throws Exception {
Configuration conf = getConfiguration();
conf.setBoolean("fs.test.impl.disable.cache", false);
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
"test:///a1/a2");
MockFileSystem mockFs =
(MockFileSystem) new Path("test:///").getFileSystem(conf);
Assert.assertEquals("listLocatedStatus already called",
0, mockFs.numListLocatedStatusCalls);
JobConf job = new JobConf(conf);
TextInputFormat fileInputFormat = new TextInputFormat();
fileInputFormat.configure(job);
InputSplit[] splits = fileInputFormat.getSplits(job, 1);
Assert.assertEquals("Input splits are not correct", 2, splits.length);
Assert.assertEquals("listLocatedStatuss calls",
1, mockFs.numListLocatedStatusCalls);
FileSystem.closeAll();
}
@Test
public void testSplitLocationInfo() throws Exception {
Configuration conf = getConfiguration();
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
"test:///a1/a2");
JobConf job = new JobConf(conf);
TextInputFormat fileInputFormat = new TextInputFormat();
fileInputFormat.configure(job);
FileSplit[] splits = (FileSplit[]) fileInputFormat.getSplits(job, 1);
String[] locations = splits[0].getLocations();
Assert.assertEquals(2, locations.length);
SplitLocationInfo[] locationInfo = splits[0].getLocationInfo();
Assert.assertEquals(2, locationInfo.length);
SplitLocationInfo localhostInfo = locations[0].equals("localhost") ?
locationInfo[0] : locationInfo[1];
SplitLocationInfo otherhostInfo = locations[0].equals("otherhost") ?
locationInfo[0] : locationInfo[1];
Assert.assertTrue(localhostInfo.isOnDisk());
Assert.assertTrue(localhostInfo.isInMemory());
Assert.assertTrue(otherhostInfo.isOnDisk());
Assert.assertFalse(otherhostInfo.isInMemory());
}
@Test
public void testListStatusSimple() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.configureTestSimple(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses),
localFs);
}
@Test
public void testListStatusNestedRecursive() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.configureTestNestedRecursive(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses),
localFs);
}
@Test
public void testListStatusNestedNonRecursive() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.configureTestNestedNonRecursive(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses),
localFs);
}
@Test
public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.configureTestErrorOnNonExistantDir(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
try {
fif.listStatus(jobConf);
Assert.fail("Expecting an IOException for a missing Input path");
} catch (IOException e) {
Path expectedExceptionPath = new Path(TEST_ROOT_DIR, "input2");
expectedExceptionPath = localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals(
"Input path does not exist: " + expectedExceptionPath.toString(),
e.getMessage());
}
}
private Configuration getConfiguration() {
Configuration conf = new Configuration();
conf.set("fs.test.impl.disable.cache", "true");
conf.setClass("fs.test.impl", MockFileSystem.class, FileSystem.class);
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
"test:///a1");
return conf;
}
static class MockFileSystem extends RawLocalFileSystem {
int numListLocatedStatusCalls = 0;
@Override
public FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException {
if (f.toString().equals("test:/a1")) {
return new FileStatus[] {
new FileStatus(0, true, 1, 150, 150, new Path("test:/a1/a2")),
new FileStatus(10, false, 1, 150, 150, new Path("test:/a1/file1")) };
} else if (f.toString().equals("test:/a1/a2")) {
return new FileStatus[] {
new FileStatus(10, false, 1, 150, 150,
new Path("test:/a1/a2/file2")),
new FileStatus(10, false, 1, 151, 150,
new Path("test:/a1/a2/file3")) };
}
return new FileStatus[0];
}
@Override
public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
throws IOException {
return new FileStatus[] { new FileStatus(10, true, 1, 150, 150,
pathPattern) };
}
@Override
public FileStatus[] listStatus(Path f, PathFilter filter)
throws FileNotFoundException, IOException {
return this.listStatus(f);
}
@Override
public BlockLocation[] getFileBlockLocations(Path p, long start, long len)
throws IOException {
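      // names the two datanode addresses, the hosts holding the block on disk,
      // and (third array) the hosts caching it in memory: only localhost is
      // in-memory, which is what testSplitLocationInfo asserts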
return new BlockLocation[] {
new BlockLocation(new String[] { "localhost:50010", "otherhost:50010" },
new String[] { "localhost", "otherhost" }, new String[] { "localhost" },
new String[0], 0, len, false) };
}
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f,
PathFilter filter) throws FileNotFoundException, IOException {
++numListLocatedStatusCalls;
return super.listLocatedStatus(f, filter);
}
}
}
| 9,898 | 36.782443 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.mapred.SortedRanges.Range;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
public class TestTaskProgressReporter {
private static int statusUpdateTimes = 0;
private FakeUmbilical fakeUmbilical = new FakeUmbilical();
private static class DummyTask extends Task {
@Override
public void run(JobConf job, TaskUmbilicalProtocol umbilical)
throws IOException, ClassNotFoundException, InterruptedException {
}
@Override
public boolean isMapTask() {
return true;
}
}
private static class FakeUmbilical implements TaskUmbilicalProtocol {
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return 0;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return null;
}
@Override
public JvmTask getTask(JvmContext context) throws IOException {
return null;
}
@Override
public boolean statusUpdate(TaskAttemptID taskId,
TaskStatus taskStatus) throws IOException, InterruptedException {
return true;
}
@Override
public void reportDiagnosticInfo(TaskAttemptID taskid, String trace)
throws IOException {
}
@Override
public void reportNextRecordRange(TaskAttemptID taskid, Range range)
throws IOException {
}
@Override
public void done(TaskAttemptID taskid) throws IOException {
}
@Override
public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException {
}
@Override
public boolean canCommit(TaskAttemptID taskid) throws IOException {
return false;
}
@Override
public void shuffleError(TaskAttemptID taskId, String message)
throws IOException {
}
@Override
public void fsError(TaskAttemptID taskId, String message)
throws IOException {
}
@Override
public void fatalError(TaskAttemptID taskId, String message)
throws IOException {
}
@Override
public MapTaskCompletionEventsUpdate getMapCompletionEvents(
JobID jobId, int fromIndex, int maxLocs, TaskAttemptID id)
throws IOException {
return null;
}
@Override
public boolean ping(TaskAttemptID taskid) throws IOException {
statusUpdateTimes++;
return true;
}
}
private class DummyTaskReporter extends Task.TaskReporter {
public DummyTaskReporter(Task task) {
task.super(task.getProgress(), fakeUmbilical);
}
@Override
public void setProgress(float progress) {
super.setProgress(progress);
}
}
@Test (timeout=10000)
public void testTaskProgress() throws Exception {
JobConf job = new JobConf();
job.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 1000);
Task task = new DummyTask();
task.setConf(job);
DummyTaskReporter reporter = new DummyTaskReporter(task);
Thread t = new Thread(reporter);
t.start();
Thread.sleep(2100);
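    // with a 1000 ms report interval, ~2.1 s of idle time should produce
    // exactly two pings; now mark the task done so the reporter loop exits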
task.setTaskDone();
reporter.resetDoneFlag();
t.join();
    Assert.assertEquals(2, statusUpdateTimes);
}
}
| 4,174 | 27.401361 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Random;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import junit.framework.TestCase;
public class TestIndexCache extends TestCase {
private JobConf conf;
private FileSystem fs;
private Path p;
@Override
public void setUp() throws IOException {
conf = new JobConf();
fs = FileSystem.getLocal(conf).getRaw();
p = new Path(System.getProperty("test.build.data", "/tmp"),
"cache").makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
public void testLRCPolicy() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("seed: " + seed);
fs.delete(p, true);
conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
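    // the index cache limit is expressed in MB, so at most 1 MB of index data
    // is retained; each file written below contributes partsPerMap * 24 bytes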
final int partsPerMap = 1000;
final int bytesPerFile = partsPerMap * 24;
IndexCache cache = new IndexCache(conf);
// fill cache
int totalsize = bytesPerFile;
for (; totalsize < 1024 * 1024; totalsize += bytesPerFile) {
Path f = new Path(p, Integer.toString(totalsize, 36));
writeFile(fs, f, totalsize, partsPerMap);
IndexRecord rec = cache.getIndexInformation(
Integer.toString(totalsize, 36), r.nextInt(partsPerMap), f,
UserGroupInformation.getCurrentUser().getShortUserName());
checkRecord(rec, totalsize);
}
    // delete the backing files; the cache should still serve every cached entry
for (FileStatus stat : fs.listStatus(p)) {
fs.delete(stat.getPath(),true);
}
for (int i = bytesPerFile; i < 1024 * 1024; i += bytesPerFile) {
Path f = new Path(p, Integer.toString(i, 36));
IndexRecord rec = cache.getIndexInformation(Integer.toString(i, 36),
r.nextInt(partsPerMap), f,
UserGroupInformation.getCurrentUser().getShortUserName());
checkRecord(rec, i);
}
// push oldest (bytesPerFile) out of cache
Path f = new Path(p, Integer.toString(totalsize, 36));
writeFile(fs, f, totalsize, partsPerMap);
cache.getIndexInformation(Integer.toString(totalsize, 36),
r.nextInt(partsPerMap), f,
UserGroupInformation.getCurrentUser().getShortUserName());
fs.delete(f, false);
    // the oldest entry was pushed out of the cache and its file deleted,
    // so reading it should fail with a FileNotFoundException
boolean fnf = false;
try {
cache.getIndexInformation(Integer.toString(bytesPerFile, 36),
r.nextInt(partsPerMap), new Path(p, Integer.toString(bytesPerFile)),
UserGroupInformation.getCurrentUser().getShortUserName());
} catch (IOException e) {
if (e.getCause() == null ||
!(e.getCause() instanceof FileNotFoundException)) {
throw e;
}
else {
fnf = true;
}
}
if (!fnf)
fail("Failed to push out last entry");
// should find all the other entries
for (int i = bytesPerFile << 1; i < 1024 * 1024; i += bytesPerFile) {
IndexRecord rec = cache.getIndexInformation(Integer.toString(i, 36),
r.nextInt(partsPerMap), new Path(p, Integer.toString(i, 36)),
UserGroupInformation.getCurrentUser().getShortUserName());
checkRecord(rec, i);
}
IndexRecord rec = cache.getIndexInformation(Integer.toString(totalsize, 36),
r.nextInt(partsPerMap), f,
UserGroupInformation.getCurrentUser().getShortUserName());
checkRecord(rec, totalsize);
}
public void testBadIndex() throws Exception {
final int parts = 30;
fs.delete(p, true);
conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
IndexCache cache = new IndexCache(conf);
Path f = new Path(p, "badindex");
FSDataOutputStream out = fs.create(f, false);
CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
DataOutputStream dout = new DataOutputStream(iout);
for (int i = 0; i < parts; ++i) {
for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
if (0 == (i % 3)) {
dout.writeLong(i);
} else {
out.writeLong(i);
}
}
}
out.writeLong(iout.getChecksum().getValue());
dout.close();
try {
cache.getIndexInformation("badindex", 7, f,
UserGroupInformation.getCurrentUser().getShortUserName());
fail("Did not detect bad checksum");
} catch (IOException e) {
if (!(e.getCause() instanceof ChecksumException)) {
throw e;
}
}
}
public void testInvalidReduceNumberOrLength() throws Exception {
fs.delete(p, true);
conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
final int partsPerMap = 1000;
final int bytesPerFile = partsPerMap * 24;
IndexCache cache = new IndexCache(conf);
// fill cache
Path feq = new Path(p, "invalidReduceOrPartsPerMap");
writeFile(fs, feq, bytesPerFile, partsPerMap);
// Number of reducers should always be less than partsPerMap as reducer
// numbers start from 0 and there cannot be more reducer than parts
try {
// Number of reducers equal to partsPerMap
cache.getIndexInformation("reduceEqualPartsPerMap",
partsPerMap, // reduce number == partsPerMap
feq, UserGroupInformation.getCurrentUser().getShortUserName());
fail("Number of reducers equal to partsPerMap did not fail");
} catch (Exception e) {
if (!(e instanceof IOException)) {
throw e;
}
}
try {
// Number of reducers more than partsPerMap
cache.getIndexInformation(
"reduceMorePartsPerMap",
partsPerMap + 1, // reduce number > partsPerMap
feq, UserGroupInformation.getCurrentUser().getShortUserName());
fail("Number of reducers more than partsPerMap did not fail");
} catch (Exception e) {
if (!(e instanceof IOException)) {
throw e;
}
}
}
public void testRemoveMap() throws Exception {
// This test case use two thread to call getIndexInformation and
// removeMap concurrently, in order to construct race condition.
    // This test case may not be repeatable. But on my macbook this test
// fails with probability of 100% on code before MAPREDUCE-2541,
// so it is repeatable in practice.
fs.delete(p, true);
conf.setInt(TTConfig.TT_INDEX_CACHE, 10);
// Make a big file so removeMapThread almost surely runs faster than
// getInfoThread
final int partsPerMap = 100000;
final int bytesPerFile = partsPerMap * 24;
final IndexCache cache = new IndexCache(conf);
final Path big = new Path(p, "bigIndex");
final String user =
UserGroupInformation.getCurrentUser().getShortUserName();
writeFile(fs, big, bytesPerFile, partsPerMap);
// run multiple times
for (int i = 0; i < 20; ++i) {
Thread getInfoThread = new Thread() {
@Override
public void run() {
try {
cache.getIndexInformation("bigIndex", partsPerMap, big, user);
} catch (Exception e) {
// should not be here
}
}
};
Thread removeMapThread = new Thread() {
@Override
public void run() {
cache.removeMap("bigIndex");
}
};
if (i%2==0) {
getInfoThread.start();
removeMapThread.start();
} else {
removeMapThread.start();
getInfoThread.start();
}
getInfoThread.join();
removeMapThread.join();
      assertTrue(cache.checkTotalMemoryUsed());
}
}
public void testCreateRace() throws Exception {
fs.delete(p, true);
conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
final int partsPerMap = 1000;
final int bytesPerFile = partsPerMap * 24;
final IndexCache cache = new IndexCache(conf);
final Path racy = new Path(p, "racyIndex");
final String user =
UserGroupInformation.getCurrentUser().getShortUserName();
writeFile(fs, racy, bytesPerFile, partsPerMap);
// run multiple instances
Thread[] getInfoThreads = new Thread[50];
for (int i = 0; i < 50; i++) {
getInfoThreads[i] = new Thread() {
@Override
public void run() {
try {
cache.getIndexInformation("racyIndex", partsPerMap, racy, user);
cache.removeMap("racyIndex");
} catch (Exception e) {
// should not be here
}
}
};
}
for (int i = 0; i < 50; i++) {
getInfoThreads[i].start();
}
final Thread mainTestThread = Thread.currentThread();
Thread timeoutThread = new Thread() {
@Override
public void run() {
try {
Thread.sleep(15000);
mainTestThread.interrupt();
} catch (InterruptedException ie) {
// we are done;
}
}
};
for (int i = 0; i < 50; i++) {
try {
getInfoThreads[i].join();
} catch (InterruptedException ie) {
// we haven't finished in time. Potential deadlock/race.
fail("Unexpectedly long delay during concurrent cache entry creations");
}
}
// stop the timeoutThread. If we get interrupted before stopping, there
// must be something wrong, although it wasn't a deadlock. No need to
// catch and swallow.
timeoutThread.interrupt();
}
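  // writeFile() stores the same value in every long of the index, so a
  // correctly read IndexRecord has startOffset == rawLength == partLength ==
  // fill; checkRecord() below relies on exactly that invariant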
private static void checkRecord(IndexRecord rec, long fill) {
assertEquals(fill, rec.startOffset);
assertEquals(fill, rec.rawLength);
assertEquals(fill, rec.partLength);
}
private static void writeFile(FileSystem fs, Path f, long fill, int parts)
throws IOException {
FSDataOutputStream out = fs.create(f, false);
CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
DataOutputStream dout = new DataOutputStream(iout);
for (int i = 0; i < parts; ++i) {
for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
dout.writeLong(fill);
}
}
out.writeLong(iout.getChecksum().getValue());
dout.close();
}
}
| 11,100 | 33.156923 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
/**
 * TestTaskLog checks the TaskLog log-file location and reader utilities
*/
public class TestTaskLog {
/**
   * test TaskLog log file locations and index handling
*
* @throws IOException
*/
@Test (timeout=50000)
public void testTaskLog() throws IOException {
// test TaskLog
System.setProperty(
YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, "testString");
assertEquals(TaskLog.getMRv2LogDir(), "testString");
TaskAttemptID taid = mock(TaskAttemptID.class);
JobID jid = new JobID("job", 1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("testString"
+ File.separatorChar + "stdout"));
// test getRealTaskLogFileLocation
File indexFile = TaskLog.getIndexFile(taid, true);
if (!indexFile.getParentFile().exists()) {
indexFile.getParentFile().mkdirs();
}
indexFile.delete();
indexFile.createNewFile();
TaskLog.syncLogs("location", taid, true);
assertTrue(indexFile.getAbsolutePath().endsWith(
"userlogs" + File.separatorChar + "job_job_0001"
+ File.separatorChar + "JobId.cleanup"
+ File.separatorChar + "log.index"));
f = TaskLog.getRealTaskLogFileLocation(taid, true, LogName.DEBUGOUT);
if (f != null) {
assertTrue(f.getAbsolutePath().endsWith("location"
+ File.separatorChar + "debugout"));
FileUtils.copyFile(indexFile, f);
}
// test obtainLogDirOwner
assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
// test TaskLog.Reader
assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT, taid, true).length() > 0);
}
public String readTaskLog(TaskLog.LogName filter,
org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
throws IOException {
// string buffer to store task log
StringBuffer result = new StringBuffer();
int res;
// reads the whole tasklog into inputstream
InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
isCleanup);
// construct string log from inputstream.
byte[] b = new byte[65536];
while (true) {
res = taskLogReader.read(b);
if (res > 0) {
        // only append the bytes actually read in this pass, not the whole buffer
        result.append(new String(b, 0, res));
} else {
break;
}
}
taskLogReader.close();
// trim the string and return it
String str = result.toString();
str = str.trim();
return str;
}
/**
* test without TASK_LOG_DIR
*
* @throws IOException
*/
@Test (timeout=50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
// TaskLog tasklog= new TaskLog();
System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
// test TaskLog
    assertEquals(null, TaskLog.getMRv2LogDir());
TaskAttemptID taid = mock(TaskAttemptID.class);
JobID jid = new JobID("job", 1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
}
| 4,433 | 30.006993 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobAclsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Test;
/**
* Test the job acls manager
*/
public class TestJobAclsManager {
@Test
public void testClusterAdmins() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
conf.set(JobACL.MODIFY_JOB.getAclName(), jobOwner);
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
String clusterAdmin = "testuser2";
conf.set(MRConfig.MR_ADMINS, clusterAdmin);
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
clusterAdmin, new String[] {});
// cluster admin should have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("cluster admin should have view access", val);
val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
jobACLs.get(JobACL.MODIFY_JOB));
assertTrue("cluster admin should have modify access", val);
}
@Test
public void testClusterNoAdmins() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), "");
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
String noAdminUser = "testuser2";
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
noAdminUser, new String[] {});
// random user should not have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertFalse("random user should not have view access", val);
val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
jobACLs.get(JobACL.MODIFY_JOB));
assertFalse("random user should not have modify access", val);
callerUGI = UserGroupInformation.createUserForTesting(jobOwner,
new String[] {});
// Owner should have access
val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("owner should have view access", val);
val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
jobACLs.get(JobACL.MODIFY_JOB));
assertTrue("owner should have modify access", val);
}
@Test
public void testAclsOff() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, false);
String noAdminUser = "testuser2";
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
noAdminUser, new String[] {});
// acls off so anyone should have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("acls off so anyone should have access", val);
}
@Test
public void testGroups() {
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
Configuration conf = new Configuration();
String jobOwner = "testuser";
conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
String user = "testuser2";
String adminGroup = "adminGroup";
conf.set(MRConfig.MR_ADMINS, " " + adminGroup);
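    // an ACL value of " adminGroup" (leading space) lists no users and one
    // group, so only members of adminGroup gain admin access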
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
user, new String[] {adminGroup});
    // a member of the admin group should have access
boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
jobACLs.get(JobACL.VIEW_JOB));
assertTrue("user in admin group should have access", val);
}
}
| 5,866 | 40.027972 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.text.ParseException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Random;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.CountersExceededException;
import org.apache.hadoop.mapred.Counters.Group;
import org.apache.hadoop.mapred.Counters.GroupFactory;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup;
import org.apache.hadoop.mapreduce.counters.CounterGroupFactory.FrameworkGroupFactory;
import org.junit.Test;
/**
* TestCounters checks the sanity and recoverability of {@code Counters}
*/
public class TestCounters {
enum myCounters {TEST1, TEST2};
private static final long MAX_VALUE = 10;
private static final Log LOG = LogFactory.getLog(TestCounters.class);
static final Enum<?> FRAMEWORK_COUNTER = TaskCounter.CPU_MILLISECONDS;
static final long FRAMEWORK_COUNTER_VALUE = 8;
static final String FS_SCHEME = "HDFS";
static final FileSystemCounter FS_COUNTER = FileSystemCounter.BYTES_READ;
static final long FS_COUNTER_VALUE = 10;
// Generates enum based counters
private Counters getEnumCounters(Enum[] keys) {
Counters counters = new Counters();
for (Enum key : keys) {
for (long i = 0; i < MAX_VALUE; ++i) {
counters.incrCounter(key, i);
}
}
return counters;
}
// Generate string based counters
private Counters getEnumCounters(String[] gNames, String[] cNames) {
Counters counters = new Counters();
for (String gName : gNames) {
for (String cName : cNames) {
for (long i = 0; i < MAX_VALUE; ++i) {
counters.incrCounter(gName, cName, i);
}
}
}
return counters;
}
/**
* Test counter recovery
*/
private void testCounter(Counters counter) throws ParseException {
String compactEscapedString = counter.makeEscapedCompactString();
assertFalse("compactEscapedString should not contain null",
compactEscapedString.contains("null"));
Counters recoveredCounter =
Counters.fromEscapedCompactString(compactEscapedString);
// Check for recovery from string
assertEquals("Recovered counter does not match on content",
counter, recoveredCounter);
}
@Test
public void testCounters() throws IOException {
Enum[] keysWithResource = {TaskCounter.MAP_INPUT_RECORDS,
TaskCounter.MAP_OUTPUT_BYTES};
Enum[] keysWithoutResource = {myCounters.TEST1, myCounters.TEST2};
String[] groups = {"group1", "group2", "group{}()[]"};
String[] counters = {"counter1", "counter2", "counter{}()[]"};
try {
// I. Check enum counters that have a resource bundle
testCounter(getEnumCounters(keysWithResource));
// II. Check enum counters that don't have a resource bundle
testCounter(getEnumCounters(keysWithoutResource));
// III. Check string counters
testCounter(getEnumCounters(groups, counters));
} catch (ParseException pe) {
throw new IOException(pe);
}
}
/**
* Verify that counter setValue and increment work correctly
*/
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
Counters counters = new Counters();
final int NUMBER_TESTS = 100;
final int NUMBER_INC = 10;
final Random rand = new Random();
for (int i = 0; i < NUMBER_TESTS; i++) {
long initValue = rand.nextInt();
long expectedValue = initValue;
Counter counter = counters.findCounter("foo", "bar");
counter.setValue(initValue);
assertEquals("Counter value is not initialized correctly",
expectedValue, counter.getValue());
for (int j = 0; j < NUMBER_INC; j++) {
int incValue = rand.nextInt();
counter.increment(incValue);
expectedValue += incValue;
assertEquals("Counter value is not incremented correctly",
expectedValue, counter.getValue());
}
expectedValue = rand.nextInt();
counter.setValue(expectedValue);
assertEquals("Counter value is not set correctly",
expectedValue, counter.getValue());
}
}
@SuppressWarnings("deprecation")
@Test
public void testReadWithLegacyNames() {
Counters counters = new Counters();
counters.incrCounter(TaskCounter.MAP_INPUT_RECORDS, 1);
counters.incrCounter(JobCounter.DATA_LOCAL_MAPS, 1);
counters.findCounter("file", FileSystemCounter.BYTES_READ).increment(1);
checkLegacyNames(counters);
}
@SuppressWarnings("deprecation")
@Test
public void testWriteWithLegacyNames() {
Counters counters = new Counters();
counters.incrCounter(Task.Counter.MAP_INPUT_RECORDS, 1);
counters.incrCounter(JobInProgress.Counter.DATA_LOCAL_MAPS, 1);
counters.findCounter("FileSystemCounters", "FILE_BYTES_READ").increment(1);
checkLegacyNames(counters);
}
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
assertEquals("New name", 1, counters.findCounter(
TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
assertEquals("Legacy name", 1, counters.findCounter(
"org.apache.hadoop.mapred.Task$Counter",
"MAP_INPUT_RECORDS").getValue());
assertEquals("Legacy enum", 1,
counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());
assertEquals("New name", 1, counters.findCounter(
JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
assertEquals("Legacy name", 1, counters.findCounter(
"org.apache.hadoop.mapred.JobInProgress$Counter",
"DATA_LOCAL_MAPS").getValue());
assertEquals("Legacy enum", 1,
counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());
assertEquals("New name", 1, counters.findCounter(
FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
assertEquals("New name and method", 1, counters.findCounter("file",
FileSystemCounter.BYTES_READ).getValue());
assertEquals("Legacy name", 1, counters.findCounter(
"FileSystemCounters",
"FILE_BYTES_READ").getValue());
}
@SuppressWarnings("deprecation")
@Test
public void testCounterIteratorConcurrency() {
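// Adding a new counter group while an iterator over the groups is live
// must not break the iterator (the test passes as long as no exception is thrown).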
Counters counters = new Counters();
counters.incrCounter("group1", "counter1", 1);
Iterator<Group> iterator = counters.iterator();
counters.incrCounter("group2", "counter2", 1);
iterator.next();
}
@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
Counters counters = new Counters();
counters.incrCounter("group1", "counter1", 1);
Group group = counters.getGroup("group1");
Iterator<Counter> iterator = group.iterator();
counters.incrCounter("group1", "counter2", 1);
iterator.next();
}
@Test
public void testFileSystemGroupIteratorConcurrency() {
Counters counters = new Counters();
// create 2 filesystem counter groups
counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
// Iterate over the counters in this group while updating counters in
// the group
Group group = counters.getGroup(FileSystemCounter.class.getName());
Iterator<Counter> iterator = group.iterator();
counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
assertTrue(iterator.hasNext());
iterator.next();
counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
assertTrue(iterator.hasNext());
iterator.next();
}
@Test
public void testLegacyGetGroupNames() {
Counters counters = new Counters();
// create 2 filesystem counter groups
counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
counters.incrCounter("group1", "counter1", 1);
HashSet<String> groups = new HashSet<String>(counters.getGroupNames());
HashSet<String> expectedGroups = new HashSet<String>();
expectedGroups.add("group1");
expectedGroups.add("FileSystemCounters"); //Legacy Name
expectedGroups.add("org.apache.hadoop.mapreduce.FileSystemCounter");
assertEquals(expectedGroups, groups);
}
@Test
public void testMakeCompactString() {
final String GC1 = "group1.counter1:1";
final String GC2 = "group2.counter2:3";
Counters counters = new Counters();
counters.incrCounter("group1", "counter1", 1);
assertEquals("group1.counter1:1", counters.makeCompactString());
counters.incrCounter("group2", "counter2", 3);
String cs = counters.makeCompactString();
assertTrue("Bad compact string",
cs.equals(GC1 + ',' + GC2) || cs.equals(GC2 + ',' + GC1));
}
@Test
public void testCounterLimits() {
testMaxCountersLimits(new Counters());
testMaxGroupsLimits(new Counters());
}
private void testMaxCountersLimits(final Counters counters) {
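// Fill up to MAX_COUNTER_LIMIT user counters; creating one more ("test", "bad") should fail.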
for (int i = 0; i < org.apache.hadoop.mapred.Counters.MAX_COUNTER_LIMIT; ++i) {
counters.findCounter("test", "test" + i);
}
setExpected(counters);
shouldThrow(CountersExceededException.class, new Runnable() {
public void run() {
counters.findCounter("test", "bad");
}
});
checkExpected(counters);
}
private void testMaxGroupsLimits(final Counters counters) {
for (int i = 0; i < org.apache.hadoop.mapred.Counters.MAX_GROUP_LIMIT; ++i) {
// assuming COUNTERS_MAX > GROUPS_MAX
counters.findCounter("test" + i, "test");
}
setExpected(counters);
shouldThrow(CountersExceededException.class, new Runnable() {
public void run() {
counters.findCounter("bad", "test");
}
});
checkExpected(counters);
}
private void setExpected(Counters counters) {
counters.findCounter(FRAMEWORK_COUNTER).setValue(FRAMEWORK_COUNTER_VALUE);
counters.findCounter(FS_SCHEME, FS_COUNTER).setValue(FS_COUNTER_VALUE);
}
private void checkExpected(Counters counters) {
assertEquals(FRAMEWORK_COUNTER_VALUE,
counters.findCounter(FRAMEWORK_COUNTER).getValue());
assertEquals(FS_COUNTER_VALUE, counters.findCounter(FS_SCHEME, FS_COUNTER)
.getValue());
}
private void shouldThrow(Class<? extends Exception> ecls, Runnable runnable) {
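// Note: only CountersExceededException is actually caught here; the ecls
// parameter is used for the failure message only.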
try {
runnable.run();
} catch (CountersExceededException e) {
return;
}
Assert.fail("Should've thrown " + ecls.getSimpleName());
}
public static void main(String[] args) throws IOException {
new TestCounters().testCounters();
}
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
GroupFactory groupFactory = new GroupFactoryForTest();
FrameworkGroupFactory frameworkGroupFactory =
groupFactory.newFrameworkGroupFactory(JobCounter.class);
Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");
FrameworkCounterGroup counterGroup =
(FrameworkCounterGroup) group.getUnderlyingGroup();
org.apache.hadoop.mapreduce.Counter count1 =
counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
Assert.assertNotNull(count1);
// Verify no exception gets thrown when finding an unknown counter
org.apache.hadoop.mapreduce.Counter count2 =
counterGroup.findCounter("Unknown");
Assert.assertNull(count2);
}
@Test
public void testFilesystemCounter() {
GroupFactory groupFactory = new GroupFactoryForTest();
Group fsGroup = groupFactory.newFileSystemGroup();
org.apache.hadoop.mapreduce.Counter count1 =
fsGroup.findCounter("ANY_BYTES_READ");
Assert.assertNotNull(count1);
// Verify no exception gets thrown when finding an unknown counter
org.apache.hadoop.mapreduce.Counter count2 =
fsGroup.findCounter("Unknown");
Assert.assertNull(count2);
}
}
class GroupFactoryForTest extends GroupFactory {
public <T extends Enum<T>>
FrameworkGroupFactory<Group> newFrameworkGroupFactory(final Class<T> cls) {
return super.newFrameworkGroupFactory(cls);
}
public Group newFileSystemGroup() {
return super.newFileSystemGroup();
}
}
| 13,520 | 34.395288 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
* TestQueue checks the sanity and recoverability of Queue and QueueManager
*/
public class TestQueue {
private static File testDir = new File(System.getProperty("test.build.data",
"/tmp"), TestJobConf.class.getSimpleName());
@Before
public void setup() {
testDir.mkdirs();
}
@After
public void cleanup() {
FileUtil.fullyDelete(testDir);
}
/**
* test QueueManager configured from an XML file
*
* @throws IOException
*/
@Test (timeout=5000)
public void testQueue() throws IOException {
File f = null;
try {
f = writeFile();
QueueManager manager = new QueueManager(f.getCanonicalPath(), true);
manager.setSchedulerInfo("first", "queueInfo");
manager.setSchedulerInfo("second", "queueInfoqueueInfo");
Queue root = manager.getRoot();
assertTrue(root.getChildren().size() == 2);
Iterator<Queue> iterator = root.getChildren().iterator();
Queue firstSubQueue = iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(
firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job")
.toString(),
"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue = iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(secondSubQueue.getProperties().getProperty("key"), "value");
assertEquals(secondSubQueue.getProperties().getProperty("key1"), "value1");
// test status
assertEquals(firstSubQueue.getState().getStateName(), "running");
assertEquals(secondSubQueue.getState().getStateName(), "stopped");
Set<String> template = new HashSet<String>();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(), template);
// test user access
UserGroupInformation mockUGI = mock(UserGroupInformation.class);
when(mockUGI.getShortUserName()).thenReturn("user1");
String[] groups = { "group1" };
when(mockUGI.getGroupNames()).thenReturn(groups);
assertTrue(manager.hasAccess("first", QueueACL.SUBMIT_JOB, mockUGI));
assertFalse(manager.hasAccess("second", QueueACL.SUBMIT_JOB, mockUGI));
assertFalse(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
when(mockUGI.getShortUserName()).thenReturn("user3");
assertTrue(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
QueueAclsInfo[] qai = manager.getQueueAcls(mockUGI);
assertEquals(qai.length, 1);
// test refresh queue
manager.refreshQueues(getConfiguration(), null);
iterator = root.getChildren().iterator();
Queue firstSubQueue1 = iterator.next();
Queue secondSubQueue1 = iterator.next();
// test the equals method
assertTrue(firstSubQueue.equals(firstSubQueue1));
assertEquals(firstSubQueue1.getState().getStateName(), "running");
assertEquals(secondSubQueue1.getState().getStateName(), "stopped");
assertEquals(firstSubQueue1.getSchedulingInfo(), "queueInfo");
assertEquals(secondSubQueue1.getSchedulingInfo(), "queueInfoqueueInfo");
// test JobQueueInfo
assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(), "first");
assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(), "running");
assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(),
"queueInfo");
assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(), 0);
// test getSchedulerInfo
assertEquals(manager.getSchedulerInfo("first"), "queueInfo");
Set<String> queueJobQueueInfos = new HashSet<String>();
for(JobQueueInfo jobInfo : manager.getJobQueueInfos()){
queueJobQueueInfos.add(jobInfo.getQueueName());
}
Set<String> rootJobQueueInfos = new HashSet<String>();
for(Queue queue : root.getChildren()){
rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
}
assertEquals(queueJobQueueInfos, rootJobQueueInfos);
// test getJobQueueInfoMapping
assertEquals(
manager.getJobQueueInfoMapping().get("first").getQueueName(), "first");
// test dumpConfiguration
Writer writer = new StringWriter();
Configuration conf = getConfiguration();
conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
QueueManager.dumpConfiguration(writer, f.getAbsolutePath(), conf);
String result = writer.toString();
assertTrue(result
.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);
writer = new StringWriter();
QueueManager.dumpConfiguration(writer, conf);
result = writer.toString();
assertEquals(
"{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",
result);
// test constructor QueueAclsInfo
QueueAclsInfo qi = new QueueAclsInfo();
assertNull(qi.getQueueName());
} finally {
if (f != null) {
f.delete();
}
}
}
private Configuration getConfiguration() {
Configuration conf = new Configuration();
conf.set(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY,
"first,second");
conf.set(QueueManager.QUEUE_CONF_PROPERTY_NAME_PREFIX
+ "first.acl-submit-job", "user1,user2 group1,group2");
conf.set(MRConfig.MR_ACLS_ENABLED, "true");
conf.set(QueueManager.QUEUE_CONF_PROPERTY_NAME_PREFIX + "first.state",
"running");
conf.set(QueueManager.QUEUE_CONF_PROPERTY_NAME_PREFIX + "second.state",
"stopped");
return conf;
}
@Test (timeout=5000)
public void testDefaultConfig() {
QueueManager manager = new QueueManager(true);
assertEquals(manager.getRoot().getChildren().size(), 2);
}
/**
* test QueueManager built directly from a Configuration (no queue XML file)
*
* @throws IOException
*/
@Test (timeout=5000)
public void test2Queue() throws IOException {
Configuration conf = getConfiguration();
QueueManager manager = new QueueManager(conf);
manager.setSchedulerInfo("first", "queueInfo");
manager.setSchedulerInfo("second", "queueInfoqueueInfo");
Queue root = manager.getRoot();
// test children queues
assertTrue(root.getChildren().size() == 2);
Iterator<Queue> iterator = root.getChildren().iterator();
Queue firstSubQueue = iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(
firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job")
.toString(),
"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue = iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(firstSubQueue.getState().getStateName(), "running");
assertEquals(secondSubQueue.getState().getStateName(), "stopped");
assertTrue(manager.isRunning("first"));
assertFalse(manager.isRunning("second"));
assertEquals(firstSubQueue.getSchedulingInfo(), "queueInfo");
assertEquals(secondSubQueue.getSchedulingInfo(), "queueInfoqueueInfo");
// test leaf queue
Set<String> template = new HashSet<String>();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(), template);
}
/**
* write the queue configuration file used by testQueue
* @return the configuration file
* @throws IOException
*/
private File writeFile() throws IOException {
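// Writes a minimal queue configuration XML with two queues: "first" (running,
// with submit/administer ACLs) and "second" (stopped, with custom properties).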
File f = new File(testDir, "tst.xml");
BufferedWriter out = new BufferedWriter(new FileWriter(f));
String properties = "<properties><property key=\"key\" value=\"value\"/><property key=\"key1\" value=\"value1\"/></properties>";
out.write("<queues>");
out.newLine();
out.write("<queue><name>first</name><acl-submit-job>user1,user2 group1,group2</acl-submit-job><acl-administer-jobs>user3,user4 group3,group4</acl-administer-jobs><state>running</state></queue>");
out.newLine();
out.write("<queue><name>second</name><acl-submit-job>u1,u2 g1,g2</acl-submit-job>"
+ properties + "<state>stopped</state></queue>");
out.newLine();
out.write("</queues>");
out.flush();
out.close();
return f;
}
}
| 10,143 | 38.317829 | 502 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobQueueClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import org.junit.Assert;
import org.junit.Test;
public class TestJobQueueClient {
/**
* Test that printJobQueueInfo recursively prints child queues
*/
@Test
@SuppressWarnings("deprecation")
public void testPrintJobQueueInfo() throws IOException {
JobQueueClient queueClient = new JobQueueClient();
JobQueueInfo parent = new JobQueueInfo();
JobQueueInfo child = new JobQueueInfo();
JobQueueInfo grandChild = new JobQueueInfo();
child.addChild(grandChild);
parent.addChild(child);
grandChild.setQueueName("GrandChildQueue");
ByteArrayOutputStream bbos = new ByteArrayOutputStream();
PrintWriter writer = new PrintWriter(bbos);
queueClient.printJobQueueInfo(parent, writer);
Assert.assertTrue("printJobQueueInfo did not print grandchild's name",
bbos.toString().contains("GrandChildQueue"));
}
}
| 1,802 | 31.781818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.regex.Pattern;
import static org.junit.Assert.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* test JobConf
*
*/
public class TestJobConf {
/**
* test getters and setters of JobConf
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testJobConf() {
JobConf conf = new JobConf();
// test default value
Pattern pattern = conf.getJarUnpackPattern();
assertEquals(Pattern.compile("(?:classes/|lib/).*").toString(),
pattern.toString());
// default value
assertFalse(conf.getKeepFailedTaskFiles());
conf.setKeepFailedTaskFiles(true);
assertTrue(conf.getKeepFailedTaskFiles());
// default value
assertNull(conf.getKeepTaskFilesPattern());
conf.setKeepTaskFilesPattern("123454");
assertEquals("123454", conf.getKeepTaskFilesPattern());
// default value
assertNotNull(conf.getWorkingDirectory());
conf.setWorkingDirectory(new Path("test"));
assertTrue(conf.getWorkingDirectory().toString().endsWith("test"));
// default value
assertEquals(1, conf.getNumTasksToExecutePerJvm());
// default value
assertNull(conf.getKeyFieldComparatorOption());
conf.setKeyFieldComparatorOptions("keySpec");
assertEquals("keySpec", conf.getKeyFieldComparatorOption());
// default value
assertFalse(conf.getUseNewReducer());
conf.setUseNewReducer(true);
assertTrue(conf.getUseNewReducer());
// default
assertTrue(conf.getMapSpeculativeExecution());
assertTrue(conf.getReduceSpeculativeExecution());
assertTrue(conf.getSpeculativeExecution());
conf.setReduceSpeculativeExecution(false);
assertTrue(conf.getSpeculativeExecution());
conf.setMapSpeculativeExecution(false);
assertFalse(conf.getSpeculativeExecution());
assertFalse(conf.getMapSpeculativeExecution());
assertFalse(conf.getReduceSpeculativeExecution());
conf.setSessionId("ses");
assertEquals("ses", conf.getSessionId());
assertEquals(3, conf.getMaxTaskFailuresPerTracker());
conf.setMaxTaskFailuresPerTracker(2);
assertEquals(2, conf.getMaxTaskFailuresPerTracker());
assertEquals(0, conf.getMaxMapTaskFailuresPercent());
conf.setMaxMapTaskFailuresPercent(50);
assertEquals(50, conf.getMaxMapTaskFailuresPercent());
assertEquals(0, conf.getMaxReduceTaskFailuresPercent());
conf.setMaxReduceTaskFailuresPercent(70);
assertEquals(70, conf.getMaxReduceTaskFailuresPercent());
// by default
assertEquals(JobPriority.NORMAL.name(), conf.getJobPriority().name());
conf.setJobPriority(JobPriority.HIGH);
assertEquals(JobPriority.HIGH.name(), conf.getJobPriority().name());
assertNull(conf.getJobSubmitHostName());
conf.setJobSubmitHostName("hostname");
assertEquals("hostname", conf.getJobSubmitHostName());
// default
assertNull(conf.getJobSubmitHostAddress());
conf.setJobSubmitHostAddress("ww");
assertEquals("ww", conf.getJobSubmitHostAddress());
// default value
assertFalse(conf.getProfileEnabled());
conf.setProfileEnabled(true);
assertTrue(conf.getProfileEnabled());
// default value
assertEquals(conf.getProfileTaskRange(true).toString(), "0-2");
assertEquals(conf.getProfileTaskRange(false).toString(), "0-2");
conf.setProfileTaskRange(true, "0-3");
assertEquals(conf.getProfileTaskRange(false).toString(), "0-2");
assertEquals(conf.getProfileTaskRange(true).toString(), "0-3");
// default value
assertNull(conf.getMapDebugScript());
conf.setMapDebugScript("mDbgScript");
assertEquals("mDbgScript", conf.getMapDebugScript());
// default value
assertNull(conf.getReduceDebugScript());
conf.setReduceDebugScript("rDbgScript");
assertEquals("rDbgScript", conf.getReduceDebugScript());
// default value
assertNull(conf.getJobLocalDir());
assertEquals("default", conf.getQueueName());
conf.setQueueName("qname");
assertEquals("qname", conf.getQueueName());
conf.setMemoryForMapTask(100 * 1000);
assertEquals(100 * 1000, conf.getMemoryForMapTask());
conf.setMemoryForReduceTask(1000 * 1000);
assertEquals(1000 * 1000, conf.getMemoryForReduceTask());
assertEquals(-1, conf.getMaxPhysicalMemoryForTask());
assertEquals("The variable key is no longer used.",
JobConf.deprecatedString("key"));
// make sure mapreduce.map|reduce.java.opts are not set by default
// so that they won't override mapred.child.java.opts
assertEquals("mapreduce.map.java.opts should not be set by default",
null, conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS));
assertEquals("mapreduce.reduce.java.opts should not be set by default",
null, conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS));
}
/**
* Ensure that M/R 1.x applications can get and set task virtual memory with
* old property names
*/
@SuppressWarnings("deprecation")
@Test (timeout = 1000)
public void testDeprecatedPropertyNameForTaskVmem() {
JobConf configuration = new JobConf();
configuration.setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, 1024);
configuration.setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, 1024);
Assert.assertEquals(1024, configuration.getMemoryForMapTask());
Assert.assertEquals(1024, configuration.getMemoryForReduceTask());
// Make sure new property names aren't broken by the old ones
configuration.setLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY, 1025);
configuration.setLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY, 1025);
Assert.assertEquals(1025, configuration.getMemoryForMapTask());
Assert.assertEquals(1025, configuration.getMemoryForReduceTask());
configuration.setMemoryForMapTask(2048);
configuration.setMemoryForReduceTask(2048);
Assert.assertEquals(2048, configuration.getLong(
JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, -1));
Assert.assertEquals(2048, configuration.getLong(
JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, -1));
// Make sure new property names aren't broken by the old ones
Assert.assertEquals(2048, configuration.getLong(
JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY, -1));
Assert.assertEquals(2048, configuration.getLong(
JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY, -1));
}
@Test
public void testProfileParamsDefaults() {
JobConf configuration = new JobConf();
String result = configuration.getProfileParams();
Assert.assertNotNull(result);
Assert.assertTrue(result.contains("file=%s"));
Assert.assertTrue(result.startsWith("-agentlib:hprof"));
}
@Test
public void testProfileParamsSetter() {
JobConf configuration = new JobConf();
configuration.setProfileParams("test");
Assert.assertEquals("test", configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
}
@Test
public void testProfileParamsGetter() {
JobConf configuration = new JobConf();
configuration.set(MRJobConfig.TASK_PROFILE_PARAMS, "test");
Assert.assertEquals("test", configuration.getProfileParams());
}
/**
* Testing mapred.task.maxvmem replacement with new values
*
*/
@Test
public void testMemoryConfigForMapOrReduceTask(){
JobConf configuration = new JobConf();
configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
Assert.assertEquals(configuration.getMemoryForMapTask(),300);
Assert.assertEquals(configuration.getMemoryForReduceTask(),300);
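// When mapred.task.maxvmem is set (in bytes), it takes precedence over the
// *_MEMORY_MB settings: 2*1024*1024 bytes is reported as 2 MB below.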
configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
Assert.assertEquals(configuration.getMemoryForMapTask(),2);
Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
configuration = new JobConf();
configuration.set("mapred.task.maxvmem" , "-1");
configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(400));
Assert.assertEquals(configuration.getMemoryForMapTask(), 300);
Assert.assertEquals(configuration.getMemoryForReduceTask(), 400);
configuration = new JobConf();
configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
configuration.set(MRJobConfig.MAP_MEMORY_MB,"-1");
configuration.set(MRJobConfig.REDUCE_MEMORY_MB,"-1");
Assert.assertEquals(configuration.getMemoryForMapTask(),2);
Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
configuration = new JobConf();
configuration.set("mapred.task.maxvmem" , String.valueOf(-1));
configuration.set(MRJobConfig.MAP_MEMORY_MB,"-1");
configuration.set(MRJobConfig.REDUCE_MEMORY_MB,"-1");
Assert.assertEquals(configuration.getMemoryForMapTask(),-1);
Assert.assertEquals(configuration.getMemoryForReduceTask(),-1);
configuration = new JobConf();
configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
configuration.set(MRJobConfig.MAP_MEMORY_MB, "3");
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "3");
Assert.assertEquals(configuration.getMemoryForMapTask(),2);
Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
}
/**
* Test that negative values for MAPRED_TASK_MAXVMEM_PROPERTY cause
* new configuration keys' values to be used.
*/
@Test
public void testNegativeValueForTaskVmem() {
JobConf configuration = new JobConf();
configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3");
Assert.assertEquals(MRJobConfig.DEFAULT_MAP_MEMORY_MB,
configuration.getMemoryForMapTask());
Assert.assertEquals(MRJobConfig.DEFAULT_REDUCE_MEMORY_MB,
configuration.getMemoryForReduceTask());
configuration.set(MRJobConfig.MAP_MEMORY_MB, "4");
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "5");
Assert.assertEquals(4, configuration.getMemoryForMapTask());
Assert.assertEquals(5, configuration.getMemoryForReduceTask());
}
/**
* Test that negative values for new configuration keys get passed through.
*/
@Test
public void testNegativeValuesForMemoryParams() {
JobConf configuration = new JobConf();
configuration.set(MRJobConfig.MAP_MEMORY_MB, "-5");
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-6");
Assert.assertEquals(-5, configuration.getMemoryForMapTask());
Assert.assertEquals(-6, configuration.getMemoryForReduceTask());
}
/**
* Test deprecated accessor and mutator method for mapred.task.maxvmem
*/
@Test
public void testMaxVirtualMemoryForTask() {
JobConf configuration = new JobConf();
//get test case
configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
Assert.assertEquals(
configuration.getMaxVirtualMemoryForTask(), 300 * 1024 * 1024);
configuration = new JobConf();
configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(200));
Assert.assertEquals(
configuration.getMaxVirtualMemoryForTask(), 200 * 1024 * 1024);
configuration = new JobConf();
configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
Assert.assertEquals(
configuration.getMaxVirtualMemoryForTask(), 1 * 1024 * 1024);
configuration = new JobConf();
configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
Assert.assertEquals(
configuration.getMaxVirtualMemoryForTask(), 1 * 1024 * 1024);
//set test case
configuration = new JobConf();
configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
Assert.assertEquals(configuration.getMemoryForMapTask(), 2);
Assert.assertEquals(configuration.getMemoryForReduceTask(), 2);
configuration = new JobConf();
configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(400));
configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
Assert.assertEquals(configuration.getMemoryForMapTask(), 2);
Assert.assertEquals(configuration.getMemoryForReduceTask(), 2);
}
/**
* Ensure that by default JobContext.MAX_TASK_FAILURES_PER_TRACKER is less than
* JobContext.MAP_MAX_ATTEMPTS and JobContext.REDUCE_MAX_ATTEMPTS so that
* failed tasks will be retried on other nodes
*/
@Test
public void testMaxTaskFailuresPerTracker() {
JobConf jobConf = new JobConf(true);
Assert.assertTrue("By default JobContext.MAX_TASK_FAILURES_PER_TRACKER was "
+ "not less than JobContext.MAP_MAX_ATTEMPTS and REDUCE_MAX_ATTEMPTS"
,jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxMapAttempts() &&
jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxReduceAttempts()
);
}
}
| 14,035 | 37.666667 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSkipBadRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* test SkipBadRecords
*
*/
public class TestSkipBadRecords {
@Test (timeout=5000)
public void testSkipBadRecords() {
// test default values
Configuration conf = new Configuration();
assertEquals(2, SkipBadRecords.getAttemptsToStartSkipping(conf));
assertTrue(SkipBadRecords.getAutoIncrMapperProcCount(conf));
assertTrue(SkipBadRecords.getAutoIncrReducerProcCount(conf));
assertEquals(0, SkipBadRecords.getMapperMaxSkipRecords(conf));
assertEquals(0, SkipBadRecords.getReducerMaxSkipGroups(conf), 0);
assertNull(SkipBadRecords.getSkipOutputPath(conf));
// test setters
SkipBadRecords.setAttemptsToStartSkipping(conf, 5);
SkipBadRecords.setAutoIncrMapperProcCount(conf, false);
SkipBadRecords.setAutoIncrReducerProcCount(conf, false);
SkipBadRecords.setMapperMaxSkipRecords(conf, 6L);
SkipBadRecords.setReducerMaxSkipGroups(conf, 7L);
JobConf jc= new JobConf();
SkipBadRecords.setSkipOutputPath(jc, new Path("test"));
// test getters
assertEquals(5, SkipBadRecords.getAttemptsToStartSkipping(conf));
assertFalse(SkipBadRecords.getAutoIncrMapperProcCount(conf));
assertFalse(SkipBadRecords.getAutoIncrReducerProcCount(conf));
assertEquals(6L, SkipBadRecords.getMapperMaxSkipRecords(conf));
assertEquals(7L, SkipBadRecords.getReducerMaxSkipGroups(conf), 0);
assertEquals("test",SkipBadRecords.getSkipOutputPath(jc).toString());
}
}
| 2,430 | 38.209677 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
@SuppressWarnings("unchecked")
public class TestFileOutputCommitter extends TestCase {
private static Path outDir = new Path(System.getProperty("test.build.data",
"/tmp"), "output");
// A random task attempt id for testing.
private static String attempt = "attempt_200707121733_0001_m_000000_0";
private static String partFile = "part-00000";
private static TaskAttemptID taskID = TaskAttemptID.forName(attempt);
private Text key1 = new Text("key1");
private Text key2 = new Text("key2");
private Text val1 = new Text("val1");
private Text val2 = new Text("val2");
private void writeOutput(RecordWriter theRecordWriter,
TaskAttemptContext context) throws IOException, InterruptedException {
NullWritable nullWritable = NullWritable.get();
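// The mix of null keys and values below exercises TextOutputFormat's handling
// of partial records; validateContent() lists the exact lines expected on disk.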
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(null);
}
}
private void writeMapFileOutput(RecordWriter theRecordWriter,
TaskAttemptContext context) throws IOException, InterruptedException {
try {
int key = 0;
for (int i = 0 ; i < 10; ++i) {
key = i;
Text val = (i%2 == 1) ? val1 : val2;
theRecordWriter.write(new LongWritable(key),
val);
}
} finally {
theRecordWriter.close(null);
}
}
private void testRecoveryInternal(int commitVersion, int recoveryVersion)
throws Exception {
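// Simulates an AM restart: commit task output as application attempt 1 using
// commitVersion, then recover it as attempt 2 using recoveryVersion.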
JobConf conf = new JobConf();
FileOutputFormat.setOutputPath(conf, outDir);
conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 1);
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
commitVersion);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter =
theOutputFormat.getRecordWriter(null, conf, partFile, null);
writeOutput(theRecordWriter, tContext);
// do commit
if(committer.needsTaskCommit(tContext)) {
committer.commitTask(tContext);
}
Path jobTempDir1 = committer.getCommittedTaskPath(tContext);
File jtd1 = new File(jobTempDir1.toUri().getPath());
if (commitVersion == 1) {
assertTrue("Version 1 commits to temporary dir " + jtd1, jtd1.exists());
validateContent(jobTempDir1);
} else {
assertFalse("Version 2 commits to output dir " + jtd1, jtd1.exists());
}
//now while running the second app attempt,
//recover the task output from first attempt
JobConf conf2 = new JobConf(conf);
conf2.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf2.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 2);
conf2.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
recoveryVersion);
JobContext jContext2 = new JobContextImpl(conf2, taskID.getJobID());
TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2, taskID);
FileOutputCommitter committer2 = new FileOutputCommitter();
committer2.setupJob(jContext2);
committer2.recoverTask(tContext2);
Path jobTempDir2 = committer2.getCommittedTaskPath(tContext2);
File jtd2 = new File(jobTempDir2.toUri().getPath());
if (recoveryVersion == 1) {
assertTrue("Version 1 recovers to " + jtd2, jtd2.exists());
validateContent(jobTempDir2);
} else {
assertFalse("Version 2 commits to output dir " + jtd2, jtd2.exists());
if (commitVersion == 1) {
assertTrue("Version 2 recovery moves to output dir from "
+ jtd1 , jtd1.list().length == 0);
}
}
committer2.commitJob(jContext2);
validateContent(outDir);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testRecoveryV1() throws Exception {
testRecoveryInternal(1, 1);
}
public void testRecoveryV2() throws Exception {
testRecoveryInternal(2, 2);
}
public void testRecoveryUpgradeV1V2() throws Exception {
testRecoveryInternal(1, 2);
}
private void validateContent(Path dir) throws IOException {
File fdir = new File(dir.toUri().getPath());
File expectedFile = new File(fdir, partFile);
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append('\t').append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append('\t').append(val2).append("\n");
String output = slurp(expectedFile);
assertEquals(output, expectedOutput.toString());
}
private void validateMapFileOutputContent(
FileSystem fs, Path dir) throws IOException {
// map output is a directory with index and data files
Path expectedMapDir = new Path(dir, partFile);
assert(fs.getFileStatus(expectedMapDir).isDirectory());
FileStatus[] files = fs.listStatus(expectedMapDir);
int fileCount = 0;
boolean dataFileFound = false;
boolean indexFileFound = false;
for (FileStatus f : files) {
if (f.isFile()) {
++fileCount;
if (f.getPath().getName().equals(MapFile.INDEX_FILE_NAME)) {
indexFileFound = true;
}
else if (f.getPath().getName().equals(MapFile.DATA_FILE_NAME)) {
dataFileFound = true;
}
}
}
assert(fileCount > 0);
assert(dataFileFound && indexFileFound);
}
private void testCommitterInternal(int version) throws Exception {
JobConf conf = new JobConf();
FileOutputFormat.setOutputPath(conf, outDir);
conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter =
theOutputFormat.getRecordWriter(null, conf, partFile, null);
writeOutput(theRecordWriter, tContext);
// do commit
if(committer.needsTaskCommit(tContext)) {
committer.commitTask(tContext);
}
committer.commitJob(jContext);
// validate output
validateContent(outDir);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testCommitterV1() throws Exception {
testCommitterInternal(1);
}
public void testCommitterV2() throws Exception {
testCommitterInternal(2);
}
private void testMapFileOutputCommitterInternal(int version)
throws Exception {
JobConf conf = new JobConf();
FileOutputFormat.setOutputPath(conf, outDir);
conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
RecordWriter theRecordWriter =
theOutputFormat.getRecordWriter(null, conf, partFile, null);
writeMapFileOutput(theRecordWriter, tContext);
// do commit
if(committer.needsTaskCommit(tContext)) {
committer.commitTask(tContext);
}
committer.commitJob(jContext);
// validate output
validateMapFileOutputContent(FileSystem.get(conf), outDir);
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testMapFileOutputCommitterV1() throws Exception {
testMapFileOutputCommitterInternal(1);
}
public void testMapFileOutputCommitterV2() throws Exception {
testMapFileOutputCommitterInternal(2);
}
public void testMapOnlyNoOutputV1() throws Exception {
testMapOnlyNoOutputInternal(1);
}
public void testMapOnlyNoOutputV2() throws Exception {
testMapOnlyNoOutputInternal(2);
}
private void testMapOnlyNoOutputInternal(int version) throws Exception {
JobConf conf = new JobConf();
//This is not set on purpose. FileOutputFormat.setOutputPath(conf, outDir);
conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
// setup
committer.setupJob(jContext);
committer.setupTask(tContext);
if(committer.needsTaskCommit(tContext)) {
// do commit
committer.commitTask(tContext);
}
committer.commitJob(jContext);
// validate output
FileUtil.fullyDelete(new File(outDir.toString()));
}
private void testAbortInternal(int version)
throws IOException, InterruptedException {
JobConf conf = new JobConf();
FileOutputFormat.setOutputPath(conf, outDir);
conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
TextOutputFormat theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter =
theOutputFormat.getRecordWriter(null, conf, partFile, null);
writeOutput(theRecordWriter, tContext);
// do abort
committer.abortTask(tContext);
File out = new File(outDir.toUri().getPath());
Path workPath = committer.getWorkPath(tContext, outDir);
File wp = new File(workPath.toUri().getPath());
File expectedFile = new File(wp, partFile);
assertFalse("task temp dir still exists", expectedFile.exists());
committer.abortJob(jContext, JobStatus.State.FAILED);
expectedFile = new File(out, FileOutputCommitter.TEMP_DIR_NAME);
assertFalse("job temp dir still exists", expectedFile.exists());
assertEquals("Output directory not empty", 0, out.listFiles().length);
FileUtil.fullyDelete(out);
}
public void testAbortV1() throws Exception {
testAbortInternal(1);
}
public void testAbortV2() throws Exception {
testAbortInternal(2);
}
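// A local file system whose delete() always fails, used to drive the abort
// failure paths in testFailAbortInternal().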
public static class FakeFileSystem extends RawLocalFileSystem {
public FakeFileSystem() {
super();
}
public URI getUri() {
return URI.create("faildel:///");
}
@Override
public boolean delete(Path p, boolean recursive) throws IOException {
throw new IOException("fake delete failed");
}
}
private void testFailAbortInternal(int version)
throws IOException, InterruptedException {
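// Register FakeFileSystem as the default FS so abortTask/abortJob hit the
// failing delete() and surface IOExceptions.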
JobConf conf = new JobConf();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
conf.setInt(org.apache.hadoop.mapreduce.lib.output.
FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
conf.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 1);
FileOutputFormat.setOutputPath(conf, outDir);
JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
FileOutputCommitter committer = new FileOutputCommitter();
// do setup
committer.setupJob(jContext);
committer.setupTask(tContext);
// write output
File jobTmpDir = new File(new Path(outDir,
FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR +
conf.getInt(MRConstants.APPLICATION_ATTEMPT_ID, 0) +
Path.SEPARATOR +
FileOutputCommitter.TEMP_DIR_NAME).toString());
File taskTmpDir = new File(jobTmpDir, "_" + taskID);
File expectedFile = new File(taskTmpDir, partFile);
TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
RecordWriter<?, ?> theRecordWriter =
theOutputFormat.getRecordWriter(null, conf,
expectedFile.getAbsolutePath(), null);
writeOutput(theRecordWriter, tContext);
// do abort
Throwable th = null;
try {
committer.abortTask(tContext);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
assertTrue(expectedFile + " does not exists", expectedFile.exists());
th = null;
try {
committer.abortJob(jContext, JobStatus.State.FAILED);
} catch (IOException ie) {
th = ie;
}
assertNotNull(th);
assertTrue(th instanceof IOException);
assertTrue(th.getMessage().contains("fake delete failed"));
assertTrue("job temp dir does not exists", jobTmpDir.exists());
FileUtil.fullyDelete(new File(outDir.toString()));
}
public void testFailAbortV1() throws Exception {
testFailAbortInternal(1);
}
public void testFailAbortV2() throws Exception {
testFailAbortInternal(2);
}
public static String slurp(File f) throws IOException {
int len = (int) f.length();
byte[] buf = new byte[len];
FileInputStream in = new FileInputStream(f);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
}
| 16,055 | 34.287912 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskLogAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.StringWriter;
import java.io.Writer;
import org.apache.log4j.Category;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.Priority;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestTaskLogAppender {
/**
* test TaskLogAppender
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testTaskLogAppender(){
TaskLogAppender appender= new TaskLogAppender();
System.setProperty(TaskLogAppender.TASKID_PROPERTY,"attempt_01_02_m03_04_001");
System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
appender.activateOptions();
assertEquals(appender.getTaskId(), "attempt_01_02_m03_04_001");
assertEquals(appender.getTotalLogFileSize(),1000);
assertEquals(appender.getIsCleanup(),false);
// test writer
Writer writer= new StringWriter();
appender.setWriter(writer);
Layout layout = new PatternLayout("%-5p [%t]: %m%n");
appender.setLayout(layout);
Category logger= Logger.getLogger(getClass().getName());
LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO, "message", new Throwable());
appender.append(event);
appender.flush() ;
appender.close();
assertTrue(writer.toString().length()>0);
// the isCleanup flag should not be changed by activateOptions()
appender= new TaskLogAppender();
appender.setIsCleanup(true);
appender.activateOptions();
assertEquals(appender.getIsCleanup(),true);
}
}
| 2,436 | 32.847222 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMaster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.*;
import java.net.InetSocketAddress;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
public class TestMaster {
@Test
public void testGetMasterAddress() {
YarnConfiguration conf = new YarnConfiguration();
// Default is yarn framework
String masterHostname = Master.getMasterAddress(conf).getHostName();
// no address set so should default to default rm address
InetSocketAddress rmAddr = NetUtils.createSocketAddr(YarnConfiguration.DEFAULT_RM_ADDRESS);
assertEquals(masterHostname, rmAddr.getHostName());
// Trying invalid master address for classic
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
conf.set(MRConfig.MASTER_ADDRESS, "local:invalid");
// should throw an exception for invalid value
try {
Master.getMasterAddress(conf);
fail("Should not reach here as there is a bad master address");
}
catch (Exception e) {
// Expected
}
// Change master address to a valid value
conf.set(MRConfig.MASTER_ADDRESS, "bar.com:8042");
masterHostname = Master.getMasterAddress(conf).getHostName();
assertEquals(masterHostname, "bar.com");
// change framework to yarn
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
conf.set(YarnConfiguration.RM_ADDRESS, "foo1.com:8192");
masterHostname = Master.getMasterAddress(conf).getHostName();
assertEquals(masterHostname, "foo1.com");
}
@Test
public void testGetMasterUser() {
YarnConfiguration conf = new YarnConfiguration();
conf.set(MRConfig.MASTER_USER_NAME, "foo");
conf.set(YarnConfiguration.RM_PRINCIPAL, "bar");
// default is yarn framework
assertEquals(Master.getMasterUserName(conf), "bar");
// set framework name to classic
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
assertEquals(Master.getMasterUserName(conf), "foo");
// change framework to yarn
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
assertEquals(Master.getMasterUserName(conf), "bar");
}
}
| 3,047 | 33.247191 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestClusterStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.junit.Assert;
import org.junit.Test;
public class TestClusterStatus {
private ClusterStatus clusterStatus = new ClusterStatus();
@SuppressWarnings("deprecation")
@Test (timeout = 1000)
public void testGraylistedTrackers() {
Assert.assertEquals(0, clusterStatus.getGraylistedTrackers());
Assert.assertTrue(clusterStatus.getGraylistedTrackerNames().isEmpty());
}
@SuppressWarnings("deprecation")
@Test (timeout = 1000)
public void testJobTrackerState() {
Assert.assertEquals(JobTracker.State.RUNNING,
clusterStatus.getJobTrackerState());
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestOldMethodsJobID.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.mapred.TaskCompletionEvent.Status;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Test deprecated methods
*
*/
public class TestOldMethodsJobID {
/**
* test deprecated methods of TaskID
* @throws IOException
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testDepricatedMethods() throws IOException {
JobID jid = new JobID();
TaskID test = new TaskID(jid, true, 1);
assertEquals(test.getTaskType(), TaskType.MAP);
test = new TaskID(jid, false, 1);
assertEquals(test.getTaskType(), TaskType.REDUCE);
test = new TaskID("001", 1, false, 1);
assertEquals(test.getTaskType(), TaskType.REDUCE);
test = new TaskID("001", 1, true, 1);
assertEquals(test.getTaskType(), TaskType.MAP);
ByteArrayOutputStream out = new ByteArrayOutputStream();
test.write(new DataOutputStream(out));
TaskID ti = TaskID.read(new DataInputStream(new ByteArrayInputStream(out
.toByteArray())));
assertEquals(ti.toString(), test.toString());
assertEquals("task_001_0001_m_000002",
TaskID.getTaskIDsPattern("001", 1, true, 2));
assertEquals("task_003_0001_m_000004",
TaskID.getTaskIDsPattern("003", 1, TaskType.MAP, 4));
assertEquals("003_0001_m_000004",
TaskID.getTaskIDsPatternWOPrefix("003", 1, TaskType.MAP, 4).toString());
}
/**
* test JobID
* @throws IOException
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testJobID() throws IOException{
JobID jid = new JobID("001",2);
ByteArrayOutputStream out = new ByteArrayOutputStream();
jid.write(new DataOutputStream(out));
assertEquals(jid,JobID.read(new DataInputStream(new ByteArrayInputStream(out.toByteArray()))));
assertEquals("job_001_0001",JobID.getJobIDsPattern("001",1));
}
/**
* test deprecated methods of TaskCompletionEvent
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testTaskCompletionEvent() {
TaskAttemptID taid = new TaskAttemptID("001", 1, TaskType.REDUCE, 2, 3);
TaskCompletionEvent template = new TaskCompletionEvent(12, taid, 13, true,
Status.SUCCEEDED, "httptracker");
TaskCompletionEvent testEl = TaskCompletionEvent.downgrade(template);
testEl.setTaskAttemptId(taid);
testEl.setTaskTrackerHttp("httpTracker");
testEl.setTaskId("attempt_001_0001_m_000002_04");
assertEquals("attempt_001_0001_m_000002_4",testEl.getTaskId());
testEl.setTaskStatus(Status.OBSOLETE);
assertEquals(Status.OBSOLETE.toString(), testEl.getStatus().toString());
testEl.setTaskRunTime(20);
assertEquals(testEl.getTaskRunTime(), 20);
testEl.setEventId(16);
assertEquals(testEl.getEventId(), 16);
}
/**
   * test deprecated methods of JobProfile
* @throws IOException
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testJobProfile() throws IOException {
JobProfile profile = new JobProfile("user", "job_001_03", "jobFile", "uri",
"name");
assertEquals("job_001_0003", profile.getJobId());
assertEquals("default", profile.getQueueName());
// serialization test
ByteArrayOutputStream out = new ByteArrayOutputStream();
profile.write(new DataOutputStream(out));
JobProfile profile2 = new JobProfile();
profile2.readFields(new DataInputStream(new ByteArrayInputStream(out
.toByteArray())));
assertEquals(profile2.name, profile.name);
assertEquals(profile2.jobFile, profile.jobFile);
assertEquals(profile2.queueName, profile.queueName);
assertEquals(profile2.url, profile.url);
assertEquals(profile2.user, profile.user);
}
/**
* test TaskAttemptID
*/
  @SuppressWarnings("deprecation")
  @Test (timeout=5000)
  public void testTaskAttemptID() {
    TaskAttemptID task = new TaskAttemptID("001", 2, true, 3, 4);
assertEquals("attempt_001_0002_m_000003_4", TaskAttemptID.getTaskAttemptIDsPattern("001", 2, true, 3, 4));
assertEquals("task_001_0002_m_000003", task.getTaskID().toString());
assertEquals("attempt_001_0001_r_000002_3",TaskAttemptID.getTaskAttemptIDsPattern("001", 1, TaskType.REDUCE, 2, 3));
assertEquals("001_0001_m_000001_2", TaskAttemptID.getTaskAttemptIDsPatternWOPrefix("001",1, TaskType.MAP, 1, 2).toString());
}
/**
* test Reporter.NULL
*
*/
@Test (timeout=5000)
  public void testReporter() {
    Reporter nullReporter = Reporter.NULL;
    assertNull(nullReporter.getCounter(null));
    assertNull(nullReporter.getCounter("group", "name"));
    // getInputSplit method removed
    try {
      assertNull(nullReporter.getInputSplit());
    } catch (UnsupportedOperationException e) {
      assertEquals("NULL reporter has no input", e.getMessage());
    }
    assertEquals(0, nullReporter.getProgress(), 0.01);
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestClock.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* test Clock class
*
*/
public class TestClock {
@Test (timeout=1000)
  public void testClock() {
    Clock clock = new Clock();
    long templateTime = System.currentTimeMillis();
    long time = clock.getTime();
    assertEquals(templateTime, time, 30);
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Decompressor;
import org.junit.Test;
public class TestLineRecordReader {
private static Path workDir = new Path(new Path(System.getProperty(
"test.build.data", "target"), "data"), "TestTextInputFormat");
private static Path inputDir = new Path(workDir, "input");
private void testSplitRecords(String testFileName, long firstSplitLength)
throws IOException {
URL testFileUrl = getClass().getClassLoader().getResource(testFileName);
assertNotNull("Cannot find " + testFileName, testFileUrl);
File testFile = new File(testFileUrl.getFile());
long testFileSize = testFile.length();
Path testFilePath = new Path(testFile.getAbsolutePath());
Configuration conf = new Configuration();
testSplitRecordsForFile(conf, firstSplitLength, testFileSize, testFilePath);
}
private void testSplitRecordsForFile(Configuration conf,
long firstSplitLength, long testFileSize, Path testFilePath)
throws IOException {
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
assertTrue("unexpected test data at " + testFilePath,
testFileSize > firstSplitLength);
String delimiter = conf.get("textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter) {
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
}
// read the data without splitting to count the records
FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
(String[])null);
LineRecordReader reader = new LineRecordReader(conf, split,
recordDelimiterBytes);
LongWritable key = new LongWritable();
Text value = new Text();
int numRecordsNoSplits = 0;
while (reader.next(key, value)) {
++numRecordsNoSplits;
}
reader.close();
// count the records in the first split
split = new FileSplit(testFilePath, 0, firstSplitLength, (String[])null);
reader = new LineRecordReader(conf, split, recordDelimiterBytes);
int numRecordsFirstSplit = 0;
while (reader.next(key, value)) {
++numRecordsFirstSplit;
}
reader.close();
// count the records in the second split
split = new FileSplit(testFilePath, firstSplitLength,
testFileSize - firstSplitLength, (String[])null);
reader = new LineRecordReader(conf, split, recordDelimiterBytes);
int numRecordsRemainingSplits = 0;
while (reader.next(key, value)) {
++numRecordsRemainingSplits;
}
reader.close();
assertEquals("Unexpected number of records in split",
numRecordsNoSplits, numRecordsFirstSplit + numRecordsRemainingSplits);
}
@Test
public void testBzip2SplitEndsAtCR() throws IOException {
// the test data contains a carriage-return at the end of the first
// split which ends at compressed offset 136498 and the next
// character is not a linefeed
testSplitRecords("blockEndingInCR.txt.bz2", 136498);
}
@Test
public void testBzip2SplitEndsAtCRThenLF() throws IOException {
// the test data contains a carriage-return at the end of the first
// split which ends at compressed offset 136498 and the next
// character is a linefeed
testSplitRecords("blockEndingInCRThenLF.txt.bz2", 136498);
}
//This test ensures record reader doesn't lose records when it starts
//exactly at the starting byte of a bz2 compressed block
@Test
public void testBzip2SplitStartAtBlockMarker() throws IOException {
//136504 in blockEndingInCR.txt.bz2 is the byte at which the bz2 block ends
//In the following test cases record readers should iterate over all the records
//and should not miss any record.
//Start next split at just the start of the block.
testSplitRecords("blockEndingInCR.txt.bz2", 136504);
//Start next split a byte forward in next block.
testSplitRecords("blockEndingInCR.txt.bz2", 136505);
//Start next split 3 bytes forward in next block.
testSplitRecords("blockEndingInCR.txt.bz2", 136508);
//Start next split 10 bytes from behind the end marker.
testSplitRecords("blockEndingInCR.txt.bz2", 136494);
}
@Test(expected=IOException.class)
public void testSafeguardSplittingUnsplittableFiles() throws IOException {
// The LineRecordReader must fail when trying to read a file that
// was compressed using an unsplittable file format
testSplitRecords("TestSafeguardSplittingUnsplittableFiles.txt.gz", 2);
}
// Use the LineRecordReader to read records from the file
public ArrayList<String> readRecords(URL testFileUrl, int splitSize)
throws IOException {
// Set up context
File testFile = new File(testFileUrl.getFile());
long testFileSize = testFile.length();
Path testFilePath = new Path(testFile.getAbsolutePath());
Configuration conf = new Configuration();
conf.setInt("io.file.buffer.size", 1);
// Gather the records returned by the record reader
ArrayList<String> records = new ArrayList<String>();
long offset = 0;
LongWritable key = new LongWritable();
Text value = new Text();
while (offset < testFileSize) {
FileSplit split =
new FileSplit(testFilePath, offset, splitSize, (String[]) null);
LineRecordReader reader = new LineRecordReader(conf, split);
while (reader.next(key, value)) {
records.add(value.toString());
}
offset += splitSize;
}
return records;
}
// Gather the records by just splitting on new lines
public String[] readRecordsDirectly(URL testFileUrl, boolean bzip)
throws IOException {
int MAX_DATA_SIZE = 1024 * 1024;
byte[] data = new byte[MAX_DATA_SIZE];
FileInputStream fis = new FileInputStream(testFileUrl.getFile());
int count;
if (bzip) {
BZip2CompressorInputStream bzIn = new BZip2CompressorInputStream(fis);
count = bzIn.read(data);
bzIn.close();
} else {
count = fis.read(data);
}
fis.close();
assertTrue("Test file data too big for buffer", count < data.length);
return new String(data, 0, count, "UTF-8").split("\n");
}
public void checkRecordSpanningMultipleSplits(String testFile,
int splitSize,
boolean bzip)
throws IOException {
URL testFileUrl = getClass().getClassLoader().getResource(testFile);
ArrayList<String> records = readRecords(testFileUrl, splitSize);
String[] actuals = readRecordsDirectly(testFileUrl, bzip);
assertEquals("Wrong number of records", actuals.length, records.size());
boolean hasLargeRecord = false;
for (int i = 0; i < actuals.length; ++i) {
assertEquals(actuals[i], records.get(i));
if (actuals[i].length() > 2 * splitSize) {
hasLargeRecord = true;
}
}
assertTrue("Invalid test data. Doesn't have a large enough record",
hasLargeRecord);
}
@Test
public void testRecordSpanningMultipleSplits()
throws IOException {
checkRecordSpanningMultipleSplits("recordSpanningMultipleSplits.txt",
10, false);
}
@Test
public void testRecordSpanningMultipleSplitsCompressed()
throws IOException {
// The file is generated with bz2 block size of 100k. The split size
// needs to be larger than that for the CompressedSplitLineReader to
// work.
checkRecordSpanningMultipleSplits("recordSpanningMultipleSplits.txt.bz2",
200 * 1000, true);
}
@Test
public void testStripBOM() throws IOException {
// the test data contains a BOM at the start of the file
// confirm the BOM is skipped by LineRecordReader
String UTF8_BOM = "\uFEFF";
URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
assertNotNull("Cannot find testBOM.txt", testFileUrl);
File testFile = new File(testFileUrl.getFile());
Path testFilePath = new Path(testFile.getAbsolutePath());
long testFileSize = testFile.length();
Configuration conf = new Configuration();
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
// read the data and check whether BOM is skipped
FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
(String[])null);
LineRecordReader reader = new LineRecordReader(conf, split);
LongWritable key = new LongWritable();
Text value = new Text();
int numRecords = 0;
boolean firstLine = true;
boolean skipBOM = true;
while (reader.next(key, value)) {
if (firstLine) {
firstLine = false;
if (value.toString().startsWith(UTF8_BOM)) {
skipBOM = false;
}
}
++numRecords;
}
reader.close();
assertTrue("BOM is not skipped", skipBOM);
}
@Test
public void testMultipleClose() throws IOException {
URL testFileUrl = getClass().getClassLoader().
getResource("recordSpanningMultipleSplits.txt.bz2");
assertNotNull("Cannot find recordSpanningMultipleSplits.txt.bz2",
testFileUrl);
File testFile = new File(testFileUrl.getFile());
Path testFilePath = new Path(testFile.getAbsolutePath());
long testFileSize = testFile.length();
Configuration conf = new Configuration();
conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
(String[])null);
LineRecordReader reader = new LineRecordReader(conf, split);
LongWritable key = new LongWritable();
Text value = new Text();
//noinspection StatementWithEmptyBody
while (reader.next(key, value)) ;
reader.close();
reader.close();
BZip2Codec codec = new BZip2Codec();
codec.setConf(conf);
Set<Decompressor> decompressors = new HashSet<Decompressor>();
for (int i = 0; i < 10; ++i) {
decompressors.add(CodecPool.getDecompressor(codec));
}
assertEquals(10, decompressors.size());
}
/**
* Writes the input test file
*
* @param conf
* @return Path of the file created
* @throws IOException
*/
private Path createInputFile(Configuration conf, String data)
throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path file = new Path(inputDir, "test.txt");
Writer writer = new OutputStreamWriter(localFs.create(file));
try {
writer.write(data);
} finally {
writer.close();
}
return file;
}
@Test
public void testUncompressedInput() throws Exception {
Configuration conf = new Configuration();
String inputData = "abc+++def+++ghi+++"
+ "jkl+++mno+++pqr+++stu+++vw +++xyz";
Path inputFile = createInputFile(conf, inputData);
conf.set("textinputformat.record.delimiter", "+++");
for(int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for(int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
}
@Test
public void testUncompressedInputContainingCRLF() throws Exception {
Configuration conf = new Configuration();
String inputData = "a\r\nb\rc\nd\r\n";
Path inputFile = createInputFile(conf, inputData);
for(int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
for(int splitSize = 1; splitSize < inputData.length(); splitSize++) {
conf.setInt("io.file.buffer.size", bufferSize);
testSplitRecordsForFile(conf, splitSize, inputData.length(), inputFile);
}
}
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/lib/TestCombineFileRecordReader.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.File;
import java.io.FileWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.fs.FileUtil;
import org.junit.Test;
import org.mockito.Mockito;
import org.junit.Assert;
import java.io.IOException;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class TestCombineFileRecordReader {
private static Path outDir = new Path(System.getProperty("test.build.data",
"/tmp"), TestCombineFileRecordReader.class.getName());
private static class TextRecordReaderWrapper
extends org.apache.hadoop.mapred.lib.CombineFileRecordReaderWrapper<LongWritable,Text> {
// this constructor signature is required by CombineFileRecordReader
public TextRecordReaderWrapper(CombineFileSplit split, Configuration conf,
Reporter reporter, Integer idx) throws IOException {
super(new TextInputFormat(), split, conf, reporter, idx);
}
}
@SuppressWarnings("unchecked")
@Test
public void testInitNextRecordReader() throws IOException{
JobConf conf = new JobConf();
Path[] paths = new Path[3];
long[] fileLength = new long[3];
File[] files = new File[3];
LongWritable key = new LongWritable(1);
Text value = new Text();
try {
for(int i=0;i<3;i++){
fileLength[i] = i;
File dir = new File(outDir.toString());
dir.mkdir();
files[i] = new File(dir,"testfile"+i);
FileWriter fileWriter = new FileWriter(files[i]);
fileWriter.close();
paths[i] = new Path(outDir+"/testfile"+i);
}
CombineFileSplit combineFileSplit = new CombineFileSplit(conf, paths, fileLength);
Reporter reporter = Mockito.mock(Reporter.class);
CombineFileRecordReader cfrr = new CombineFileRecordReader(conf, combineFileSplit,
reporter, TextRecordReaderWrapper.class);
verify(reporter).progress();
Assert.assertFalse(cfrr.next(key,value));
verify(reporter, times(3)).progress();
} finally {
FileUtil.fullyDelete(new File(outDir.toString()));
}
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/lib/db/TestDBInputFormat.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib.db;
import java.sql.DriverManager;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit;
import org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader;
import org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DriverForTest;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TestDBInputFormat {
/**
   * test DBInputFormat class. The class should split the result into chunks.
* @throws Exception
*/
@Test(timeout = 10000)
public void testDBInputFormat() throws Exception {
JobConf configuration = new JobConf();
setupDriver(configuration);
DBInputFormat<NullDBWritable> format = new DBInputFormat<NullDBWritable>();
format.setConf(configuration);
format.setConf(configuration);
DBInputFormat.DBInputSplit splitter = new DBInputFormat.DBInputSplit(1, 10);
Reporter reporter = mock(Reporter.class);
RecordReader<LongWritable, NullDBWritable> reader = format.getRecordReader(
splitter, configuration, reporter);
configuration.setInt(MRJobConfig.NUM_MAPS, 3);
InputSplit[] lSplits = format.getSplits(configuration, 3);
assertEquals(5, lSplits[0].getLength());
assertEquals(3, lSplits.length);
    // test reader. Some simple tests
assertEquals(LongWritable.class, reader.createKey().getClass());
assertEquals(0, reader.getPos());
assertEquals(0, reader.getProgress(), 0.001);
reader.close();
}
/**
   * test configuration for DB; the DBConfiguration.* parameters should work.
*/
@Test (timeout = 5000)
public void testSetInput() {
JobConf configuration = new JobConf();
String[] fieldNames = { "field1", "field2" };
DBInputFormat.setInput(configuration, NullDBWritable.class, "table",
"conditions", "orderBy", fieldNames);
assertEquals(
"org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",
configuration.getClass(DBConfiguration.INPUT_CLASS_PROPERTY, null)
.getName());
assertEquals("table",
configuration.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY, null));
String[] fields = configuration
.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
assertEquals("field1", fields[0]);
assertEquals("field2", fields[1]);
assertEquals("conditions",
configuration.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY, null));
assertEquals("orderBy",
configuration.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY, null));
configuration = new JobConf();
DBInputFormat.setInput(configuration, NullDBWritable.class, "query",
"countQuery");
assertEquals("query", configuration.get(DBConfiguration.INPUT_QUERY, null));
assertEquals("countQuery",
configuration.get(DBConfiguration.INPUT_COUNT_QUERY, null));
JobConf jConfiguration = new JobConf();
DBConfiguration.configureDB(jConfiguration, "driverClass", "dbUrl", "user",
"password");
assertEquals("driverClass",
jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
assertEquals("dbUrl", jConfiguration.get(DBConfiguration.URL_PROPERTY));
assertEquals("user", jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
assertEquals("password",
jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
jConfiguration = new JobConf();
DBConfiguration.configureDB(jConfiguration, "driverClass", "dbUrl");
assertEquals("driverClass",
jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
assertEquals("dbUrl", jConfiguration.get(DBConfiguration.URL_PROPERTY));
assertNull(jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
assertNull(jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
}
/**
*
   * test DBRecordReader. This reader should create keys and values and track its position.
*/
@SuppressWarnings("unchecked")
@Test (timeout = 5000)
public void testDBRecordReader() throws Exception {
JobConf job = mock(JobConf.class);
DBConfiguration dbConfig = mock(DBConfiguration.class);
    String[] fields = { "field1", "field2" };
@SuppressWarnings("rawtypes")
DBRecordReader reader = new DBInputFormat<NullDBWritable>().new DBRecordReader(
new DBInputSplit(), NullDBWritable.class, job,
DriverForTest.getConnection(), dbConfig, "condition", fields, "table");
LongWritable key = reader.createKey();
assertEquals(0, key.get());
DBWritable value = reader.createValue();
assertEquals(
"org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable", value
.getClass().getName());
assertEquals(0, reader.getPos());
assertFalse(reader.next(key, value));
}
private void setupDriver(JobConf configuration) throws Exception {
configuration.set(DBConfiguration.URL_PROPERTY, "testUrl");
DriverManager.registerDriver(new DriverForTest());
configuration.set(DBConfiguration.DRIVER_CLASS_PROPERTY,
DriverForTest.class.getCanonicalName());
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/package-info.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.filecache;
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.filecache;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Distribute application-specific large, read-only files efficiently.
*
* <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
* framework to cache files (text, archives, jars etc.) needed by applications.
* </p>
*
* <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
* via the {@link org.apache.hadoop.mapred.JobConf}. The
* <code>DistributedCache</code> assumes that the files specified via urls are
* already present on the {@link FileSystem} at the path specified by the url
* and are accessible by every machine in the cluster.</p>
*
* <p>The framework will copy the necessary files on to the slave node before
* any tasks for the job are executed on that node. Its efficiency stems from
* the fact that the files are only copied once per job and the ability to
* cache archives which are un-archived on the slaves.</p>
*
* <p><code>DistributedCache</code> can be used to distribute simple, read-only
* data/text files and/or more complex types such as archives, jars etc.
* Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
* Jars may be optionally added to the classpath of the tasks, a rudimentary
* software distribution mechanism. Files have execution permissions.
 * In older versions of Hadoop Map/Reduce, users could optionally ask for symlinks
 * to be created in the working directory of the child task. In the current
 * version, symlinks are always created. If the URL does not have a fragment,
 * the name of the file or directory will be used. If multiple files or
 * directories map to the same link name, the last one added will be used. All
 * others will not even be downloaded.</p>
*
* <p><code>DistributedCache</code> tracks modification timestamps of the cache
* files. Clearly the cache files should not be modified by the application
* or externally while the job is executing.</p>
*
* <p>Here is an illustrative example on how to use the
* <code>DistributedCache</code>:</p>
* <p><blockquote><pre>
* // Setting up the cache for the application
*
* 1. Copy the requisite files to the <code>FileSystem</code>:
*
* $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
* $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
* $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
* $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
* $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
* $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
*
* 2. Setup the application's <code>JobConf</code>:
*
* JobConf job = new JobConf();
* DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
* job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
 *     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
 *     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
*
* 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
* or {@link org.apache.hadoop.mapred.Reducer}:
*
* public static class MapClass extends MapReduceBase
* implements Mapper<K, V, K, V> {
*
* private Path[] localArchives;
* private Path[] localFiles;
*
* public void configure(JobConf job) {
* // Get the cached archives/files
* File f = new File("./map.zip/some/file/in/zip.txt");
* }
*
* public void map(K key, V value,
* OutputCollector<K, V> output, Reporter reporter)
* throws IOException {
* // Use data from the cached archives/files here
* // ...
* // ...
* output.collect(k, v);
* }
* }
*
* </pre></blockquote>
*
* It is also very common to use the DistributedCache by using
* {@link org.apache.hadoop.util.GenericOptionsParser}.
*
* This class includes methods that should be used by users
* (specifically those mentioned in the example above, as well
* as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
* as well as methods intended for use by the MapReduce framework
* (e.g., {@link org.apache.hadoop.mapred.JobClient}).
*
* @see org.apache.hadoop.mapred.JobConf
* @see org.apache.hadoop.mapred.JobClient
* @see org.apache.hadoop.mapreduce.Job
*/
@SuppressWarnings("deprecation")
@InterfaceAudience.Public
@InterfaceStability.Stable
@Deprecated
public class DistributedCache extends
org.apache.hadoop.mapreduce.filecache.DistributedCache {
/**
* Warning: {@link #CACHE_FILES_SIZES} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_FILES_SIZES}
*/
@Deprecated
public static final String CACHE_FILES_SIZES =
"mapred.cache.files.filesizes";
/**
* Warning: {@link #CACHE_ARCHIVES_SIZES} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_ARCHIVES_SIZES}
*/
@Deprecated
public static final String CACHE_ARCHIVES_SIZES =
"mapred.cache.archives.filesizes";
/**
* Warning: {@link #CACHE_ARCHIVES_TIMESTAMPS} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_ARCHIVES_TIMESTAMPS}
*/
@Deprecated
public static final String CACHE_ARCHIVES_TIMESTAMPS =
"mapred.cache.archives.timestamps";
/**
* Warning: {@link #CACHE_FILES_TIMESTAMPS} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_FILE_TIMESTAMPS}
*/
@Deprecated
public static final String CACHE_FILES_TIMESTAMPS =
"mapred.cache.files.timestamps";
/**
* Warning: {@link #CACHE_ARCHIVES} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_ARCHIVES}
*/
@Deprecated
public static final String CACHE_ARCHIVES = "mapred.cache.archives";
/**
* Warning: {@link #CACHE_FILES} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_FILES}
*/
@Deprecated
public static final String CACHE_FILES = "mapred.cache.files";
/**
* Warning: {@link #CACHE_LOCALARCHIVES} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_LOCALARCHIVES}
*/
@Deprecated
public static final String CACHE_LOCALARCHIVES =
"mapred.cache.localArchives";
/**
* Warning: {@link #CACHE_LOCALFILES} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_LOCALFILES}
*/
@Deprecated
public static final String CACHE_LOCALFILES = "mapred.cache.localFiles";
/**
* Warning: {@link #CACHE_SYMLINK} is not a *public* constant.
* The variable is kept for M/R 1.x applications, M/R 2.x applications should
* use {@link MRJobConfig#CACHE_SYMLINK}
*/
@Deprecated
public static final String CACHE_SYMLINK = "mapred.create.symlink";
/**
   * Add an archive that has been localized to the conf. Used
* by internal DistributedCache code.
* @param conf The conf to modify to contain the localized caches
* @param str a comma separated list of local archives
*/
@Deprecated
public static void addLocalArchives(Configuration conf, String str) {
String archives = conf.get(CACHE_LOCALARCHIVES);
conf.set(CACHE_LOCALARCHIVES, archives == null ? str
: archives + "," + str);
}
/**
   * Add a file that has been localized to the conf. Used
* by internal DistributedCache code.
* @param conf The conf to modify to contain the localized caches
* @param str a comma separated list of local files
*/
@Deprecated
public static void addLocalFiles(Configuration conf, String str) {
String files = conf.get(CACHE_LOCALFILES);
conf.set(CACHE_LOCALFILES, files == null ? str
: files + "," + str);
}
/**
   * This method creates symlinks for all files in a given dir in another
* directory. Currently symlinks cannot be disabled. This is a NO-OP.
*
* @param conf the configuration
* @param jobCacheDir the target directory for creating symlinks
* @param workDir the directory in which the symlinks are created
* @throws IOException
* @deprecated Internal to MapReduce framework. Use DistributedCacheManager
* instead.
*/
@Deprecated
public static void createAllSymlink(
Configuration conf, File jobCacheDir, File workDir)
throws IOException{
// Do nothing
}
/**
* Returns {@link FileStatus} of a given cache file on hdfs. Internal to
* MapReduce.
* @param conf configuration
* @param cache cache file
* @return <code>FileStatus</code> of a given cache file on hdfs
* @throws IOException
*/
@Deprecated
public static FileStatus getFileStatus(Configuration conf, URI cache)
throws IOException {
FileSystem fileSystem = FileSystem.get(cache, conf);
return fileSystem.getFileStatus(new Path(cache.getPath()));
}
/**
* Returns mtime of a given cache file on hdfs. Internal to MapReduce.
* @param conf configuration
* @param cache cache file
* @return mtime of a given cache file on hdfs
* @throws IOException
*/
@Deprecated
public static long getTimestamp(Configuration conf, URI cache)
throws IOException {
return getFileStatus(conf, cache).getModificationTime();
}
/**
* This is to check the timestamp of the archives to be localized.
* Used by internal MapReduce code.
   * @param conf Configuration which stores the timestamps
* @param timestamps comma separated list of timestamps of archives.
* The order should be the same as the order in which the archives are added.
*/
@Deprecated
public static void setArchiveTimestamps(Configuration conf, String timestamps) {
conf.set(CACHE_ARCHIVES_TIMESTAMPS, timestamps);
}
/**
* This is to check the timestamp of the files to be localized.
* Used by internal MapReduce code.
   * @param conf Configuration which stores the timestamps
* @param timestamps comma separated list of timestamps of files.
* The order should be the same as the order in which the files are added.
*/
@Deprecated
public static void setFileTimestamps(Configuration conf, String timestamps) {
conf.set(CACHE_FILES_TIMESTAMPS, timestamps);
}
/**
* Set the conf to contain the location for localized archives. Used
* by internal DistributedCache code.
* @param conf The conf to modify to contain the localized caches
* @param str a comma separated list of local archives
*/
@Deprecated
public static void setLocalArchives(Configuration conf, String str) {
conf.set(CACHE_LOCALARCHIVES, str);
}
/**
* Set the conf to contain the location for localized files. Used
* by internal DistributedCache code.
* @param conf The conf to modify to contain the localized caches
* @param str a comma separated list of local files
*/
@Deprecated
public static void setLocalFiles(Configuration conf, String str) {
conf.set(CACHE_LOCALFILES, str);
}
}
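
// --- Editor's addition (not part of the original DistributedCache.java) ---
// A minimal sketch of the cache setup shown in the javadoc example above, but
// expressed through the non-deprecated org.apache.hadoop.mapreduce.Job API that
// this deprecated class points users toward. The paths and the helper class
// name below are illustrative assumptions only.
class DistributedCacheUsageSketch {
  static void configureCache(org.apache.hadoop.mapreduce.Job job)
      throws IOException, java.net.URISyntaxException {
    // files and archives are localized once per job on every node running tasks
    job.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"));
    job.addCacheArchive(new URI("/myapp/map.zip"));
    // jars can additionally be placed on the classpath of the launched tasks
    job.addFileToClassPath(new Path("/myapp/mylib.jar"));
  }
}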
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CounterGroup.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.counters.CounterGroupBase;
/**
* A group of {@link Counter}s that logically belong together. Typically,
* it is an {@link Enum} subclass and the counters are the values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface CounterGroup extends CounterGroupBase<Counter> {
// essentially a typedef so user doesn't have to use generic syntax
}
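
// --- Editor's addition (not part of the original CounterGroup.java) ---
// A minimal sketch of the enum-as-counter-group pattern the javadoc above
// refers to: each enum constant becomes a Counter, grouped under the enum's
// class name. The enum name and constants are illustrative assumptions; task
// code would typically bump them via
// context.getCounter(WordStats.TOTAL_WORDS).increment(1).
enum WordStats {
  TOTAL_WORDS,
  SKIPPED_WORDS
}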
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Partitioner.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
/**
* Partitions the key space.
*
* <p><code>Partitioner</code> controls the partitioning of the keys of the
* intermediate map-outputs. The key (or a subset of the key) is used to derive
* the partition, typically by a hash function. The total number of partitions
* is the same as the number of reduce tasks for the job. Hence this controls
* which of the <code>m</code> reduce tasks the intermediate key (and hence the
* record) is sent for reduction.</p>
*
* Note: If you require your Partitioner class to obtain the Job's configuration
* object, implement the {@link Configurable} interface.
*
* @see Reducer
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class Partitioner<KEY, VALUE> {
/**
* Get the partition number for a given key (hence record) given the total
* number of partitions i.e. number of reduce-tasks for the job.
*
   * <p>Typically a hash function on all or a subset of the key.</p>
*
   * @param key the key to be partitioned.
* @param value the entry value.
* @param numPartitions the total number of partitions.
* @return the partition number for the <code>key</code>.
*/
public abstract int getPartition(KEY key, VALUE value, int numPartitions);
}
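
// --- Editor's addition (not part of the original Partitioner.java) ---
// A minimal hash-based sketch of the contract described above: derive the
// partition from the key so every record with a given key reaches the same
// reduce task. The class name and the Text/IntWritable key/value types are
// illustrative assumptions, mirroring what the stock HashPartitioner does.
class ExampleHashPartitioner
    extends Partitioner<org.apache.hadoop.io.Text, org.apache.hadoop.io.IntWritable> {
  @Override
  public int getPartition(org.apache.hadoop.io.Text key,
      org.apache.hadoop.io.IntWritable value, int numPartitions) {
    // mask the sign bit so the result always falls in [0, numPartitions)
    return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
  }
}
// A job would select it with job.setPartitionerClass(ExampleHashPartitioner.class).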
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Used to describe the priority of the running job.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum JobPriority {
VERY_HIGH,
HIGH,
NORMAL,
LOW,
VERY_LOW;
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
 * Placeholder for cluster-level configuration keys.
*
* The keys should have "mapreduce.cluster." as the prefix.
*
*/
@InterfaceAudience.Private
public interface MRConfig {
// Cluster-level configuration parameters
public static final String TEMP_DIR = "mapreduce.cluster.temp.dir";
public static final String LOCAL_DIR = "mapreduce.cluster.local.dir";
public static final String MAPMEMORY_MB = "mapreduce.cluster.mapmemory.mb";
public static final String REDUCEMEMORY_MB =
"mapreduce.cluster.reducememory.mb";
public static final String MR_ACLS_ENABLED = "mapreduce.cluster.acls.enabled";
public static final String MR_ADMINS =
"mapreduce.cluster.administrators";
@Deprecated
public static final String MR_SUPERGROUP =
"mapreduce.cluster.permissions.supergroup";
//Delegation token related keys
public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
"mapreduce.cluster.delegation.key.update-interval";
public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT =
24*60*60*1000; // 1 day
public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
"mapreduce.cluster.delegation.token.renew-interval";
public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
24*60*60*1000; // 1 day
public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY =
"mapreduce.cluster.delegation.token.max-lifetime";
public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
7*24*60*60*1000; // 7 days
public static final String RESOURCE_CALCULATOR_PROCESS_TREE =
"mapreduce.job.process-tree.class";
public static final String STATIC_RESOLUTIONS =
"mapreduce.job.net.static.resolutions";
public static final String MASTER_ADDRESS = "mapreduce.jobtracker.address";
public static final String MASTER_USER_NAME =
"mapreduce.jobtracker.kerberos.principal";
public static final String FRAMEWORK_NAME = "mapreduce.framework.name";
public static final String CLASSIC_FRAMEWORK_NAME = "classic";
public static final String YARN_FRAMEWORK_NAME = "yarn";
public static final String LOCAL_FRAMEWORK_NAME = "local";
public static final String TASK_LOCAL_OUTPUT_CLASS =
"mapreduce.task.local.output.class";
public static final String PROGRESS_STATUS_LEN_LIMIT_KEY =
"mapreduce.task.max.status.length";
public static final int PROGRESS_STATUS_LEN_LIMIT_DEFAULT = 512;
public static final int MAX_BLOCK_LOCATIONS_DEFAULT = 10;
public static final String MAX_BLOCK_LOCATIONS_KEY =
"mapreduce.job.max.split.locations";
public static final String SHUFFLE_SSL_ENABLED_KEY =
"mapreduce.shuffle.ssl.enabled";
public static final boolean SHUFFLE_SSL_ENABLED_DEFAULT = false;
public static final String SHUFFLE_CONSUMER_PLUGIN =
"mapreduce.job.reduce.shuffle.consumer.plugin.class";
/**
* Configuration key to enable/disable IFile readahead.
*/
public static final String MAPRED_IFILE_READAHEAD =
"mapreduce.ifile.readahead";
public static final boolean DEFAULT_MAPRED_IFILE_READAHEAD = true;
/**
* Configuration key to set the IFile readahead length in bytes.
*/
public static final String MAPRED_IFILE_READAHEAD_BYTES =
"mapreduce.ifile.readahead.bytes";
public static final int DEFAULT_MAPRED_IFILE_READAHEAD_BYTES =
4 * 1024 * 1024;
/**
* Whether users are explicitly trying to control resource monitoring
* configuration for the MiniMRCluster. Disabled by default.
*/
public static final String MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING
= "mapreduce.minicluster.control-resource-monitoring";
public static final boolean
DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING = false;
@Public
@Unstable
public static final String MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM =
"mapreduce.app-submission.cross-platform";
@Public
@Unstable
public static final boolean DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM =
false;
/**
* Enable application master webapp ui actions.
*/
String MASTER_WEBAPP_UI_ACTIONS_ENABLED =
"mapreduce.webapp.ui-actions.enabled";
boolean DEFAULT_MASTER_WEBAPP_UI_ACTIONS_ENABLED = true;
}
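
// --- Editor's addition (not part of the original MRConfig.java) ---
// A minimal sketch of how these keys are typically consumed: read the value
// from a Configuration and fall back to the documented default when the key is
// unset. The helper class and method names are illustrative assumptions.
class MRConfigUsageSketch {
  static boolean isShuffleSslEnabled(org.apache.hadoop.conf.Configuration conf) {
    return conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
        MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);
  }
}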
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/StatusReporter.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public abstract class StatusReporter {
public abstract Counter getCounter(Enum<?> name);
public abstract Counter getCounter(String group, String name);
public abstract void progress();
/**
* Get the current progress.
* @return a number between 0.0 and 1.0 (inclusive) indicating the attempt's
* progress.
*/
public abstract float getProgress();
public abstract void setStatus(String status);
}
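
// --- Editor's addition (not part of the original StatusReporter.java) ---
// A minimal no-op sketch of the contract above, the kind of stub a local or
// test harness might hand to task code. The class name is an illustrative
// assumption, not framework code.
class NoOpStatusReporter extends StatusReporter {
  @Override
  public Counter getCounter(Enum<?> name) { return null; }
  @Override
  public Counter getCounter(String group, String name) { return null; }
  @Override
  public void progress() { }                  // nothing to report
  @Override
  public float getProgress() { return 0.0f; } // 0.0..1.0 per the contract above
  @Override
  public void setStatus(String status) { }
}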
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringInterner;
/**************************************************
* Describes the current status of a job.
**************************************************/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class JobStatus implements Writable, Cloneable {
static { // register a ctor
WritableFactories.setFactory
(JobStatus.class,
new WritableFactory() {
public Writable newInstance() { return new JobStatus(); }
});
}
/**
* Current state of the job
*/
public static enum State {
RUNNING(1),
SUCCEEDED(2),
FAILED(3),
PREP(4),
KILLED(5);
int value;
State(int value) {
this.value = value;
}
public int getValue() {
return value;
}
};
private JobID jobid;
private float mapProgress;
private float reduceProgress;
private float cleanupProgress;
private float setupProgress;
private State runState;
private long startTime;
private String user;
private String queue;
private JobPriority priority;
private String schedulingInfo="NA";
private String failureInfo = "NA";
private Map<JobACL, AccessControlList> jobACLs =
new HashMap<JobACL, AccessControlList>();
private String jobName;
private String jobFile;
private long finishTime;
private boolean isRetired;
private String historyFile = "";
private String trackingUrl ="";
private int numUsedSlots;
private int numReservedSlots;
private int usedMem;
private int reservedMem;
private int neededMem;
private boolean isUber;
  /**
   * Default constructor, used by the Writable machinery when
   * de-serializing a <code>JobStatus</code>.
   */
public JobStatus() {
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
State runState, JobPriority jp, String user, String jobName,
String jobFile, String trackingUrl) {
this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
runState, jp, user, jobName, "default", jobFile, trackingUrl, false);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param queue queue name
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
State runState, JobPriority jp,
String user, String jobName, String queue,
String jobFile, String trackingUrl) {
this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
runState, jp, user, jobName, queue, jobFile, trackingUrl, false);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param queue queue name
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
* @param isUber Whether job running in uber mode
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
State runState, JobPriority jp,
String user, String jobName, String queue,
String jobFile, String trackingUrl, boolean isUber) {
this.jobid = jobid;
this.setupProgress = setupProgress;
this.mapProgress = mapProgress;
this.reduceProgress = reduceProgress;
this.cleanupProgress = cleanupProgress;
this.runState = runState;
this.user = user;
this.queue = queue;
if (jp == null) {
throw new IllegalArgumentException("Job Priority cannot be null.");
}
priority = jp;
this.jobName = jobName;
this.jobFile = jobFile;
this.trackingUrl = trackingUrl;
this.isUber = isUber;
}
/**
* Sets the map progress of this job
* @param p The value of map progress to set to
*/
protected synchronized void setMapProgress(float p) {
this.mapProgress = (float) Math.min(1.0, Math.max(0.0, p));
}
/**
* Sets the cleanup progress of this job
* @param p The value of cleanup progress to set to
*/
protected synchronized void setCleanupProgress(float p) {
this.cleanupProgress = (float) Math.min(1.0, Math.max(0.0, p));
}
/**
* Sets the setup progress of this job
* @param p The value of setup progress to set to
*/
protected synchronized void setSetupProgress(float p) {
this.setupProgress = (float) Math.min(1.0, Math.max(0.0, p));
}
/**
* Sets the reduce progress of this Job
* @param p The value of reduce progress to set to
*/
protected synchronized void setReduceProgress(float p) {
this.reduceProgress = (float) Math.min(1.0, Math.max(0.0, p));
}
/**
* Set the priority of the job, defaulting to NORMAL.
* @param jp new job priority
*/
protected synchronized void setPriority(JobPriority jp) {
if (jp == null) {
throw new IllegalArgumentException("Job priority cannot be null.");
}
priority = jp;
}
/**
* Set the finish time of the job
* @param finishTime The finishTime of the job
*/
protected synchronized void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
/**
* Set the job history file url for a completed job
*/
protected synchronized void setHistoryFile(String historyFile) {
this.historyFile = historyFile;
}
/**
* Set the link to the web-ui for details of the job.
*/
protected synchronized void setTrackingUrl(String trackingUrl) {
this.trackingUrl = trackingUrl;
}
/**
* Set the job retire flag to true.
*/
protected synchronized void setRetired() {
this.isRetired = true;
}
/**
* Change the current run state of the job.
*/
protected synchronized void setState(State state) {
this.runState = state;
}
/**
* Set the start time of the job
* @param startTime The startTime of the job
*/
protected synchronized void setStartTime(long startTime) {
this.startTime = startTime;
}
/**
* @param userName The username of the job
*/
protected synchronized void setUsername(String userName) {
this.user = userName;
}
/**
* Used to set the scheduling information associated to a particular Job.
*
* @param schedulingInfo Scheduling information of the job
*/
protected synchronized void setSchedulingInfo(String schedulingInfo) {
this.schedulingInfo = schedulingInfo;
}
/**
* Set the job acls.
*
* @param acls {@link Map} from {@link JobACL} to {@link AccessControlList}
*/
protected synchronized void setJobACLs(Map<JobACL, AccessControlList> acls) {
this.jobACLs = acls;
}
/**
* Set queue name
* @param queue queue name
*/
protected synchronized void setQueue(String queue) {
this.queue = queue;
}
/**
* Set diagnostic information.
* @param failureInfo diagnostic information
*/
protected synchronized void setFailureInfo(String failureInfo) {
this.failureInfo = failureInfo;
}
/**
* Get queue name
* @return queue name
*/
public synchronized String getQueue() {
return queue;
}
/**
* @return Percentage of progress in maps
*/
public synchronized float getMapProgress() { return mapProgress; }
/**
* @return Percentage of progress in cleanup
*/
public synchronized float getCleanupProgress() { return cleanupProgress; }
/**
* @return Percentage of progress in setup
*/
public synchronized float getSetupProgress() { return setupProgress; }
/**
* @return Percentage of progress in reduce
*/
public synchronized float getReduceProgress() { return reduceProgress; }
/**
* @return running state of the job
*/
public synchronized State getState() { return runState; }
/**
* @return start time of the job
*/
synchronized public long getStartTime() { return startTime;}
@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException cnse) {
      // Shouldn't happen since we do implement Cloneable
throw new InternalError(cnse.toString());
}
}
/**
* @return The jobid of the Job
*/
public JobID getJobID() { return jobid; }
/**
* @return the username of the job
*/
public synchronized String getUsername() { return this.user;}
/**
* Gets the Scheduling information associated to a particular Job.
* @return the scheduling information of the job
*/
public synchronized String getSchedulingInfo() {
return schedulingInfo;
}
/**
* Get the job acls.
*
* @return a {@link Map} from {@link JobACL} to {@link AccessControlList}
*/
public synchronized Map<JobACL, AccessControlList> getJobACLs() {
return jobACLs;
}
/**
* Return the priority of the job
* @return job priority
*/
public synchronized JobPriority getPriority() { return priority; }
/**
* Gets any available info on the reason of failure of the job.
* @return diagnostic information on why a job might have failed.
*/
public synchronized String getFailureInfo() {
return this.failureInfo;
}
/**
* Returns true if the status is for a completed job.
*/
public synchronized boolean isJobComplete() {
return (runState == JobStatus.State.SUCCEEDED ||
runState == JobStatus.State.FAILED ||
runState == JobStatus.State.KILLED);
}
///////////////////////////////////////
// Writable
///////////////////////////////////////
public synchronized void write(DataOutput out) throws IOException {
jobid.write(out);
out.writeFloat(setupProgress);
out.writeFloat(mapProgress);
out.writeFloat(reduceProgress);
out.writeFloat(cleanupProgress);
WritableUtils.writeEnum(out, runState);
out.writeLong(startTime);
Text.writeString(out, user);
WritableUtils.writeEnum(out, priority);
Text.writeString(out, schedulingInfo);
out.writeLong(finishTime);
out.writeBoolean(isRetired);
Text.writeString(out, historyFile);
Text.writeString(out, jobName);
Text.writeString(out, trackingUrl);
Text.writeString(out, jobFile);
out.writeBoolean(isUber);
// Serialize the job's ACLs
out.writeInt(jobACLs.size());
for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
WritableUtils.writeEnum(out, entry.getKey());
entry.getValue().write(out);
}
}
public synchronized void readFields(DataInput in) throws IOException {
this.jobid = new JobID();
this.jobid.readFields(in);
this.setupProgress = in.readFloat();
this.mapProgress = in.readFloat();
this.reduceProgress = in.readFloat();
this.cleanupProgress = in.readFloat();
this.runState = WritableUtils.readEnum(in, State.class);
this.startTime = in.readLong();
this.user = StringInterner.weakIntern(Text.readString(in));
this.priority = WritableUtils.readEnum(in, JobPriority.class);
this.schedulingInfo = StringInterner.weakIntern(Text.readString(in));
this.finishTime = in.readLong();
this.isRetired = in.readBoolean();
this.historyFile = StringInterner.weakIntern(Text.readString(in));
this.jobName = StringInterner.weakIntern(Text.readString(in));
this.trackingUrl = StringInterner.weakIntern(Text.readString(in));
this.jobFile = StringInterner.weakIntern(Text.readString(in));
this.isUber = in.readBoolean();
// De-serialize the job's ACLs
int numACLs = in.readInt();
for (int i = 0; i < numACLs; i++) {
JobACL aclType = WritableUtils.readEnum(in, JobACL.class);
AccessControlList acl = new AccessControlList(" ");
acl.readFields(in);
this.jobACLs.put(aclType, acl);
}
}
/**
* Get the user-specified job name.
*/
public String getJobName() {
return jobName;
}
/**
* Get the configuration file for the job.
*/
public String getJobFile() {
return jobFile;
}
/**
* Get the link to the web-ui for details of the job.
*/
public synchronized String getTrackingUrl() {
return trackingUrl;
}
/**
* Get the finish time of the job.
*/
public synchronized long getFinishTime() {
return finishTime;
}
/**
* Check whether the job has retired.
*/
public synchronized boolean isRetired() {
return isRetired;
}
/**
* @return the job history file name for a completed job. If job is not
* completed or history file not available then return null.
*/
public synchronized String getHistoryFile() {
return historyFile;
}
/**
* @return number of used mapred slots
*/
public int getNumUsedSlots() {
return numUsedSlots;
}
/**
* @param n number of used mapred slots
*/
public void setNumUsedSlots(int n) {
numUsedSlots = n;
}
/**
* @return the number of reserved slots
*/
public int getNumReservedSlots() {
return numReservedSlots;
}
/**
* @param n the number of reserved slots
*/
public void setNumReservedSlots(int n) {
this.numReservedSlots = n;
}
/**
* @return the used memory
*/
public int getUsedMem() {
return usedMem;
}
/**
* @param m the used memory
*/
public void setUsedMem(int m) {
this.usedMem = m;
}
/**
* @return the reserved memory
*/
public int getReservedMem() {
return reservedMem;
}
/**
* @param r the reserved memory
*/
public void setReservedMem(int r) {
this.reservedMem = r;
}
/**
* @return the needed memory
*/
public int getNeededMem() {
return neededMem;
}
/**
* @param n the needed memory
*/
public void setNeededMem(int n) {
this.neededMem = n;
}
/**
* Whether job running in uber mode
* @return job in uber-mode
*/
public synchronized boolean isUber() {
return isUber;
}
/**
* Set uber-mode flag
* @param isUber Whether job running in uber-mode
*/
public synchronized void setUber(boolean isUber) {
this.isUber = isUber;
}
public String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append("job-id : " + jobid);
buffer.append("uber-mode : " + isUber);
buffer.append("map-progress : " + mapProgress);
buffer.append("reduce-progress : " + reduceProgress);
buffer.append("cleanup-progress : " + cleanupProgress);
buffer.append("setup-progress : " + setupProgress);
buffer.append("runstate : " + runState);
buffer.append("start-time : " + startTime);
buffer.append("user-name : " + user);
buffer.append("priority : " + priority);
buffer.append("scheduling-info : " + schedulingInfo);
buffer.append("num-used-slots" + numUsedSlots);
buffer.append("num-reserved-slots" + numReservedSlots);
buffer.append("used-mem" + usedMem);
buffer.append("reserved-mem" + reservedMem);
buffer.append("needed-mem" + neededMem);
return buffer.toString();
}
}
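
// Editorial sketch, not part of the original source: round-tripping a
// JobStatus through its Writable methods, assuming the status passed in is
// fully populated (non-null job id, user, priority, etc.). The class and
// method names are hypothetical; DataOutputBuffer/DataInputBuffer are fully
// qualified because this file does not import them.
class JobStatusRoundTripExample {
  static JobStatus roundTrip(JobStatus original) throws IOException {
    org.apache.hadoop.io.DataOutputBuffer out =
        new org.apache.hadoop.io.DataOutputBuffer();
    original.write(out);                        // serialize all fields, incl. ACLs
    org.apache.hadoop.io.DataInputBuffer in =
        new org.apache.hadoop.io.DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    JobStatus copy = new JobStatus();           // default ctor for Writables
    copy.readFields(in);                        // restore the serialized state
    return copy;
  }
}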
| 18,097 | 27.681458 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Enum for map, reduce, job-setup, job-cleanup, task-cleanup task types.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum TaskType {
MAP, REDUCE, JOB_SETUP, JOB_CLEANUP, TASK_CLEANUP
}
| 1,178 | 35.84375 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskInputOutputContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A context object that allows input and output from the task. It is only
* supplied to the {@link Mapper} or {@link Reducer}.
* @param <KEYIN> the input key type for the task
* @param <VALUEIN> the input value type for the task
* @param <KEYOUT> the output key type for the task
* @param <VALUEOUT> the output value type for the task
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface TaskInputOutputContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
extends TaskAttemptContext {
/**
   * Advance to the next key/value pair.
   * @return true if a key/value pair was read, false if the end of the input
   * was reached
*/
public boolean nextKeyValue() throws IOException, InterruptedException;
/**
* Get the current key.
* @return the current key object or null if there isn't one
* @throws IOException
* @throws InterruptedException
*/
public KEYIN getCurrentKey() throws IOException, InterruptedException;
/**
* Get the current value.
* @return the value object that was read into
* @throws IOException
* @throws InterruptedException
*/
public VALUEIN getCurrentValue() throws IOException, InterruptedException;
/**
* Generate an output key/value pair.
*/
public void write(KEYOUT key, VALUEOUT value)
throws IOException, InterruptedException;
/**
* Get the {@link OutputCommitter} for the task-attempt.
* @return the <code>OutputCommitter</code> for the task-attempt
*/
public OutputCommitter getOutputCommitter();
}
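
// Editorial sketch, not part of the original source: the canonical way a task
// drains this context, mirroring the loop in Mapper#run. The helper class and
// method are hypothetical and assume the input types are also the output
// types.
class ContextDrainExample {
  static <K, V> void copyAll(TaskInputOutputContext<K, V, K, V> context)
      throws IOException, InterruptedException {
    while (context.nextKeyValue()) {             // false once input is exhausted
      context.write(context.getCurrentKey(),     // emit the current pair unchanged
                    context.getCurrentValue());
    }
  }
}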
| 2,528 | 33.643836 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/InputSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.mapred.SplitLocationInfo;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
/**
* <code>InputSplit</code> represents the data to be processed by an
* individual {@link Mapper}.
*
 * <p>Typically, it presents a byte-oriented view of the input, and it is the
 * responsibility of the job's {@link RecordReader} to process this and present
 * a record-oriented view.
*
* @see InputFormat
* @see RecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class InputSplit {
/**
* Get the size of the split, so that the input splits can be sorted by size.
* @return the number of bytes in the split
* @throws IOException
* @throws InterruptedException
*/
public abstract long getLength() throws IOException, InterruptedException;
/**
* Get the list of nodes by name where the data for the split would be local.
* The locations do not need to be serialized.
*
   * @return a new array of node names.
* @throws IOException
* @throws InterruptedException
*/
public abstract
String[] getLocations() throws IOException, InterruptedException;
/**
* Gets info about which nodes the input split is stored on and how it is
* stored at each location.
*
* @return list of <code>SplitLocationInfo</code>s describing how the split
* data is stored at each location. A null value indicates that all the
* locations have the data stored on disk.
* @throws IOException
*/
@Evolving
public SplitLocationInfo[] getLocationInfo() throws IOException {
return null;
}
}
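
// Editorial sketch, not part of the original source: a trivial InputSplit that
// covers a fixed number of bytes and reports no locality. The class name is
// hypothetical; concrete splits used with real InputFormats (e.g. FileSplit)
// additionally implement Writable so the framework can serialize them.
class OpaqueRangeSplit extends InputSplit {
  private final long length;
  OpaqueRangeSplit(long length) {
    this.length = length;
  }
  @Override
  public long getLength() {
    return length;                   // lets the framework sort splits by size
  }
  @Override
  public String[] getLocations() {
    return new String[0];            // no preferred nodes for this split
  }
}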
| 2,754 | 34.779221 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class JobResourceUploader {
protected static final Log LOG = LogFactory.getLog(JobResourceUploader.class);
private FileSystem jtFs;
JobResourceUploader(FileSystem submitFs) {
this.jtFs = submitFs;
}
/**
* Upload and configure files, libjars, jobjars, and archives pertaining to
* the passed job.
*
* @param job the job containing the files to be uploaded
* @param submitJobDir the submission directory of the job
* @throws IOException
*/
public void uploadFiles(Job job, Path submitJobDir) throws IOException {
Configuration conf = job.getConfiguration();
short replication =
(short) conf.getInt(Job.SUBMIT_REPLICATION,
Job.DEFAULT_SUBMIT_REPLICATION);
if (!(conf.getBoolean(Job.USED_GENERIC_PARSER, false))) {
LOG.warn("Hadoop command-line option parsing not performed. "
+ "Implement the Tool interface and execute your application "
+ "with ToolRunner to remedy this.");
}
// get all the command line arguments passed in by the user conf
String files = conf.get("tmpfiles");
String libjars = conf.get("tmpjars");
String archives = conf.get("tmparchives");
String jobJar = job.getJar();
//
// Figure out what fs the JobTracker is using. Copy the
// job to it, under a temporary name. This allows DFS to work,
// and under the local fs also provides UNIX-like object loading
// semantics. (that is, if the job file is deleted right after
// submission, we can still run the submission to completion)
//
// Create a number of filenames in the JobTracker's fs namespace
LOG.debug("default FileSystem: " + jtFs.getUri());
if (jtFs.exists(submitJobDir)) {
throw new IOException("Not submitting job. Job directory " + submitJobDir
+ " already exists!! This is unexpected.Please check what's there in"
+ " that directory");
}
submitJobDir = jtFs.makeQualified(submitJobDir);
submitJobDir = new Path(submitJobDir.toUri().getPath());
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
FileSystem.mkdirs(jtFs, submitJobDir, mapredSysPerms);
Path filesDir = JobSubmissionFiles.getJobDistCacheFiles(submitJobDir);
Path archivesDir = JobSubmissionFiles.getJobDistCacheArchives(submitJobDir);
Path libjarsDir = JobSubmissionFiles.getJobDistCacheLibjars(submitJobDir);
// add all the command line files/ jars and archive
// first copy them to jobtrackers filesystem
if (files != null) {
FileSystem.mkdirs(jtFs, filesDir, mapredSysPerms);
String[] fileArr = files.split(",");
for (String tmpFile : fileArr) {
URI tmpURI = null;
try {
tmpURI = new URI(tmpFile);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
Path tmp = new Path(tmpURI);
Path newPath = copyRemoteFiles(filesDir, tmp, conf, replication);
try {
URI pathURI = getPathURI(newPath, tmpURI.getFragment());
DistributedCache.addCacheFile(pathURI, conf);
} catch (URISyntaxException ue) {
// should not throw a uri exception
throw new IOException("Failed to create uri for " + tmpFile, ue);
}
}
}
if (libjars != null) {
FileSystem.mkdirs(jtFs, libjarsDir, mapredSysPerms);
String[] libjarsArr = libjars.split(",");
for (String tmpjars : libjarsArr) {
Path tmp = new Path(tmpjars);
Path newPath = copyRemoteFiles(libjarsDir, tmp, conf, replication);
DistributedCache.addFileToClassPath(
new Path(newPath.toUri().getPath()), conf, jtFs);
}
}
if (archives != null) {
FileSystem.mkdirs(jtFs, archivesDir, mapredSysPerms);
String[] archivesArr = archives.split(",");
for (String tmpArchives : archivesArr) {
URI tmpURI;
try {
tmpURI = new URI(tmpArchives);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
Path tmp = new Path(tmpURI);
Path newPath = copyRemoteFiles(archivesDir, tmp, conf, replication);
try {
URI pathURI = getPathURI(newPath, tmpURI.getFragment());
DistributedCache.addCacheArchive(pathURI, conf);
} catch (URISyntaxException ue) {
          // should not throw a URI exception
throw new IOException("Failed to create uri for " + tmpArchives, ue);
}
}
}
if (jobJar != null) { // copy jar to JobTracker's fs
// use jar name if job is not named.
if ("".equals(job.getJobName())) {
job.setJobName(new Path(jobJar).getName());
}
Path jobJarPath = new Path(jobJar);
URI jobJarURI = jobJarPath.toUri();
// If the job jar is already in a global fs,
// we don't need to copy it from local fs
if (jobJarURI.getScheme() == null || jobJarURI.getScheme().equals("file")) {
copyJar(jobJarPath, JobSubmissionFiles.getJobJar(submitJobDir),
replication);
job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
}
} else {
LOG.warn("No job jar file set. User classes may not be found. "
+ "See Job or Job#setJar(String).");
}
addLog4jToDistributedCache(job, submitJobDir);
// set the timestamps of the archives and files
// set the public/private visibility of the archives and files
ClientDistributedCacheManager.determineTimestampsAndCacheVisibilities(conf);
// get DelegationToken for cached file
ClientDistributedCacheManager.getDelegationTokens(conf,
job.getCredentials());
}
// copies a file to the jobtracker filesystem and returns the path where it
// was copied to
private Path copyRemoteFiles(Path parentDir, Path originalPath,
Configuration conf, short replication) throws IOException {
// check if we do not need to copy the files
    // Check whether the JobTracker is using the same file system.
    // This only compares URI strings (no DNS lookups) to decide whether the
    // file systems are the same; not optimal, but it avoids name resolution.
FileSystem remoteFs = null;
remoteFs = originalPath.getFileSystem(conf);
if (FileUtil.compareFs(remoteFs, jtFs)) {
return originalPath;
}
// this might have name collisions. copy will throw an exception
// parse the original path to create new path
Path newPath = new Path(parentDir, originalPath.getName());
FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
jtFs.setReplication(newPath, replication);
return newPath;
}
private void copyJar(Path originalJarPath, Path submitJarFile,
short replication) throws IOException {
jtFs.copyFromLocalFile(originalJarPath, submitJarFile);
jtFs.setReplication(submitJarFile, replication);
jtFs.setPermission(submitJarFile, new FsPermission(
JobSubmissionFiles.JOB_FILE_PERMISSION));
}
private void addLog4jToDistributedCache(Job job, Path jobSubmitDir)
throws IOException {
Configuration conf = job.getConfiguration();
String log4jPropertyFile =
conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, "");
if (!log4jPropertyFile.isEmpty()) {
short replication = (short) conf.getInt(Job.SUBMIT_REPLICATION, 10);
copyLog4jPropertyFile(job, jobSubmitDir, replication);
}
}
private URI getPathURI(Path destPath, String fragment)
throws URISyntaxException {
URI pathURI = destPath.toUri();
if (pathURI.getFragment() == null) {
if (fragment == null) {
pathURI = new URI(pathURI.toString() + "#" + destPath.getName());
} else {
pathURI = new URI(pathURI.toString() + "#" + fragment);
}
}
return pathURI;
}
  // Copy the user-specified log4j.properties file from the local file system
  // to HDFS, put it on the distributed cache, and add its parent directory
  // to the classpath.
@SuppressWarnings("deprecation")
private void copyLog4jPropertyFile(Job job, Path submitJobDir,
short replication) throws IOException {
Configuration conf = job.getConfiguration();
String file =
validateFilePath(
conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE), conf);
LOG.debug("default FileSystem: " + jtFs.getUri());
FsPermission mapredSysPerms =
new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
if (!jtFs.exists(submitJobDir)) {
throw new IOException("Cannot find job submission directory! "
+ "It should just be created, so something wrong here.");
}
Path fileDir = JobSubmissionFiles.getJobLog4jFile(submitJobDir);
// first copy local log4j.properties file to HDFS under submitJobDir
if (file != null) {
FileSystem.mkdirs(jtFs, fileDir, mapredSysPerms);
URI tmpURI = null;
try {
tmpURI = new URI(file);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
Path tmp = new Path(tmpURI);
Path newPath = copyRemoteFiles(fileDir, tmp, conf, replication);
DistributedCache.addFileToClassPath(new Path(newPath.toUri().getPath()),
conf);
}
}
/**
   * Takes a path string for a file and verifies that it exists. Paths without
   * a scheme default to the local file system (file:///), and the returned URI
   * is qualified accordingly, so an input of /home/user/file1 would return
   * file:///home/user/file1.
   *
   * @param file the path string to validate
   * @param conf configuration used to resolve the file system
   * @return the fully-qualified URI string of the file, or null if file is null
*/
private String validateFilePath(String file, Configuration conf)
throws IOException {
if (file == null) {
return null;
}
if (file.isEmpty()) {
throw new IllegalArgumentException("File name can't be empty string");
}
String finalPath;
URI pathURI;
try {
pathURI = new URI(file);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
Path path = new Path(pathURI);
FileSystem localFs = FileSystem.getLocal(conf);
if (pathURI.getScheme() == null) {
// default to the local file system
// check if the file exists or not first
if (!localFs.exists(path)) {
throw new FileNotFoundException("File " + file + " does not exist.");
}
finalPath =
path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory())
.toString();
} else {
// check if the file exists in this file system
// we need to recreate this filesystem object to copy
// these files to the file system ResourceManager is running
// on.
FileSystem fs = path.getFileSystem(conf);
if (!fs.exists(path)) {
throw new FileNotFoundException("File " + file + " does not exist.");
}
finalPath =
path.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
}
return finalPath;
}
}
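
// Editorial sketch, not part of the original source: how the "tmpfiles",
// "tmpjars" and "tmparchives" properties read by uploadFiles() are typically
// populated before submission (GenericOptionsParser fills them from the
// -files/-libjars/-archives command-line options). The paths below are
// hypothetical and only illustrate the expected comma-separated format.
class UploadConfExample {
  static void configure(Job job) {
    Configuration conf = job.getConfiguration();
    conf.set("tmpfiles", "file:///home/user/lookup.txt");    // shipped via the distributed cache
    conf.set("tmpjars", "file:///home/user/extra-lib.jar");  // added to the task classpath
    conf.set("tmparchives", "file:///home/user/dict.zip");   // unpacked on each node
  }
}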
| 12,616 | 37.466463 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobACL.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.*;
/**
* Job related ACLs
*/
@InterfaceAudience.Private
public enum JobACL {
/**
* ACL for 'viewing' job. Dictates who can 'view' some or all of the job
* related details.
*/
VIEW_JOB(MRJobConfig.JOB_ACL_VIEW_JOB),
/**
* ACL for 'modifying' job. Dictates who can 'modify' the job for e.g., by
* killing the job, killing/failing a task of the job or setting priority of
* the job.
*/
MODIFY_JOB(MRJobConfig.JOB_ACL_MODIFY_JOB);
String aclName;
JobACL(String name) {
this.aclName = name;
}
/**
   * Get the name of the ACL. Here it is the same as the name of the configuration
* property for specifying the ACL for the job.
*
* @return aclName
*/
public String getAclName() {
return aclName;
}
}
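
// Editorial sketch, not part of the original source: granting view access on a
// job by setting the configuration property behind the VIEW_JOB ACL. The user
// and group names are hypothetical; the value follows the usual
// AccessControlList "users groups" comma-separated format.
class JobAclExample {
  static void allowViewers(org.apache.hadoop.conf.Configuration conf) {
    conf.set(JobACL.VIEW_JOB.getAclName(), "alice,bob opsgroup");
  }
}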
| 1,645 | 27.877193 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.TIPStatus;
import org.apache.hadoop.mapred.TaskID;
import org.apache.hadoop.util.StringInterner;
/** A report on the state of a task. */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class TaskReport implements Writable {
private TaskID taskid;
private float progress;
private String state;
private String[] diagnostics;
private long startTime;
private long finishTime;
private Counters counters;
private TIPStatus currentStatus;
private Collection<TaskAttemptID> runningAttempts =
new ArrayList<TaskAttemptID>();
private TaskAttemptID successfulAttempt = new TaskAttemptID();
public TaskReport() {
taskid = new TaskID();
}
/**
* Creates a new TaskReport object
* @param taskid
* @param progress
* @param state
* @param diagnostics
* @param currentStatus
* @param startTime
* @param finishTime
* @param counters
*/
public TaskReport(TaskID taskid, float progress, String state,
String[] diagnostics, TIPStatus currentStatus,
long startTime, long finishTime,
Counters counters) {
this.taskid = taskid;
this.progress = progress;
this.state = state;
this.diagnostics = diagnostics;
this.currentStatus = currentStatus;
this.startTime = startTime;
this.finishTime = finishTime;
this.counters = counters;
}
/** The string of the task ID. */
public String getTaskId() {
return taskid.toString();
}
/** The ID of the task. */
public TaskID getTaskID() {
return taskid;
}
/** The amount completed, between zero and one. */
public float getProgress() { return progress; }
/** The most recent state, reported by the Reporter. */
public String getState() { return state; }
/** A list of error messages. */
public String[] getDiagnostics() { return diagnostics; }
/** A table of counters. */
public Counters getTaskCounters() { return counters; }
/** The current status */
public TIPStatus getCurrentStatus() {
return currentStatus;
}
/**
* Get finish time of task.
   * @return the finish time, or 0 if the finish time was not set.
*/
public long getFinishTime() {
return finishTime;
}
/**
* set finish time of task.
* @param finishTime finish time of task.
*/
protected void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
/**
* Get start time of task.
   * @return the start time, or 0 if the start time was not set.
*/
public long getStartTime() {
return startTime;
}
/**
* set start time of the task.
*/
protected void setStartTime(long startTime) {
this.startTime = startTime;
}
/**
* set successful attempt ID of the task.
*/
protected void setSuccessfulAttemptId(TaskAttemptID t) {
successfulAttempt = t;
}
/**
* Get the attempt ID that took this task to completion
*/
public TaskAttemptID getSuccessfulTaskAttemptId() {
return successfulAttempt;
}
/**
* set running attempt(s) of the task.
*/
protected void setRunningTaskAttemptIds(
Collection<TaskAttemptID> runningAttempts) {
this.runningAttempts = runningAttempts;
}
/**
* Get the running task attempt IDs for this task
*/
public Collection<TaskAttemptID> getRunningTaskAttemptIds() {
return runningAttempts;
}
@Override
public boolean equals(Object o) {
if(o == null)
return false;
if(o.getClass().equals(this.getClass())) {
TaskReport report = (TaskReport) o;
return counters.equals(report.getTaskCounters())
&& Arrays.toString(this.diagnostics)
.equals(Arrays.toString(report.getDiagnostics()))
&& this.finishTime == report.getFinishTime()
&& this.progress == report.getProgress()
&& this.startTime == report.getStartTime()
&& this.state.equals(report.getState())
&& this.taskid.equals(report.getTaskID());
}
return false;
}
@Override
public int hashCode() {
return (counters.toString() + Arrays.toString(this.diagnostics)
+ this.finishTime + this.progress + this.startTime + this.state
+ this.taskid.toString()).hashCode();
}
//////////////////////////////////////////////
// Writable
//////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
taskid.write(out);
out.writeFloat(progress);
Text.writeString(out, state);
out.writeLong(startTime);
out.writeLong(finishTime);
WritableUtils.writeStringArray(out, diagnostics);
counters.write(out);
WritableUtils.writeEnum(out, currentStatus);
if (currentStatus == TIPStatus.RUNNING) {
WritableUtils.writeVInt(out, runningAttempts.size());
      TaskAttemptID[] t = runningAttempts.toArray(new TaskAttemptID[0]);
for (int i = 0; i < t.length; i++) {
t[i].write(out);
}
} else if (currentStatus == TIPStatus.COMPLETE) {
successfulAttempt.write(out);
}
}
public void readFields(DataInput in) throws IOException {
this.taskid.readFields(in);
this.progress = in.readFloat();
this.state = StringInterner.weakIntern(Text.readString(in));
this.startTime = in.readLong();
this.finishTime = in.readLong();
diagnostics = WritableUtils.readStringArray(in);
counters = new Counters();
counters.readFields(in);
currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
if (currentStatus == TIPStatus.RUNNING) {
int num = WritableUtils.readVInt(in);
for (int i = 0; i < num; i++) {
TaskAttemptID t = new TaskAttemptID();
t.readFields(in);
runningAttempts.add(t);
}
} else if (currentStatus == TIPStatus.COMPLETE) {
successfulAttempt.readFields(in);
}
}
}
| 7,160 | 28.8375 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/RecordWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
/**
* <code>RecordWriter</code> writes the output <key, value> pairs
* to an output file.
* <p><code>RecordWriter</code> implementations write the job outputs to the
* {@link FileSystem}.
*
* @see OutputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class RecordWriter<K, V> {
/**
* Writes a key/value pair.
*
* @param key the key to write.
* @param value the value to write.
* @throws IOException
*/
public abstract void write(K key, V value
) throws IOException, InterruptedException;
/**
* Close this <code>RecordWriter</code> to future operations.
*
* @param context the context of the task
* @throws IOException
*/
public abstract void close(TaskAttemptContext context
) throws IOException, InterruptedException;
}
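
// Editorial sketch, not part of the original source: a RecordWriter that
// simply discards its output, e.g. for jobs whose only observable result is a
// set of counters. The class name is hypothetical; real writers stream the
// pairs to the FileSystem instead.
class DiscardingRecordWriter<K, V> extends RecordWriter<K, V> {
  @Override
  public void write(K key, V value) {
    // output intentionally dropped
  }
  @Override
  public void close(TaskAttemptContext context) {
    // nothing to flush or release
  }
}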
| 1,914 | 32.017241 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
/**
* Provides a way to access information about the map/reduce cluster.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Cluster {
@InterfaceStability.Evolving
public static enum JobTrackerStatus {INITIALIZING, RUNNING};
private ClientProtocolProvider clientProtocolProvider;
private ClientProtocol client;
private UserGroupInformation ugi;
private Configuration conf;
private FileSystem fs = null;
private Path sysDir = null;
private Path stagingAreaDir = null;
private Path jobHistoryDir = null;
private static final Log LOG = LogFactory.getLog(Cluster.class);
private static ServiceLoader<ClientProtocolProvider> frameworkLoader =
ServiceLoader.load(ClientProtocolProvider.class);
static {
ConfigUtil.loadResources();
}
public Cluster(Configuration conf) throws IOException {
this(null, conf);
}
public Cluster(InetSocketAddress jobTrackAddr, Configuration conf)
throws IOException {
this.conf = conf;
this.ugi = UserGroupInformation.getCurrentUser();
initialize(jobTrackAddr, conf);
}
private void initialize(InetSocketAddress jobTrackAddr, Configuration conf)
throws IOException {
synchronized (frameworkLoader) {
for (ClientProtocolProvider provider : frameworkLoader) {
LOG.debug("Trying ClientProtocolProvider : "
+ provider.getClass().getName());
ClientProtocol clientProtocol = null;
try {
if (jobTrackAddr == null) {
clientProtocol = provider.create(conf);
} else {
clientProtocol = provider.create(jobTrackAddr, conf);
}
if (clientProtocol != null) {
clientProtocolProvider = provider;
client = clientProtocol;
LOG.debug("Picked " + provider.getClass().getName()
+ " as the ClientProtocolProvider");
break;
}
else {
LOG.debug("Cannot pick " + provider.getClass().getName()
+ " as the ClientProtocolProvider - returned null protocol");
}
}
catch (Exception e) {
LOG.info("Failed to use " + provider.getClass().getName()
+ " due to error: " + e.getMessage());
}
}
}
if (null == clientProtocolProvider || null == client) {
throw new IOException(
"Cannot initialize Cluster. Please check your configuration for "
+ MRConfig.FRAMEWORK_NAME
+ " and the correspond server addresses.");
}
}
ClientProtocol getClient() {
return client;
}
Configuration getConf() {
return conf;
}
/**
* Close the <code>Cluster</code>.
* @throws IOException
*/
public synchronized void close() throws IOException {
clientProtocolProvider.close(client);
}
private Job[] getJobs(JobStatus[] stats) throws IOException {
List<Job> jobs = new ArrayList<Job>();
for (JobStatus stat : stats) {
jobs.add(Job.getInstance(this, stat, new JobConf(stat.getJobFile())));
}
return jobs.toArray(new Job[0]);
}
/**
* Get the file system where job-specific files are stored
*
* @return object of FileSystem
* @throws IOException
* @throws InterruptedException
*/
public synchronized FileSystem getFileSystem()
throws IOException, InterruptedException {
if (this.fs == null) {
try {
this.fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException, InterruptedException {
final Path sysDir = new Path(client.getSystemDir());
return sysDir.getFileSystem(getConf());
}
});
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
return fs;
}
/**
* Get job corresponding to jobid.
*
* @param jobId
* @return object of {@link Job}
* @throws IOException
* @throws InterruptedException
*/
public Job getJob(JobID jobId) throws IOException, InterruptedException {
JobStatus status = client.getJobStatus(jobId);
if (status != null) {
final JobConf conf = new JobConf();
final Path jobPath = new Path(client.getFilesystemName(),
status.getJobFile());
final FileSystem fs = FileSystem.get(jobPath.toUri(), getConf());
try {
conf.addResource(fs.open(jobPath), jobPath.toString());
} catch (FileNotFoundException fnf) {
if (LOG.isWarnEnabled()) {
LOG.warn("Job conf missing on cluster", fnf);
}
}
return Job.getInstance(this, status, conf);
}
return null;
}
/**
* Get all the queues in cluster.
*
* @return array of {@link QueueInfo}
* @throws IOException
* @throws InterruptedException
*/
public QueueInfo[] getQueues() throws IOException, InterruptedException {
return client.getQueues();
}
/**
* Get queue information for the specified name.
*
* @param name queuename
* @return object of {@link QueueInfo}
* @throws IOException
* @throws InterruptedException
*/
public QueueInfo getQueue(String name)
throws IOException, InterruptedException {
return client.getQueue(name);
}
/**
* Get log parameters for the specified jobID or taskAttemptID
* @param jobID the job id.
* @param taskAttemptID the task attempt id. Optional.
* @return the LogParams
* @throws IOException
* @throws InterruptedException
*/
public LogParams getLogParams(JobID jobID, TaskAttemptID taskAttemptID)
throws IOException, InterruptedException {
return client.getLogFileParams(jobID, taskAttemptID);
}
/**
* Get current cluster status.
*
* @return object of {@link ClusterMetrics}
* @throws IOException
* @throws InterruptedException
*/
public ClusterMetrics getClusterStatus() throws IOException, InterruptedException {
return client.getClusterMetrics();
}
/**
* Get all active trackers in the cluster.
*
* @return array of {@link TaskTrackerInfo}
* @throws IOException
* @throws InterruptedException
*/
public TaskTrackerInfo[] getActiveTaskTrackers()
throws IOException, InterruptedException {
return client.getActiveTrackers();
}
/**
* Get blacklisted trackers.
*
* @return array of {@link TaskTrackerInfo}
* @throws IOException
* @throws InterruptedException
*/
public TaskTrackerInfo[] getBlackListedTaskTrackers()
throws IOException, InterruptedException {
return client.getBlacklistedTrackers();
}
/**
* Get all the jobs in cluster.
*
* @return array of {@link Job}
* @throws IOException
* @throws InterruptedException
* @deprecated Use {@link #getAllJobStatuses()} instead.
*/
@Deprecated
public Job[] getAllJobs() throws IOException, InterruptedException {
return getJobs(client.getAllJobs());
}
/**
* Get job status for all jobs in the cluster.
* @return job status for all jobs in cluster
* @throws IOException
* @throws InterruptedException
*/
public JobStatus[] getAllJobStatuses() throws IOException, InterruptedException {
return client.getAllJobs();
}
/**
* Grab the jobtracker system directory path where
* job-specific files will be placed.
*
* @return the system directory where job-specific files are to be placed.
*/
public Path getSystemDir() throws IOException, InterruptedException {
if (sysDir == null) {
sysDir = new Path(client.getSystemDir());
}
return sysDir;
}
/**
* Grab the jobtracker's view of the staging directory path where
* job-specific files will be placed.
*
* @return the staging directory where job-specific files are to be placed.
*/
public Path getStagingAreaDir() throws IOException, InterruptedException {
if (stagingAreaDir == null) {
stagingAreaDir = new Path(client.getStagingAreaDir());
}
return stagingAreaDir;
}
/**
* Get the job history file path for a given job id. The job history file at
   * this path may or may not exist, depending on the job's completion state.
   * The file is present only for completed jobs.
* @param jobId the JobID of the job submitted by the current user.
* @return the file path of the job history file
* @throws IOException
* @throws InterruptedException
*/
public String getJobHistoryUrl(JobID jobId) throws IOException,
InterruptedException {
if (jobHistoryDir == null) {
jobHistoryDir = new Path(client.getJobHistoryDir());
}
return new Path(jobHistoryDir, jobId.toString() + "_"
+ ugi.getShortUserName()).toString();
}
/**
* Gets the Queue ACLs for current user
* @return array of QueueAclsInfo object for current user.
* @throws IOException
*/
public QueueAclsInfo[] getQueueAclsForCurrentUser()
throws IOException, InterruptedException {
return client.getQueueAclsForCurrentUser();
}
/**
* Gets the root level queues.
* @return array of JobQueueInfo object.
* @throws IOException
*/
public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
return client.getRootQueues();
}
/**
* Returns immediate children of queueName.
* @param queueName
* @return array of JobQueueInfo which are children of queueName
* @throws IOException
*/
public QueueInfo[] getChildQueues(String queueName)
throws IOException, InterruptedException {
return client.getChildQueues(queueName);
}
/**
* Get the JobTracker's status.
*
* @return {@link JobTrackerStatus} of the JobTracker
* @throws IOException
* @throws InterruptedException
*/
public JobTrackerStatus getJobTrackerStatus() throws IOException,
InterruptedException {
return client.getJobTrackerStatus();
}
/**
* Get the tasktracker expiry interval for the cluster
* @return the expiry interval in msec
*/
public long getTaskTrackerExpiryInterval() throws IOException,
InterruptedException {
return client.getTaskTrackerExpiryInterval();
}
/**
* Get a delegation token for the user from the JobTracker.
* @param renewer the user who can renew the token
* @return the new token
* @throws IOException
*/
public Token<DelegationTokenIdentifier>
getDelegationToken(Text renewer) throws IOException, InterruptedException{
// client has already set the service
return client.getDelegationToken(renewer);
}
/**
* Renew a delegation token
* @param token the token to renew
* @return the new expiration time
* @throws InvalidToken
* @throws IOException
* @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
InterruptedException {
return token.renew(getConf());
}
/**
* Cancel a delegation token from the JobTracker
* @param token the token to cancel
* @throws IOException
* @deprecated Use {@link Token#cancel} instead
*/
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,
InterruptedException {
token.cancel(getConf());
}
}
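
// Editorial sketch, not part of the original source: a typical read-only use
// of Cluster to list job statuses. It assumes the supplied Configuration
// points at a reachable cluster; the helper class is hypothetical.
class ClusterUsageExample {
  static void printJobs(Configuration conf)
      throws IOException, InterruptedException {
    Cluster cluster = new Cluster(conf);           // picks a ClientProtocolProvider
    try {
      for (JobStatus status : cluster.getAllJobStatuses()) {
        System.out.println(status.getJobID() + " -> " + status.getState());
      }
    } finally {
      cluster.close();                             // releases the client protocol
    }
  }
}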
| 13,420 | 30.139211 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MarkableIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <code>MarkableIterator</code> is a wrapper iterator class that
* implements the {@link MarkableIteratorInterface}.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MarkableIterator<VALUE>
implements MarkableIteratorInterface<VALUE> {
MarkableIteratorInterface<VALUE> baseIterator;
/**
* Create a new iterator layered on the input iterator
* @param itr underlying iterator that implements MarkableIteratorInterface
*/
public MarkableIterator(Iterator<VALUE> itr) {
if (!(itr instanceof MarkableIteratorInterface)) {
throw new IllegalArgumentException("Input Iterator not markable");
}
baseIterator = (MarkableIteratorInterface<VALUE>) itr;
}
@Override
public void mark() throws IOException {
baseIterator.mark();
}
@Override
public void reset() throws IOException {
baseIterator.reset();
}
@Override
public void clearMark() throws IOException {
baseIterator.clearMark();
}
@Override
public boolean hasNext() {
return baseIterator.hasNext();
}
@Override
public VALUE next() {
return baseIterator.next();
}
@Override
public void remove() {
throw new UnsupportedOperationException("Remove Not Implemented");
}
}
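
// Editorial sketch, not part of the original source: marking a position and
// replaying the values after it. It assumes the wrapped iterator supports
// marking (the constructor above rejects any other iterator); the helper
// class is hypothetical.
class MarkResetExample {
  static <V> void scanTwice(Iterator<V> values) throws IOException {
    MarkableIterator<V> itr = new MarkableIterator<V>(values);
    itr.mark();                      // remember the current position
    while (itr.hasNext()) {
      itr.next();                    // first pass, e.g. to compute a total
    }
    itr.reset();                     // rewind to the marked position
    while (itr.hasNext()) {
      itr.next();                    // second pass over the same values
    }
    itr.clearMark();                 // release the mark when done
  }
}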
| 2,274 | 27.4375 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
* This is used to track task completion events on
* the job tracker.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TaskCompletionEvent implements Writable{
/**
 * Task Completion Statuses
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static enum Status {
/**
* Task Event Attempt failed but there are attempts remaining.
*/
FAILED,
/**
* Task Event was killed.
*/
KILLED,
/**
* Task Event was successful.
*/
SUCCEEDED,
/**
* Used to Override a previously successful event status.
* Example: Map attempt runs and a SUCCEEDED event is sent. Later a task
* is retroactively failed due to excessive fetch failure during shuffle
* phase. When the retroactive attempt failure occurs, an OBSOLETE event is
* sent for the map attempt indicating the prior event is no longer valid.
*/
OBSOLETE,
/**
* Task attempt failed and no further attempts remain; the task has
* reached its maximum number of attempts. When a reducer receives a
* TIPFAILED event it gives up trying to shuffle data from that map task.
*/
TIPFAILED
}
private int eventId;
private String taskTrackerHttp;
private int taskRunTime; // using int since runtime is the time difference
private TaskAttemptID taskId;
Status status;
boolean isMap = false;
private int idWithinJob;
public static final TaskCompletionEvent[] EMPTY_ARRAY =
new TaskCompletionEvent[0];
/**
* Default constructor for Writable.
*
*/
public TaskCompletionEvent(){
taskId = new TaskAttemptID();
}
/**
* Constructor. eventId should be created externally and incremented
* per event for each job.
* @param eventId event id; event ids should be unique and assigned
* incrementally, starting from 0.
* @param taskId task id
* @param idWithinJob index of the task within the job
* @param isMap whether the event is for a map task
* @param status task's status
* @param taskTrackerHttp task tracker's host:port for http.
*/
public TaskCompletionEvent(int eventId,
TaskAttemptID taskId,
int idWithinJob,
boolean isMap,
Status status,
String taskTrackerHttp){
this.taskId = taskId;
this.idWithinJob = idWithinJob;
this.isMap = isMap;
this.eventId = eventId;
this.status = status;
this.taskTrackerHttp = taskTrackerHttp;
}
/**
* Returns event Id.
* @return event id
*/
public int getEventId() {
return eventId;
}
/**
* Returns task id.
* @return task id
*/
public TaskAttemptID getTaskAttemptId() {
return taskId;
}
/**
* Returns {@link Status}
* @return task completion status
*/
public Status getStatus() {
return status;
}
/**
* http location of the tasktracker where this task ran.
* @return http location of tasktracker user logs
*/
public String getTaskTrackerHttp() {
return taskTrackerHttp;
}
/**
* Returns the time (in milliseconds) the task took to complete.
* @return task run time in milliseconds
*/
public int getTaskRunTime() {
return taskRunTime;
}
/**
* Set the task completion time.
* @param taskCompletionTime time (in milliseconds) the task took to complete
*/
protected void setTaskRunTime(int taskCompletionTime) {
this.taskRunTime = taskCompletionTime;
}
/**
* Set event id. Event ids should be assigned incrementally, starting from 0.
* @param eventId the event id
*/
protected void setEventId(int eventId) {
this.eventId = eventId;
}
/**
* Sets task id.
* @param taskId
*/
protected void setTaskAttemptId(TaskAttemptID taskId) {
this.taskId = taskId;
}
/**
* Set task status.
* @param status
*/
protected void setTaskStatus(Status status) {
this.status = status;
}
/**
* Set task tracker http location.
* @param taskTrackerHttp
*/
protected void setTaskTrackerHttp(String taskTrackerHttp) {
this.taskTrackerHttp = taskTrackerHttp;
}
@Override
public String toString(){
StringBuffer buf = new StringBuffer();
buf.append("Task Id : ");
buf.append(taskId);
buf.append(", Status : ");
buf.append(status.name());
return buf.toString();
}
@Override
public boolean equals(Object o) {
if(o == null)
return false;
if(o.getClass().equals(this.getClass())) {
TaskCompletionEvent event = (TaskCompletionEvent) o;
return this.isMap == event.isMapTask()
&& this.eventId == event.getEventId()
&& this.idWithinJob == event.idWithinJob()
&& this.status.equals(event.getStatus())
&& this.taskId.equals(event.getTaskAttemptId())
&& this.taskRunTime == event.getTaskRunTime()
&& this.taskTrackerHttp.equals(event.getTaskTrackerHttp());
}
return false;
}
@Override
public int hashCode() {
return toString().hashCode();
}
public boolean isMapTask() {
return isMap;
}
public int idWithinJob() {
return idWithinJob;
}
//////////////////////////////////////////////
// Writable
//////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
taskId.write(out);
WritableUtils.writeVInt(out, idWithinJob);
out.writeBoolean(isMap);
WritableUtils.writeEnum(out, status);
WritableUtils.writeString(out, taskTrackerHttp);
WritableUtils.writeVInt(out, taskRunTime);
WritableUtils.writeVInt(out, eventId);
}
public void readFields(DataInput in) throws IOException {
taskId.readFields(in);
idWithinJob = WritableUtils.readVInt(in);
isMap = in.readBoolean();
status = WritableUtils.readEnum(in, Status.class);
taskTrackerHttp = WritableUtils.readString(in);
taskRunTime = WritableUtils.readVInt(in);
eventId = WritableUtils.readVInt(in);
}
}
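// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original source): the Writable
// methods above allow an event to be serialized and rebuilt, which is how
// completion events travel between processes. The class and method names
// below are illustrative only.
// ---------------------------------------------------------------------------
class TaskCompletionEventRoundTripExample {
  static TaskCompletionEvent roundTrip(TaskCompletionEvent event)
      throws IOException {
    // Serialize the event into an in-memory buffer.
    org.apache.hadoop.io.DataOutputBuffer out =
        new org.apache.hadoop.io.DataOutputBuffer();
    event.write(out);
    // Rebuild a copy from the serialized bytes.
    org.apache.hadoop.io.DataInputBuffer in =
        new org.apache.hadoop.io.DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    TaskCompletionEvent copy = new TaskCompletionEvent(); // no-arg Writable ctor
    copy.readFields(in);
    return copy;
  }
}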
| 7,045 | 27.071713 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ReduceContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The context passed to the {@link Reducer}.
* @param <KEYIN> the class of the input keys
* @param <VALUEIN> the class of the input values
* @param <KEYOUT> the class of the output keys
* @param <VALUEOUT> the class of the output values
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface ReduceContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
extends TaskInputOutputContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
/**
 * Start processing the next unique key.
 * @return true if there is another key to process, false otherwise
 */
public boolean nextKey() throws IOException, InterruptedException;
/**
* Iterate through the values for the current key, reusing the same value
* object, which is stored in the context.
* @return the series of values associated with the current key. All of the
* objects returned directly and indirectly from this method are reused.
*/
public Iterable<VALUEIN> getValues() throws IOException, InterruptedException;
/**
* {@link Iterator} to iterate over values for a given group of records.
*/
interface ValueIterator<VALUEIN> extends MarkableIteratorInterface<VALUEIN> {
/**
* This method is called when the reducer moves from one key to
* another.
* @throws IOException
*/
void resetBackupStore() throws IOException;
}
}
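// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original source): this mirrors
// the key/value loop that Reducer#run drives over a ReduceContext. Names are
// illustrative only; key and value objects may be reused by the framework.
// ---------------------------------------------------------------------------
class ReduceContextLoopExample {
  static <K, V> int drain(ReduceContext<K, V, ?, ?> context)
      throws IOException, InterruptedException {
    int records = 0;
    while (context.nextKey()) {               // advance to the next unique key
      // context.getCurrentKey() returns the (reused) key object
      for (V value : context.getValues()) {   // value objects are also reused
        records++;                            // real processing would go here
      }
    }
    return records;
  }
}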
| 2,290 | 35.365079 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MapContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The context that is given to the {@link Mapper}.
* @param <KEYIN> the key input type to the Mapper
* @param <VALUEIN> the value input type to the Mapper
* @param <KEYOUT> the key output type from the Mapper
* @param <VALUEOUT> the value output type from the Mapper
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface MapContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
extends TaskInputOutputContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
/**
* Get the input split for this map.
*/
public InputSplit getInputSplit();
}
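// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original source): a Mapper's
// Context extends MapContext, so getInputSplit() is available in setup(). The
// mapper below is illustrative and assumes a file-based InputFormat.
// ---------------------------------------------------------------------------
class InputSplitAwareMapper extends
    Mapper<org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text,
           org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable> {
  private org.apache.hadoop.fs.Path inputFile;
  @Override
  protected void setup(Context context) {
    InputSplit split = context.getInputSplit();
    // File-based InputFormats hand out FileSplits; other formats may not.
    if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
      inputFile =
          ((org.apache.hadoop.mapreduce.lib.input.FileSplit) split).getPath();
    }
  }
}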
| 1,526 | 35.357143 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A utility to manage job submission files.
*/
@InterfaceAudience.Private
public class JobSubmissionFiles {
private final static Log LOG = LogFactory.getLog(JobSubmissionFiles.class);
// job submission directory is private!
final public static FsPermission JOB_DIR_PERMISSION =
FsPermission.createImmutable((short) 0700); // rwx--------
// job files are world-readable and owner-writable
final public static FsPermission JOB_FILE_PERMISSION =
FsPermission.createImmutable((short) 0644); // rw-r--r--
public static Path getJobSplitFile(Path jobSubmissionDir) {
return new Path(jobSubmissionDir, "job.split");
}
public static Path getJobSplitMetaFile(Path jobSubmissionDir) {
return new Path(jobSubmissionDir, "job.splitmetainfo");
}
/**
* Get the job conf path.
*/
public static Path getJobConfPath(Path jobSubmitDir) {
return new Path(jobSubmitDir, "job.xml");
}
/**
* Get the job jar path.
*/
public static Path getJobJar(Path jobSubmitDir) {
return new Path(jobSubmitDir, "job.jar");
}
/**
* Get the job distributed cache files path.
* @param jobSubmitDir
*/
public static Path getJobDistCacheFiles(Path jobSubmitDir) {
return new Path(jobSubmitDir, "files");
}
/**
* Get the job distributed cache path for log4j properties.
* @param jobSubmitDir
*/
public static Path getJobLog4jFile(Path jobSubmitDir) {
return new Path(jobSubmitDir, "log4j");
}
/**
* Get the job distributed cache archives path.
* @param jobSubmitDir
*/
public static Path getJobDistCacheArchives(Path jobSubmitDir) {
return new Path(jobSubmitDir, "archives");
}
/**
* Get the job distributed cache libjars path.
* @param jobSubmitDir
*/
public static Path getJobDistCacheLibjars(Path jobSubmitDir) {
return new Path(jobSubmitDir, "libjars");
}
/**
* Initializes the staging directory and returns the path. It also
* verifies that the directory has the required ownership and permissions,
* fixing the permissions if necessary.
* @param cluster the cluster whose staging area is being initialized
* @param conf the configuration used to access the staging file system
* @return the path of the job staging directory
*/
public static Path getStagingDir(Cluster cluster, Configuration conf)
throws IOException,InterruptedException {
Path stagingArea = cluster.getStagingAreaDir();
FileSystem fs = stagingArea.getFileSystem(conf);
String realUser;
String currentUser;
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
realUser = ugi.getShortUserName();
currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
if (fs.exists(stagingArea)) {
FileStatus fsStatus = fs.getFileStatus(stagingArea);
String owner = fsStatus.getOwner();
if (!(owner.equals(currentUser) || owner.equals(realUser))) {
throw new IOException("The ownership on the staging directory " +
stagingArea + " is not as expected. " +
"It is owned by " + owner + ". The directory must " +
"be owned by the submitter " + currentUser + " or " +
"by " + realUser);
}
if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
LOG.info("Permissions on staging directory " + stagingArea + " are " +
"incorrect: " + fsStatus.getPermission() + ". Fixing permissions " +
"to correct value " + JOB_DIR_PERMISSION);
fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
}
} else {
fs.mkdirs(stagingArea,
new FsPermission(JOB_DIR_PERMISSION));
}
return stagingArea;
}
}
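// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original source): the static
// helpers above simply compose well-known child paths under a job's staging
// directory. The class and output comments below are illustrative only.
// ---------------------------------------------------------------------------
class JobSubmissionFilesLayoutExample {
  static void printLayout(Path jobSubmitDir) {
    System.out.println(JobSubmissionFiles.getJobConfPath(jobSubmitDir));      // .../job.xml
    System.out.println(JobSubmissionFiles.getJobJar(jobSubmitDir));           // .../job.jar
    System.out.println(JobSubmissionFiles.getJobSplitFile(jobSubmitDir));     // .../job.split
    System.out.println(JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir)); // .../job.splitmetainfo
    System.out.println(JobSubmissionFiles.getJobDistCacheFiles(jobSubmitDir)); // .../files
  }
}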
| 4,826 | 33.478571 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskTrackerInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
/**
* Information about TaskTracker.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TaskTrackerInfo implements Writable {
String name;
boolean isBlacklisted = false;
String reasonForBlacklist = "";
String blacklistReport = "";
public TaskTrackerInfo() {
}
// construct an active tracker
public TaskTrackerInfo(String name) {
this.name = name;
}
// construct blacklisted tracker
public TaskTrackerInfo(String name, String reasonForBlacklist,
String report) {
this.name = name;
this.isBlacklisted = true;
this.reasonForBlacklist = reasonForBlacklist;
this.blacklistReport = report;
}
/**
* Gets the tasktracker's name.
*
* @return tracker's name.
*/
public String getTaskTrackerName() {
return name;
}
/**
* Whether the tracker is blacklisted.
* @return true if the tracker is blacklisted,
* false otherwise
*/
public boolean isBlacklisted() {
return isBlacklisted;
}
/**
* Gets the reason for which the tasktracker was blacklisted.
*
* @return reason which tracker was blacklisted
*/
public String getReasonForBlacklist() {
return reasonForBlacklist;
}
/**
* Gets a descriptive report about why the tasktracker was blacklisted.
*
* @return report describing why the tasktracker was blacklisted.
*/
public String getBlacklistReport() {
return blacklistReport;
}
@Override
public void readFields(DataInput in) throws IOException {
name = Text.readString(in);
isBlacklisted = in.readBoolean();
reasonForBlacklist = Text.readString(in);
blacklistReport = Text.readString(in);
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, name);
out.writeBoolean(isBlacklisted);
Text.writeString(out, reasonForBlacklist);
Text.writeString(out, blacklistReport);
}
}
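// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original source): formats the
// blacklist details carried by an array of tracker descriptions, for example
// one obtained from the cluster client. The formatting is illustrative only.
// ---------------------------------------------------------------------------
class TaskTrackerInfoReportExample {
  static String report(TaskTrackerInfo[] trackers) {
    StringBuilder sb = new StringBuilder();
    for (TaskTrackerInfo tracker : trackers) {
      sb.append(tracker.getTaskTrackerName());
      if (tracker.isBlacklisted()) {
        sb.append(" [blacklisted: ")
          .append(tracker.getReasonForBlacklist())
          .append("]");
      }
      sb.append('\n');
    }
    return sb.toString();
  }
}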
| 3,030 | 26.807339 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MarkableIteratorInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <code>MarkableIteratorInterface</code> is an interface for an iterator that
* supports mark-reset functionality.
*
* <p>Mark can be called at any point during the iteration process, and a
* subsequent reset will rewind the iterator to the most recently marked record.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
interface MarkableIteratorInterface<VALUE> extends Iterator<VALUE> {
/**
* Mark the current record. A subsequent call to reset will rewind
* the iterator to this record.
* @throws IOException
*/
void mark() throws IOException;
/**
* Reset the iterator to the record that was current when mark() was last called.
* @throws IOException
*/
void reset() throws IOException;
/**
* Clear any previously set mark
* @throws IOException
*/
void clearMark() throws IOException;
}
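// ---------------------------------------------------------------------------
// Hypothetical sketch (not part of the original source): a minimal in-memory
// implementation that illustrates the mark/reset contract described above.
// Names are illustrative; the framework's real value iterator is backed by a
// backup store rather than a simple list.
// ---------------------------------------------------------------------------
class ListBackedMarkableIterator<VALUE>
    implements MarkableIteratorInterface<VALUE> {
  private final java.util.List<VALUE> values;
  private int position = 0;
  private int markedPosition = -1;
  ListBackedMarkableIterator(java.util.List<VALUE> values) {
    this.values = values;
  }
  @Override
  public void mark() {
    markedPosition = position;      // remember the next record to be returned
  }
  @Override
  public void reset() throws IOException {
    if (markedPosition < 0) {
      throw new IOException("reset() called without a mark");
    }
    position = markedPosition;      // rewind to the marked record
  }
  @Override
  public void clearMark() {
    markedPosition = -1;
  }
  @Override
  public boolean hasNext() {
    return position < values.size();
  }
  @Override
  public VALUE next() {
    return values.get(position++);
  }
  @Override
  public void remove() {
    throw new UnsupportedOperationException("remove is not supported");
  }
}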
| 1,874 | 31.894737 | 78 |
java
|