repo (string, 1–191 chars, nullable) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (stringclasses: 1 value)
---|---|---|---|---|---|---
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.utils;
import java.io.IOException;
import java.io.Serializable;
import java.net.InetSocketAddress;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* Builder utilities to construct various objects.
*
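 * <p>A minimal usage sketch (illustrative only; the literal values are
 * hypothetical):</p>
 * <pre>
 *   ApplicationId appId = BuilderUtils.newApplicationId(1234L, 1);
 *   ApplicationAttemptId attemptId =
 *       BuilderUtils.newApplicationAttemptId(appId, 1);
 *   ContainerId containerId = BuilderUtils.newContainerId(attemptId, 1L);
 *   Resource resource = BuilderUtils.newResource(1024, 1);
 *   Priority priority = BuilderUtils.newPriority(0);
 * </pre>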
*/
@Private
public class BuilderUtils {
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
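  // Serializable comparators for ordering ApplicationIds and ContainerIds
  // by their natural compareTo order.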
public static class ApplicationIdComparator implements
Comparator<ApplicationId>, Serializable {
@Override
public int compare(ApplicationId a1, ApplicationId a2) {
return a1.compareTo(a2);
}
}
public static class ContainerIdComparator implements
java.util.Comparator<ContainerId>, Serializable {
@Override
public int compare(ContainerId c1,
ContainerId c2) {
return c1.compareTo(c2);
}
}
public static LocalResource newLocalResource(URL url, LocalResourceType type,
LocalResourceVisibility visibility, long size, long timestamp,
boolean shouldBeUploadedToSharedCache) {
LocalResource resource =
recordFactory.newRecordInstance(LocalResource.class);
resource.setResource(url);
resource.setType(type);
resource.setVisibility(visibility);
resource.setSize(size);
resource.setTimestamp(timestamp);
resource.setShouldBeUploadedToSharedCache(shouldBeUploadedToSharedCache);
return resource;
}
public static LocalResource newLocalResource(URI uri,
LocalResourceType type, LocalResourceVisibility visibility, long size,
long timestamp, boolean shouldBeUploadedToSharedCache) {
return newLocalResource(ConverterUtils.getYarnUrlFromURI(uri), type,
visibility, size, timestamp, shouldBeUploadedToSharedCache);
}
public static ApplicationId newApplicationId(RecordFactory recordFactory,
long clustertimestamp, CharSequence id) {
return ApplicationId.newInstance(clustertimestamp,
Integer.parseInt(id.toString()));
}
public static ApplicationId newApplicationId(RecordFactory recordFactory,
long clusterTimeStamp, int id) {
return ApplicationId.newInstance(clusterTimeStamp, id);
}
public static ApplicationId newApplicationId(long clusterTimeStamp, int id) {
return ApplicationId.newInstance(clusterTimeStamp, id);
}
public static ApplicationAttemptId newApplicationAttemptId(
ApplicationId appId, int attemptId) {
return ApplicationAttemptId.newInstance(appId, attemptId);
}
public static ApplicationId convert(long clustertimestamp, CharSequence id) {
return ApplicationId.newInstance(clustertimestamp,
Integer.parseInt(id.toString()));
}
public static ContainerId newContainerId(ApplicationAttemptId appAttemptId,
long containerId) {
return ContainerId.newContainerId(appAttemptId, containerId);
}
public static ContainerId newContainerId(int appId, int appAttemptId,
long timestamp, long id) {
ApplicationId applicationId = newApplicationId(timestamp, appId);
ApplicationAttemptId applicationAttemptId = newApplicationAttemptId(
applicationId, appAttemptId);
ContainerId cId = newContainerId(applicationAttemptId, id);
return cId;
}
public static Token newContainerToken(ContainerId cId, String host,
int port, String user, Resource r, long expiryTime, int masterKeyId,
byte[] password, long rmIdentifier) throws IOException {
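    // Build the token identifier with a default priority of 0 and a creation
    // time of 0, then wrap it with the node's host:port as the token service.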
ContainerTokenIdentifier identifier =
new ContainerTokenIdentifier(cId, host + ":" + port, user, r,
expiryTime, masterKeyId, rmIdentifier, Priority.newInstance(0), 0);
return newContainerToken(BuilderUtils.newNodeId(host, port), password,
identifier);
}
public static ContainerId newContainerId(RecordFactory recordFactory,
ApplicationId appId, ApplicationAttemptId appAttemptId,
int containerId) {
return ContainerId.newContainerId(appAttemptId, containerId);
}
public static NodeId newNodeId(String host, int port) {
return NodeId.newInstance(host, port);
}
public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime) {
return newNodeReport(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime, null);
}
public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime,
Set<String> nodeLabels) {
NodeReport nodeReport = recordFactory.newRecordInstance(NodeReport.class);
nodeReport.setNodeId(nodeId);
nodeReport.setNodeState(nodeState);
nodeReport.setHttpAddress(httpAddress);
nodeReport.setRackName(rackName);
nodeReport.setUsed(used);
nodeReport.setCapability(capability);
nodeReport.setNumContainers(numContainers);
nodeReport.setHealthReport(healthReport);
nodeReport.setLastHealthReportTime(lastHealthReportTime);
nodeReport.setNodeLabels(nodeLabels);
return nodeReport;
}
public static ContainerStatus newContainerStatus(ContainerId containerId,
ContainerState containerState, String diagnostics, int exitStatus) {
ContainerStatus containerStatus = recordFactory
.newRecordInstance(ContainerStatus.class);
containerStatus.setState(containerState);
containerStatus.setContainerId(containerId);
containerStatus.setDiagnostics(diagnostics);
containerStatus.setExitStatus(exitStatus);
return containerStatus;
}
public static Container newContainer(ContainerId containerId, NodeId nodeId,
String nodeHttpAddress, Resource resource, Priority priority,
Token containerToken) {
Container container = recordFactory.newRecordInstance(Container.class);
container.setId(containerId);
container.setNodeId(nodeId);
container.setNodeHttpAddress(nodeHttpAddress);
container.setResource(resource);
container.setPriority(priority);
container.setContainerToken(containerToken);
return container;
}
public static <T extends Token> T newToken(Class<T> tokenClass,
byte[] identifier, String kind, byte[] password, String service) {
T token = recordFactory.newRecordInstance(tokenClass);
token.setIdentifier(ByteBuffer.wrap(identifier));
token.setKind(kind);
token.setPassword(ByteBuffer.wrap(password));
token.setService(service);
return token;
}
public static Token newDelegationToken(byte[] identifier,
String kind, byte[] password, String service) {
return newToken(Token.class, identifier, kind, password, service);
}
public static Token newClientToAMToken(byte[] identifier, String kind,
byte[] password, String service) {
return newToken(Token.class, identifier, kind, password, service);
}
public static Token newAMRMToken(byte[] identifier, String kind,
byte[] password, String service) {
return newToken(Token.class, identifier, kind, password, service);
}
@VisibleForTesting
public static Token newContainerToken(NodeId nodeId,
byte[] password, ContainerTokenIdentifier tokenIdentifier) {
// RPC layer client expects ip:port as service for tokens
InetSocketAddress addr =
NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
// NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
Token containerToken =
newToken(Token.class, tokenIdentifier.getBytes(),
ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
.buildTokenService(addr).toString());
return containerToken;
}
public static ContainerTokenIdentifier newContainerTokenIdentifier(
Token containerToken) throws IOException {
org.apache.hadoop.security.token.Token<ContainerTokenIdentifier> token =
new org.apache.hadoop.security.token.Token<ContainerTokenIdentifier>(
containerToken.getIdentifier()
.array(), containerToken.getPassword().array(), new Text(
containerToken.getKind()),
new Text(containerToken.getService()));
return token.decodeIdentifier();
}
public static ContainerLaunchContext newContainerLaunchContext(
Map<String, LocalResource> localResources,
Map<String, String> environment, List<String> commands,
Map<String, ByteBuffer> serviceData, ByteBuffer tokens,
Map<ApplicationAccessType, String> acls) {
ContainerLaunchContext container = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
container.setLocalResources(localResources);
container.setEnvironment(environment);
container.setCommands(commands);
container.setServiceData(serviceData);
container.setTokens(tokens);
container.setApplicationACLs(acls);
return container;
}
public static Priority newPriority(int p) {
Priority priority = recordFactory.newRecordInstance(Priority.class);
priority.setPriority(p);
return priority;
}
public static ResourceRequest newResourceRequest(Priority priority,
String hostName, Resource capability, int numContainers) {
ResourceRequest request = recordFactory
.newRecordInstance(ResourceRequest.class);
request.setPriority(priority);
request.setResourceName(hostName);
request.setCapability(capability);
request.setNumContainers(numContainers);
return request;
}
public static ResourceRequest newResourceRequest(ResourceRequest r) {
ResourceRequest request = recordFactory
.newRecordInstance(ResourceRequest.class);
request.setPriority(r.getPriority());
request.setResourceName(r.getResourceName());
request.setCapability(r.getCapability());
request.setNumContainers(r.getNumContainers());
request.setNodeLabelExpression(r.getNodeLabelExpression());
return request;
}
public static ApplicationReport newApplicationReport(
ApplicationId applicationId, ApplicationAttemptId applicationAttemptId,
String user, String queue, String name, String host, int rpcPort,
Token clientToAMToken, YarnApplicationState state, String diagnostics,
String url, long startTime, long finishTime,
FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String appType, Token amRmToken, Set<String> tags,
Priority priority) {
ApplicationReport report = recordFactory
.newRecordInstance(ApplicationReport.class);
report.setApplicationId(applicationId);
report.setCurrentApplicationAttemptId(applicationAttemptId);
report.setUser(user);
report.setQueue(queue);
report.setName(name);
report.setHost(host);
report.setRpcPort(rpcPort);
report.setClientToAMToken(clientToAMToken);
report.setYarnApplicationState(state);
report.setDiagnostics(diagnostics);
report.setTrackingUrl(url);
report.setStartTime(startTime);
report.setFinishTime(finishTime);
report.setFinalApplicationStatus(finalStatus);
report.setApplicationResourceUsageReport(appResources);
report.setOriginalTrackingUrl(origTrackingUrl);
report.setProgress(progress);
report.setApplicationType(appType);
report.setAMRMToken(amRmToken);
report.setApplicationTags(tags);
report.setPriority(priority);
return report;
}
public static ApplicationSubmissionContext newApplicationSubmissionContext(
ApplicationId applicationId, String applicationName, String queue,
Priority priority, ContainerLaunchContext amContainer,
boolean isUnmanagedAM, boolean cancelTokensWhenComplete,
int maxAppAttempts, Resource resource, String applicationType) {
ApplicationSubmissionContext context =
recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
context.setApplicationId(applicationId);
context.setApplicationName(applicationName);
context.setQueue(queue);
context.setPriority(priority);
context.setAMContainerSpec(amContainer);
context.setUnmanagedAM(isUnmanagedAM);
context.setCancelTokensWhenComplete(cancelTokensWhenComplete);
context.setMaxAppAttempts(maxAppAttempts);
context.setResource(resource);
context.setApplicationType(applicationType);
return context;
}
public static ApplicationSubmissionContext newApplicationSubmissionContext(
ApplicationId applicationId, String applicationName, String queue,
Priority priority, ContainerLaunchContext amContainer,
boolean isUnmanagedAM, boolean cancelTokensWhenComplete,
int maxAppAttempts, Resource resource) {
return newApplicationSubmissionContext(applicationId, applicationName,
queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete,
maxAppAttempts, resource, null);
}
public static ApplicationResourceUsageReport newApplicationResourceUsageReport(
int numUsedContainers, int numReservedContainers, Resource usedResources,
Resource reservedResources, Resource neededResources, long memorySeconds,
long vcoreSeconds) {
ApplicationResourceUsageReport report =
recordFactory.newRecordInstance(ApplicationResourceUsageReport.class);
report.setNumUsedContainers(numUsedContainers);
report.setNumReservedContainers(numReservedContainers);
report.setUsedResources(usedResources);
report.setReservedResources(reservedResources);
report.setNeededResources(neededResources);
report.setMemorySeconds(memorySeconds);
report.setVcoreSeconds(vcoreSeconds);
return report;
}
public static Resource newResource(int memory, int vCores) {
Resource resource = recordFactory.newRecordInstance(Resource.class);
resource.setMemory(memory);
resource.setVirtualCores(vCores);
return resource;
}
public static URL newURL(String scheme, String host, int port, String file) {
URL url = recordFactory.newRecordInstance(URL.class);
url.setScheme(scheme);
url.setHost(host);
url.setPort(port);
url.setFile(file);
return url;
}
public static AllocateResponse newAllocateResponse(int responseId,
List<ContainerStatus> completedContainers,
List<Container> allocatedContainers, List<NodeReport> updatedNodes,
Resource availResources, AMCommand command, int numClusterNodes,
PreemptionMessage preempt) {
AllocateResponse response = recordFactory
.newRecordInstance(AllocateResponse.class);
response.setNumClusterNodes(numClusterNodes);
response.setResponseId(responseId);
response.setCompletedContainersStatuses(completedContainers);
response.setAllocatedContainers(allocatedContainers);
response.setUpdatedNodes(updatedNodes);
response.setAvailableResources(availResources);
response.setAMCommand(command);
response.setPreemptionMessage(preempt);
return response;
}
}
| 18,045 | 40.389908 | 81 | java |

hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/LeveldbIterator.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.utils;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.ReadOptions;
/**
* A wrapper for a DBIterator to translate the raw RuntimeExceptions that
* can be thrown into DBExceptions.
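 *
 * <p>A minimal usage sketch (illustrative; {@code db} is assumed to be an
 * already-open {@code org.iq80.leveldb.DB}):</p>
 * <pre>
 *   LeveldbIterator it = new LeveldbIterator(db);
 *   try {
 *     for (it.seekToFirst(); it.hasNext();) {
 *       Map.Entry&lt;byte[], byte[]&gt; entry = it.next();
 *       // process entry.getKey() / entry.getValue()
 *     }
 *   } finally {
 *     it.close();
 *   }
 * </pre>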
*/
@Public
@Evolving
public class LeveldbIterator implements Iterator<Map.Entry<byte[], byte[]>>,
Closeable {
private DBIterator iter;
/**
* Create an iterator for the specified database
*/
public LeveldbIterator(DB db) {
iter = db.iterator();
}
/**
* Create an iterator for the specified database
*/
public LeveldbIterator(DB db, ReadOptions options) {
iter = db.iterator(options);
}
/**
* Create an iterator using the specified underlying DBIterator
*/
public LeveldbIterator(DBIterator iter) {
this.iter = iter;
}
/**
 * Repositions the iterator so that the key of the next entry returned
 * is greater than or equal to the specified target key.
*/
public void seek(byte[] key) throws DBException {
try {
iter.seek(key);
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
 * Repositions the iterator so it is at the beginning of the database.
*/
public void seekToFirst() throws DBException {
try {
iter.seekToFirst();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
 * Repositions the iterator so it is at the end of the database.
*/
public void seekToLast() throws DBException {
try {
iter.seekToLast();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* Returns <tt>true</tt> if the iteration has more elements.
*/
public boolean hasNext() throws DBException {
try {
return iter.hasNext();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* Returns the next element in the iteration.
*/
@Override
public Map.Entry<byte[], byte[]> next() throws DBException {
try {
return iter.next();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* Returns the next element in the iteration, without advancing the
* iteration.
*/
public Map.Entry<byte[], byte[]> peekNext() throws DBException {
try {
return iter.peekNext();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* @return true if there is a previous entry in the iteration.
*/
public boolean hasPrev() throws DBException {
try {
return iter.hasPrev();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
 * @return the previous element in the iteration, rewinding the iteration.
*/
public Map.Entry<byte[], byte[]> prev() throws DBException {
try {
return iter.prev();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* @return the previous element in the iteration, without rewinding the
* iteration.
*/
public Map.Entry<byte[], byte[]> peekPrev() throws DBException {
try {
return iter.peekPrev();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* Removes from the database the last element returned by the iterator.
*/
@Override
public void remove() throws DBException {
try {
iter.remove();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
}
/**
* Closes the iterator.
*/
@Override
public void close() throws IOException {
try {
iter.close();
} catch (RuntimeException e) {
throw new IOException(e.getMessage(), e);
}
}
}
| 5,400 | 24.597156 | 77 | java |

hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/Lock.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.utils;
import java.lang.annotation.Documented;
/**
* Annotation to document locking order.
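 *
 * <p>A usage sketch (the annotated method is hypothetical); {@code NoLock}
 * documents that no lock is held:</p>
 * <pre>
 *   &#064;Lock(Lock.NoLock.class)
 *   void recover() { ... }
 * </pre>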
*/
@Documented
public @interface Lock {
@SuppressWarnings({ "rawtypes" })
Class[] value();
public class NoLock {}
}
| 1,064 | 33.354839 | 75 | java |

hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
public class TestApplicationHistoryManagerOnTimelineStore {
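  // Each test runs once per caller from callers(): "" (ACLs disabled), the
  // owner, an authorized user, an unauthorized user, and the admin.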
private static final int SCALE = 5;
private static TimelineStore store;
private ApplicationHistoryManagerOnTimelineStore historyManager;
private UserGroupInformation callerUGI;
private Configuration conf;
@BeforeClass
public static void prepareStore() throws Exception {
store = createStore(SCALE);
TimelineEntities entities = new TimelineEntities();
entities.addEntity(createApplicationTimelineEntity(
ApplicationId.newInstance(0, SCALE + 1), true, true, false));
entities.addEntity(createApplicationTimelineEntity(
ApplicationId.newInstance(0, SCALE + 2), true, false, true));
store.put(entities);
}
public static TimelineStore createStore(int scale) throws Exception {
TimelineStore store = new MemoryTimelineStore();
prepareTimelineStore(store, scale);
return store;
}
@Before
public void setup() throws Exception {
// Only test the ACLs of the generic history
TimelineACLsManager aclsManager = new TimelineACLsManager(new YarnConfiguration());
TimelineDataManager dataManager =
new TimelineDataManager(store, aclsManager);
dataManager.init(conf);
ApplicationACLsManager appAclsManager = new ApplicationACLsManager(conf);
historyManager =
new ApplicationHistoryManagerOnTimelineStore(dataManager, appAclsManager);
historyManager.init(conf);
historyManager.start();
}
@After
public void tearDown() {
if (historyManager != null) {
historyManager.stop();
}
}
@Parameters
public static Collection<Object[]> callers() {
// user1 is the owner
// user2 is the authorized user
// user3 is the unauthorized user
// admin is the admin acl
return Arrays.asList(
new Object[][] { { "" }, { "user1" }, { "user2" }, { "user3" }, { "admin" } });
}
public TestApplicationHistoryManagerOnTimelineStore(String caller) {
conf = new YarnConfiguration();
if (!caller.equals("")) {
callerUGI = UserGroupInformation.createRemoteUser(caller, AuthMethod.SIMPLE);
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
}
}
private static void prepareTimelineStore(TimelineStore store, int scale)
throws Exception {
for (int i = 1; i <= scale; ++i) {
TimelineEntities entities = new TimelineEntities();
ApplicationId appId = ApplicationId.newInstance(0, i);
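      // App 2 is written with empty view ACLs; every other app grants view
      // access to "user2" (see createApplicationTimelineEntity).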
if (i == 2) {
entities.addEntity(createApplicationTimelineEntity(
appId, true, false, false));
} else {
entities.addEntity(createApplicationTimelineEntity(
appId, false, false, false));
}
store.put(entities);
for (int j = 1; j <= scale; ++j) {
entities = new TimelineEntities();
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, j);
entities.addEntity(createAppAttemptTimelineEntity(appAttemptId));
store.put(entities);
for (int k = 1; k <= scale; ++k) {
entities = new TimelineEntities();
ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
entities.addEntity(createContainerEntity(containerId));
store.put(entities);
}
}
}
}
@Test
public void testGetApplicationReport() throws Exception {
for (int i = 1; i <= 2; ++i) {
final ApplicationId appId = ApplicationId.newInstance(0, i);
ApplicationReport app;
if (callerUGI == null) {
app = historyManager.getApplication(appId);
} else {
app =
callerUGI.doAs(new PrivilegedExceptionAction<ApplicationReport> () {
@Override
public ApplicationReport run() throws Exception {
return historyManager.getApplication(appId);
}
});
}
Assert.assertNotNull(app);
Assert.assertEquals(appId, app.getApplicationId());
Assert.assertEquals("test app", app.getName());
Assert.assertEquals("test app type", app.getApplicationType());
Assert.assertEquals("user1", app.getUser());
Assert.assertEquals("test queue", app.getQueue());
Assert.assertEquals(Integer.MAX_VALUE + 2L
+ app.getApplicationId().getId(), app.getStartTime());
      Assert.assertEquals(Integer.MAX_VALUE + 3L
          + app.getApplicationId().getId(), app.getFinishTime());
Assert.assertTrue(Math.abs(app.getProgress() - 1.0F) < 0.0001);
Assert.assertEquals(2, app.getApplicationTags().size());
Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_1"));
Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_2"));
      // App 2 has no view ACLs, so the default ACL (" ") is used and
      // nobody except the admin and the owner has access to the app's details.
if ((i == 1 && callerUGI != null &&
callerUGI.getShortUserName().equals("user3")) ||
(i == 2 && callerUGI != null &&
(callerUGI.getShortUserName().equals("user2") ||
callerUGI.getShortUserName().equals("user3")))) {
Assert.assertEquals(ApplicationAttemptId.newInstance(appId, -1),
app.getCurrentApplicationAttemptId());
Assert.assertEquals(ApplicationHistoryManagerOnTimelineStore.UNAVAILABLE,
app.getHost());
Assert.assertEquals(-1, app.getRpcPort());
Assert.assertEquals(ApplicationHistoryManagerOnTimelineStore.UNAVAILABLE,
app.getTrackingUrl());
Assert.assertEquals(ApplicationHistoryManagerOnTimelineStore.UNAVAILABLE,
app.getOriginalTrackingUrl());
Assert.assertEquals("", app.getDiagnostics());
} else {
Assert.assertEquals(ApplicationAttemptId.newInstance(appId, 1),
app.getCurrentApplicationAttemptId());
Assert.assertEquals("test host", app.getHost());
Assert.assertEquals(100, app.getRpcPort());
Assert.assertEquals("test tracking url", app.getTrackingUrl());
Assert.assertEquals("test original tracking url",
app.getOriginalTrackingUrl());
Assert.assertEquals("test diagnostics info", app.getDiagnostics());
}
ApplicationResourceUsageReport applicationResourceUsageReport =
app.getApplicationResourceUsageReport();
Assert.assertEquals(123,
applicationResourceUsageReport.getMemorySeconds());
Assert
.assertEquals(345, applicationResourceUsageReport.getVcoreSeconds());
Assert.assertEquals(FinalApplicationStatus.UNDEFINED,
app.getFinalApplicationStatus());
Assert.assertEquals(YarnApplicationState.FINISHED,
app.getYarnApplicationState());
}
}
@Test
public void testGetApplicationReportWithNotAttempt() throws Exception {
final ApplicationId appId = ApplicationId.newInstance(0, SCALE + 1);
ApplicationReport app;
if (callerUGI == null) {
app = historyManager.getApplication(appId);
} else {
app =
callerUGI.doAs(new PrivilegedExceptionAction<ApplicationReport> () {
@Override
public ApplicationReport run() throws Exception {
return historyManager.getApplication(appId);
}
});
}
Assert.assertNotNull(app);
Assert.assertEquals(appId, app.getApplicationId());
Assert.assertEquals(ApplicationAttemptId.newInstance(appId, -1),
app.getCurrentApplicationAttemptId());
}
@Test
public void testGetApplicationAttemptReport() throws Exception {
final ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1);
ApplicationAttemptReport appAttempt;
if (callerUGI == null) {
appAttempt = historyManager.getApplicationAttempt(appAttemptId);
} else {
try {
appAttempt =
callerUGI.doAs(new PrivilegedExceptionAction<ApplicationAttemptReport> () {
@Override
public ApplicationAttemptReport run() throws Exception {
return historyManager.getApplicationAttempt(appAttemptId);
}
});
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
Assert.fail();
}
} catch (AuthorizationException e) {
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
return;
}
throw e;
}
}
Assert.assertNotNull(appAttempt);
Assert.assertEquals(appAttemptId, appAttempt.getApplicationAttemptId());
Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
appAttempt.getAMContainerId());
Assert.assertEquals("test host", appAttempt.getHost());
Assert.assertEquals(100, appAttempt.getRpcPort());
Assert.assertEquals("test tracking url", appAttempt.getTrackingUrl());
Assert.assertEquals("test original tracking url",
appAttempt.getOriginalTrackingUrl());
Assert.assertEquals("test diagnostics info", appAttempt.getDiagnostics());
Assert.assertEquals(YarnApplicationAttemptState.FINISHED,
appAttempt.getYarnApplicationAttemptState());
}
@Test
public void testGetContainerReport() throws Exception {
final ContainerId containerId =
ContainerId.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0, 1), 1), 1);
ContainerReport container;
if (callerUGI == null) {
container = historyManager.getContainer(containerId);
} else {
try {
container =
callerUGI.doAs(new PrivilegedExceptionAction<ContainerReport> () {
@Override
public ContainerReport run() throws Exception {
return historyManager.getContainer(containerId);
}
});
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
Assert.fail();
}
} catch (AuthorizationException e) {
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
return;
}
throw e;
}
}
Assert.assertNotNull(container);
Assert.assertEquals(Integer.MAX_VALUE + 1L, container.getCreationTime());
Assert.assertEquals(Integer.MAX_VALUE + 2L, container.getFinishTime());
Assert.assertEquals(Resource.newInstance(-1, -1),
container.getAllocatedResource());
Assert.assertEquals(NodeId.newInstance("test host", 100),
container.getAssignedNode());
Assert.assertEquals(Priority.UNDEFINED, container.getPriority());
Assert
.assertEquals("test diagnostics info", container.getDiagnosticsInfo());
Assert.assertEquals(ContainerState.COMPLETE, container.getContainerState());
Assert.assertEquals(-1, container.getContainerExitStatus());
Assert.assertEquals("http://0.0.0.0:8188/applicationhistory/logs/" +
"test host:100/container_0_0001_01_000001/"
+ "container_0_0001_01_000001/user1", container.getLogUrl());
}
@Test
public void testGetApplications() throws Exception {
Collection<ApplicationReport> apps =
historyManager.getApplications(Long.MAX_VALUE, 0L, Long.MAX_VALUE)
.values();
Assert.assertNotNull(apps);
Assert.assertEquals(SCALE + 1, apps.size());
ApplicationId ignoredAppId = ApplicationId.newInstance(0, SCALE + 2);
for (ApplicationReport app : apps) {
Assert.assertNotEquals(ignoredAppId, app.getApplicationId());
}
// Get apps by given appStartedTime period
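    // 2147483653L == Integer.MAX_VALUE + 6L; with start times seeded as
    // Integer.MAX_VALUE + 2L + appId, only apps 4 and 5 qualify.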
apps =
historyManager.getApplications(Long.MAX_VALUE, 2147483653L,
Long.MAX_VALUE).values();
Assert.assertNotNull(apps);
Assert.assertEquals(2, apps.size());
}
@Test
public void testGetApplicationAttempts() throws Exception {
final ApplicationId appId = ApplicationId.newInstance(0, 1);
Collection<ApplicationAttemptReport> appAttempts;
if (callerUGI == null) {
appAttempts = historyManager.getApplicationAttempts(appId).values();
} else {
try {
appAttempts = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
@Override
public Collection<ApplicationAttemptReport> run() throws Exception {
return historyManager.getApplicationAttempts(appId).values();
}
});
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
Assert.fail();
}
} catch (AuthorizationException e) {
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
return;
}
throw e;
}
}
Assert.assertNotNull(appAttempts);
Assert.assertEquals(SCALE, appAttempts.size());
}
@Test
public void testGetContainers() throws Exception {
final ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1);
Collection<ContainerReport> containers;
if (callerUGI == null) {
containers = historyManager.getContainers(appAttemptId).values();
} else {
try {
containers = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ContainerReport>> () {
@Override
public Collection<ContainerReport> run() throws Exception {
return historyManager.getContainers(appAttemptId).values();
}
});
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
Assert.fail();
}
} catch (AuthorizationException e) {
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
return;
}
throw e;
}
}
Assert.assertNotNull(containers);
Assert.assertEquals(SCALE, containers.size());
}
@Test
public void testGetAMContainer() throws Exception {
final ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1);
ContainerReport container;
if (callerUGI == null) {
container = historyManager.getAMContainer(appAttemptId);
} else {
try {
container =
callerUGI.doAs(new PrivilegedExceptionAction<ContainerReport> () {
@Override
public ContainerReport run() throws Exception {
return historyManager.getAMContainer(appAttemptId);
}
});
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
Assert.fail();
}
} catch (AuthorizationException e) {
if (callerUGI != null && callerUGI.getShortUserName().equals("user3")) {
// The exception is expected
return;
}
throw e;
}
}
Assert.assertNotNull(container);
Assert.assertEquals(appAttemptId, container.getContainerId()
.getApplicationAttemptId());
}
private static TimelineEntity createApplicationTimelineEntity(
ApplicationId appId, boolean emptyACLs, boolean noAttemptId,
boolean wrongAppId) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE);
if (wrongAppId) {
entity.setEntityId("wrong_app_id");
} else {
entity.setEntityId(appId.toString());
}
entity.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entity.addPrimaryFilter(
TimelineStore.SystemFilter.ENTITY_OWNER.toString(), "yarn");
Map<String, Object> entityInfo = new HashMap<String, Object>();
entityInfo.put(ApplicationMetricsConstants.NAME_ENTITY_INFO, "test app");
entityInfo.put(ApplicationMetricsConstants.TYPE_ENTITY_INFO,
"test app type");
entityInfo.put(ApplicationMetricsConstants.USER_ENTITY_INFO, "user1");
entityInfo.put(ApplicationMetricsConstants.QUEUE_ENTITY_INFO, "test queue");
entityInfo.put(
ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO, "false");
entityInfo.put(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO,
Integer.MAX_VALUE + 1L);
    entityInfo.put(ApplicationMetricsConstants.APP_MEM_METRICS, 123);
    entityInfo.put(ApplicationMetricsConstants.APP_CPU_METRICS, 345);
if (emptyACLs) {
entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO, "");
} else {
entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO,
"user2");
}
Set<String> appTags = new HashSet<String>();
appTags.add("Test_APP_TAGS_1");
appTags.add("Test_APP_TAGS_2");
entityInfo.put(ApplicationMetricsConstants.APP_TAGS_INFO, appTags);
entity.setOtherInfo(entityInfo);
TimelineEvent tEvent = new TimelineEvent();
tEvent.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
tEvent.setTimestamp(Integer.MAX_VALUE + 2L + appId.getId());
entity.addEvent(tEvent);
tEvent = new TimelineEvent();
tEvent.setEventType(
ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
tEvent.setTimestamp(Integer.MAX_VALUE + 3L + appId.getId());
Map<String, Object> eventInfo = new HashMap<String, Object>();
eventInfo.put(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO,
"test diagnostics info");
eventInfo.put(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO,
FinalApplicationStatus.UNDEFINED.toString());
eventInfo.put(ApplicationMetricsConstants.STATE_EVENT_INFO,
YarnApplicationState.FINISHED.toString());
if (!noAttemptId) {
eventInfo.put(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO,
ApplicationAttemptId.newInstance(appId, 1));
}
tEvent.setEventInfo(eventInfo);
entity.addEvent(tEvent);
return entity;
}
private static TimelineEntity createAppAttemptTimelineEntity(
ApplicationAttemptId appAttemptId) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityType(AppAttemptMetricsConstants.ENTITY_TYPE);
entity.setEntityId(appAttemptId.toString());
entity.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entity.addPrimaryFilter(AppAttemptMetricsConstants.PARENT_PRIMARY_FILTER,
appAttemptId.getApplicationId().toString());
entity.addPrimaryFilter(
TimelineStore.SystemFilter.ENTITY_OWNER.toString(), "yarn");
TimelineEvent tEvent = new TimelineEvent();
tEvent.setEventType(AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE);
tEvent.setTimestamp(Integer.MAX_VALUE + 1L);
Map<String, Object> eventInfo = new HashMap<String, Object>();
eventInfo.put(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO,
"test tracking url");
eventInfo.put(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO,
"test original tracking url");
eventInfo.put(AppAttemptMetricsConstants.HOST_EVENT_INFO, "test host");
eventInfo.put(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO, 100);
eventInfo.put(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO,
ContainerId.newContainerId(appAttemptId, 1));
tEvent.setEventInfo(eventInfo);
entity.addEvent(tEvent);
tEvent = new TimelineEvent();
tEvent.setEventType(AppAttemptMetricsConstants.FINISHED_EVENT_TYPE);
tEvent.setTimestamp(Integer.MAX_VALUE + 2L);
eventInfo = new HashMap<String, Object>();
eventInfo.put(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO,
"test tracking url");
eventInfo.put(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO,
"test original tracking url");
eventInfo.put(AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO,
"test diagnostics info");
eventInfo.put(AppAttemptMetricsConstants.FINAL_STATUS_EVENT_INFO,
FinalApplicationStatus.UNDEFINED.toString());
eventInfo.put(AppAttemptMetricsConstants.STATE_EVENT_INFO,
YarnApplicationAttemptState.FINISHED.toString());
tEvent.setEventInfo(eventInfo);
entity.addEvent(tEvent);
return entity;
}
private static TimelineEntity createContainerEntity(ContainerId containerId) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityType(ContainerMetricsConstants.ENTITY_TYPE);
entity.setEntityId(containerId.toString());
entity.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entity.addPrimaryFilter(ContainerMetricsConstants.PARENT_PRIMARIY_FILTER,
containerId.getApplicationAttemptId().toString());
entity.addPrimaryFilter(
TimelineStore.SystemFilter.ENTITY_OWNER.toString(), "yarn");
Map<String, Object> entityInfo = new HashMap<String, Object>();
entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO, -1);
entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO, -1);
entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO,
"test host");
entityInfo.put(ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO, 100);
entityInfo
.put(ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO, -1);
entity.setOtherInfo(entityInfo);
TimelineEvent tEvent = new TimelineEvent();
tEvent.setEventType(ContainerMetricsConstants.CREATED_EVENT_TYPE);
tEvent.setTimestamp(Integer.MAX_VALUE + 1L);
    entity.addEvent(tEvent);
tEvent = new TimelineEvent();
tEvent.setEventType(ContainerMetricsConstants.FINISHED_EVENT_TYPE);
tEvent.setTimestamp(Integer.MAX_VALUE + 2L);
Map<String, Object> eventInfo = new HashMap<String, Object>();
eventInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO,
"test diagnostics info");
eventInfo.put(ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO, -1);
eventInfo.put(ContainerMetricsConstants.STATE_EVENT_INFO,
ContainerState.COMPLETE.toString());
tEvent.setEventInfo(eventInfo);
entity.addEvent(tEvent);
return entity;
}
}
| 25,423 | 41.444073 | 87 | java |

hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestApplicationHistoryManagerImpl extends
ApplicationHistoryStoreTestUtils {
ApplicationHistoryManagerImpl applicationHistoryManagerImpl = null;
@Before
public void setup() throws Exception {
Configuration config = new Configuration();
config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl();
applicationHistoryManagerImpl.init(config);
applicationHistoryManagerImpl.start();
store = applicationHistoryManagerImpl.getHistoryStore();
}
@After
public void tearDown() throws Exception {
applicationHistoryManagerImpl.stop();
}
@Test
public void testApplicationReport() throws IOException, YarnException {
ApplicationId appId = null;
appId = ApplicationId.newInstance(0, 1);
writeApplicationStartData(appId);
writeApplicationFinishData(appId);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
writeApplicationAttemptStartData(appAttemptId);
writeApplicationAttemptFinishData(appAttemptId);
ApplicationReport appReport =
applicationHistoryManagerImpl.getApplication(appId);
Assert.assertNotNull(appReport);
Assert.assertEquals(appId, appReport.getApplicationId());
Assert.assertEquals(appAttemptId,
appReport.getCurrentApplicationAttemptId());
Assert.assertEquals(appAttemptId.toString(), appReport.getHost());
Assert.assertEquals("test type", appReport.getApplicationType().toString());
Assert.assertEquals("test queue", appReport.getQueue().toString());
}
}
| 3,018 | 39.253333 | 80 | java |

hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.junit.Assert;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestFileSystemApplicationHistoryStore extends
ApplicationHistoryStoreTestUtils {
private static Log LOG = LogFactory
.getLog(TestFileSystemApplicationHistoryStore.class.getName());
private FileSystem fs;
private Path fsWorkingPath;
@Before
public void setup() throws Exception {
fs = new RawLocalFileSystem();
initAndStartStore(fs);
}
private void initAndStartStore(final FileSystem fs) throws IOException,
URISyntaxException {
Configuration conf = new Configuration();
fs.initialize(new URI("/"), conf);
fsWorkingPath =
new Path("target",
TestFileSystemApplicationHistoryStore.class.getSimpleName());
fs.delete(fsWorkingPath, true);
conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
fsWorkingPath.toString());
store = new FileSystemApplicationHistoryStore() {
@Override
protected FileSystem getFileSystem(Path path, Configuration conf) {
return fs;
}
};
store.init(conf);
store.start();
}
@After
public void tearDown() throws Exception {
store.stop();
fs.delete(fsWorkingPath, true);
fs.close();
}
@Test
public void testReadWriteHistoryData() throws IOException {
LOG.info("Starting testReadWriteHistoryData");
testWriteHistoryData(5);
testReadHistoryData(5);
}
private void testWriteHistoryData(int num) throws IOException {
testWriteHistoryData(num, false, false);
}
private void testWriteHistoryData(
int num, boolean missingContainer, boolean missingApplicationAttempt)
throws IOException {
// write application history data
for (int i = 1; i <= num; ++i) {
ApplicationId appId = ApplicationId.newInstance(0, i);
writeApplicationStartData(appId);
// write application attempt history data
for (int j = 1; j <= num; ++j) {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, j);
writeApplicationAttemptStartData(appAttemptId);
if (missingApplicationAttempt && j == num) {
continue;
}
// write container history data
for (int k = 1; k <= num; ++k) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
writeContainerStartData(containerId);
if (missingContainer && k == num) {
continue;
}
writeContainerFinishData(containerId);
}
writeApplicationAttemptFinishData(appAttemptId);
}
writeApplicationFinishData(appId);
}
}
private void testReadHistoryData(int num) throws IOException {
testReadHistoryData(num, false, false);
}
@SuppressWarnings("deprecation")
private void testReadHistoryData(
int num, boolean missingContainer, boolean missingApplicationAttempt)
throws IOException {
// read application history data
Assert.assertEquals(num, store.getAllApplications().size());
for (int i = 1; i <= num; ++i) {
ApplicationId appId = ApplicationId.newInstance(0, i);
ApplicationHistoryData appData = store.getApplication(appId);
Assert.assertNotNull(appData);
Assert.assertEquals(appId.toString(), appData.getApplicationName());
Assert.assertEquals(appId.toString(), appData.getDiagnosticsInfo());
// read application attempt history data
Assert.assertEquals(num, store.getApplicationAttempts(appId).size());
for (int j = 1; j <= num; ++j) {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, j);
ApplicationAttemptHistoryData attemptData =
store.getApplicationAttempt(appAttemptId);
Assert.assertNotNull(attemptData);
Assert.assertEquals(appAttemptId.toString(), attemptData.getHost());
if (missingApplicationAttempt && j == num) {
Assert.assertNull(attemptData.getDiagnosticsInfo());
continue;
} else {
Assert.assertEquals(appAttemptId.toString(),
attemptData.getDiagnosticsInfo());
}
// read container history data
Assert.assertEquals(num, store.getContainers(appAttemptId).size());
for (int k = 1; k <= num; ++k) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
ContainerHistoryData containerData = store.getContainer(containerId);
Assert.assertNotNull(containerData);
Assert.assertEquals(Priority.newInstance(containerId.getId()),
containerData.getPriority());
if (missingContainer && k == num) {
Assert.assertNull(containerData.getDiagnosticsInfo());
} else {
Assert.assertEquals(containerId.toString(),
containerData.getDiagnosticsInfo());
}
}
ContainerHistoryData masterContainer =
store.getAMContainer(appAttemptId);
Assert.assertNotNull(masterContainer);
Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
masterContainer.getContainerId());
}
}
}
@Test
public void testWriteAfterApplicationFinish() throws IOException {
LOG.info("Starting testWriteAfterApplicationFinish");
ApplicationId appId = ApplicationId.newInstance(0, 1);
writeApplicationStartData(appId);
writeApplicationFinishData(appId);
// write application attempt history data
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
try {
writeApplicationAttemptStartData(appAttemptId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is not opened"));
}
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is not opened"));
}
// write container history data
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
try {
writeContainerStartData(containerId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is not opened"));
}
try {
writeContainerFinishData(containerId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is not opened"));
}
}
@Test
public void testMassiveWriteContainerHistoryData() throws IOException {
LOG.info("Starting testMassiveWriteContainerHistoryData");
long mb = 1024 * 1024;
long usedDiskBefore = fs.getContentSummary(fsWorkingPath).getLength() / mb;
ApplicationId appId = ApplicationId.newInstance(0, 1);
writeApplicationStartData(appId);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
for (int i = 1; i <= 100000; ++i) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
writeApplicationFinishData(appId);
long usedDiskAfter = fs.getContentSummary(fsWorkingPath).getLength() / mb;
Assert.assertTrue((usedDiskAfter - usedDiskBefore) < 20);
}
@Test
public void testMissingContainerHistoryData() throws IOException {
LOG.info("Starting testMissingContainerHistoryData");
testWriteHistoryData(3, true, false);
testReadHistoryData(3, true, false);
}
@Test
public void testMissingApplicationAttemptHistoryData() throws IOException {
LOG.info("Starting testMissingApplicationAttemptHistoryData");
testWriteHistoryData(3, false, true);
testReadHistoryData(3, false, true);
}
@Test
public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
tearDown();
// Setup file system to inject startup conditions
FileSystem fs = spy(new RawLocalFileSystem());
doReturn(true).when(fs).isDirectory(any(Path.class));
try {
initAndStartStore(fs);
} catch (Exception e) {
Assert.fail("Exception should not be thrown: " + e);
}
// Make sure that directory creation was not attempted
verify(fs, times(1)).isDirectory(any(Path.class));
verify(fs, times(0)).mkdirs(any(Path.class));
}
@Test
public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
LOG.info("Starting testInitNonExistingWorkingDirectoryInSafeMode");
tearDown();
// Setup file system to inject startup conditions
FileSystem fs = spy(new RawLocalFileSystem());
doReturn(false).when(fs).isDirectory(any(Path.class));
doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
try {
initAndStartStore(fs);
Assert.fail("Exception should have been thrown");
} catch (Exception e) {
// Expected failure
}
// Make sure that directory creation was attempted
verify(fs, times(1)).isDirectory(any(Path.class));
verify(fs, times(1)).mkdirs(any(Path.class));
}
}
| 11,184 | 35.314935 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.lib.StaticUserWebFilter;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.recovery.MemoryTimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
public class TestApplicationHistoryServer {
  // Simple test of init/start/stop for ApplicationHistoryServer; the service
  // state should change accordingly.
@Test(timeout = 60000)
public void testStartStopServer() throws Exception {
ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
Configuration config = new YarnConfiguration();
config.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
MemoryTimelineStore.class, TimelineStore.class);
config.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
MemoryTimelineStateStore.class, TimelineStateStore.class);
config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, "localhost:0");
try {
try {
historyServer.init(config);
config.setInt(YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
0);
historyServer.start();
fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT));
}
config.setInt(YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT);
historyServer = new ApplicationHistoryServer();
historyServer.init(config);
assertEquals(STATE.INITED, historyServer.getServiceState());
assertEquals(5, historyServer.getServices().size());
ApplicationHistoryClientService historyService =
historyServer.getClientService();
assertNotNull(historyServer.getClientService());
assertEquals(STATE.INITED, historyService.getServiceState());
historyServer.start();
assertEquals(STATE.STARTED, historyServer.getServiceState());
assertEquals(STATE.STARTED, historyService.getServiceState());
historyServer.stop();
assertEquals(STATE.STOPPED, historyServer.getServiceState());
} finally {
historyServer.stop();
}
}
  // Test the launch method.
@Test(timeout = 60000)
public void testLaunch() throws Exception {
ExitUtil.disableSystemExit();
ApplicationHistoryServer historyServer = null;
try {
      // The config of this test case cannot be modified here, but the other
      // test cases have been customized to avoid conflicts.
historyServer =
ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
} catch (ExitUtil.ExitException e) {
assertEquals(0, e.status);
ExitUtil.resetFirstExitException();
fail();
} finally {
if (historyServer != null) {
historyServer.stop();
}
}
}
  // Test the launch method with -D arguments.
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
ExitUtil.disableSystemExit();
ApplicationHistoryServer historyServer = null;
try {
      // The config of this test case cannot be modified here, but the other
      // test cases have been customized to avoid conflicts.
      String[] args = new String[2];
      args[0] = "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS
          + "=4000";
      args[1] = "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
historyServer =
ApplicationHistoryServer.launchAppHistoryServer(args);
Configuration conf = historyServer.getConfig();
assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
} catch (ExitUtil.ExitException e) {
assertEquals(0, e.status);
ExitUtil.resetFirstExitException();
fail();
} finally {
if (historyServer != null) {
historyServer.stop();
}
}
}
@Test(timeout = 240000)
public void testFilterOverrides() throws Exception {
HashMap<String, String> driver = new HashMap<String, String>();
driver.put("", TimelineAuthenticationFilterInitializer.class.getName());
driver.put(StaticUserWebFilter.class.getName(),
TimelineAuthenticationFilterInitializer.class.getName() + ","
+ StaticUserWebFilter.class.getName());
driver.put(AuthenticationFilterInitializer.class.getName(),
TimelineAuthenticationFilterInitializer.class.getName());
driver.put(TimelineAuthenticationFilterInitializer.class.getName(),
TimelineAuthenticationFilterInitializer.class.getName());
driver.put(AuthenticationFilterInitializer.class.getName() + ","
+ TimelineAuthenticationFilterInitializer.class.getName(),
TimelineAuthenticationFilterInitializer.class.getName());
driver.put(AuthenticationFilterInitializer.class.getName() + ", "
+ TimelineAuthenticationFilterInitializer.class.getName(),
TimelineAuthenticationFilterInitializer.class.getName());
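    // Each key is a configured value of hadoop.http.filter.initializers and
    // each value is what that setting is expected to become after the history
    // server initializes: the timeline filter is always present, and any
    // AuthenticationFilterInitializer entry is replaced by it.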
for (Map.Entry<String, String> entry : driver.entrySet()) {
String filterInitializer = entry.getKey();
String expectedValue = entry.getValue();
ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
Configuration config = new YarnConfiguration();
config.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
MemoryTimelineStore.class, TimelineStore.class);
config.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
MemoryTimelineStateStore.class, TimelineStateStore.class);
config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, "localhost:0");
try {
config.set("hadoop.http.filter.initializers", filterInitializer);
historyServer.init(config);
historyServer.start();
Configuration tmp = historyServer.getConfig();
assertEquals(expectedValue, tmp.get("hadoop.http.filter.initializers"));
} finally {
historyServer.stop();
}
}
}
}
| 7,614 | 42.022599 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestApplicationHistoryClientService {
private static ApplicationHistoryClientService clientService;
private static TimelineDataManager dataManager;
  private static final int MAX_APPS = 2;
@BeforeClass
public static void setup() throws Exception {
Configuration conf = new YarnConfiguration();
TimelineStore store =
TestApplicationHistoryManagerOnTimelineStore.createStore(MAX_APPS);
TimelineACLsManager aclsManager = new TimelineACLsManager(conf);
dataManager =
new TimelineDataManager(store, aclsManager);
dataManager.init(conf);
ApplicationACLsManager appAclsManager = new ApplicationACLsManager(conf);
ApplicationHistoryManagerOnTimelineStore historyManager =
new ApplicationHistoryManagerOnTimelineStore(dataManager, appAclsManager);
historyManager.init(conf);
historyManager.start();
clientService = new ApplicationHistoryClientService(historyManager);
}
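  // createStore(MAX_APPS) populates the timeline store so that application,
  // attempt and container ids from 1 to MAX_APPS resolve, while id
  // MAX_APPS + 1 does not; the *NotFound tests below rely on this.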
@Test
public void testApplicationNotFound() throws IOException, YarnException {
    ApplicationId appId = ApplicationId.newInstance(0, MAX_APPS + 1);
GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(appId);
try {
@SuppressWarnings("unused")
GetApplicationReportResponse response =
clientService.getApplicationReport(request);
Assert.fail("Exception should have been thrown before we reach here.");
} catch (ApplicationNotFoundException e) {
      // This exception is expected.
Assert.assertTrue(e.getMessage().contains(
"doesn't exist in the timeline store"));
} catch (Exception e) {
Assert.fail("Undesired exception caught");
}
}
@Test
public void testApplicationAttemptNotFound() throws IOException, YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, MAX_APPS + 1);
GetApplicationAttemptReportRequest request =
GetApplicationAttemptReportRequest.newInstance(appAttemptId);
try {
@SuppressWarnings("unused")
GetApplicationAttemptReportResponse response =
clientService.getApplicationAttemptReport(request);
Assert.fail("Exception should have been thrown before we reach here.");
    } catch (ApplicationAttemptNotFoundException e) {
      // This exception is expected.
      Assert.assertTrue(e.getMessage().contains(
          "doesn't exist in the timeline store"));
} catch (Exception e) {
Assert.fail("Undesired exception caught");
}
}
@Test
public void testContainerNotFound() throws IOException, YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId,
MAX_APPS + 1);
GetContainerReportRequest request =
GetContainerReportRequest.newInstance(containerId);
    try {
      @SuppressWarnings("unused")
      GetContainerReportResponse response =
          clientService.getContainerReport(request);
      Assert.fail("Exception should have been thrown before we reach here.");
    } catch (ContainerNotFoundException e) {
      // This exception is expected.
      Assert.assertTrue(e.getMessage().contains(
          "doesn't exist in the timeline store"));
} catch (Exception e) {
Assert.fail("Undesired exception caught");
}
}
@Test
public void testApplicationReport() throws IOException, YarnException {
    ApplicationId appId = ApplicationId.newInstance(0, 1);
GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(appId);
GetApplicationReportResponse response =
clientService.getApplicationReport(request);
ApplicationReport appReport = response.getApplicationReport();
Assert.assertNotNull(appReport);
Assert.assertEquals(123, appReport.getApplicationResourceUsageReport()
.getMemorySeconds());
Assert.assertEquals(345, appReport.getApplicationResourceUsageReport()
.getVcoreSeconds());
Assert.assertEquals("application_0_0001", appReport.getApplicationId()
.toString());
Assert.assertEquals("test app type",
appReport.getApplicationType().toString());
Assert.assertEquals("test queue", appReport.getQueue().toString());
}
@Test
public void testApplications() throws IOException, YarnException {
    ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationId appId1 = ApplicationId.newInstance(0, 2);
GetApplicationsRequest request = GetApplicationsRequest.newInstance();
GetApplicationsResponse response =
clientService.getApplications(request);
List<ApplicationReport> appReport = response.getApplicationList();
Assert.assertNotNull(appReport);
Assert.assertEquals(appId, appReport.get(1).getApplicationId());
Assert.assertEquals(appId1, appReport.get(0).getApplicationId());
    // Create a second historyManager whose maximum number of applications
    // that can be loaded is set to 1.
Configuration conf = new YarnConfiguration();
conf.setLong(YarnConfiguration.APPLICATION_HISTORY_MAX_APPS, 1);
ApplicationHistoryManagerOnTimelineStore historyManager2 =
new ApplicationHistoryManagerOnTimelineStore(dataManager,
new ApplicationACLsManager(conf));
historyManager2.init(conf);
historyManager2.start();
@SuppressWarnings("resource")
ApplicationHistoryClientService clientService2 =
new ApplicationHistoryClientService(historyManager2);
response = clientService2.getApplications(request);
appReport = response.getApplicationList();
Assert.assertNotNull(appReport);
    Assert.assertEquals(1, appReport.size());
// Expected to get the appReport for application with appId1
Assert.assertEquals(appId1, appReport.get(0).getApplicationId());
}
@Test
public void testApplicationAttemptReport() throws IOException, YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
GetApplicationAttemptReportRequest request =
GetApplicationAttemptReportRequest.newInstance(appAttemptId);
GetApplicationAttemptReportResponse response =
clientService.getApplicationAttemptReport(request);
ApplicationAttemptReport attemptReport =
response.getApplicationAttemptReport();
Assert.assertNotNull(attemptReport);
Assert.assertEquals("appattempt_0_0001_000001", attemptReport
.getApplicationAttemptId().toString());
}
@Test
public void testApplicationAttempts() throws IOException, YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ApplicationAttemptId appAttemptId1 =
ApplicationAttemptId.newInstance(appId, 2);
GetApplicationAttemptsRequest request =
GetApplicationAttemptsRequest.newInstance(appId);
GetApplicationAttemptsResponse response =
clientService.getApplicationAttempts(request);
List<ApplicationAttemptReport> attemptReports =
response.getApplicationAttemptList();
Assert.assertNotNull(attemptReports);
Assert.assertEquals(appAttemptId, attemptReports.get(0)
.getApplicationAttemptId());
Assert.assertEquals(appAttemptId1, attemptReports.get(1)
.getApplicationAttemptId());
}
@Test
public void testContainerReport() throws IOException, YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
GetContainerReportRequest request =
GetContainerReportRequest.newInstance(containerId);
GetContainerReportResponse response =
clientService.getContainerReport(request);
ContainerReport container = response.getContainerReport();
Assert.assertNotNull(container);
Assert.assertEquals(containerId, container.getContainerId());
Assert.assertEquals("http://0.0.0.0:8188/applicationhistory/logs/" +
"test host:100/container_0_0001_01_000001/" +
"container_0_0001_01_000001/user1", container.getLogUrl());
}
@Test
public void testContainers() throws IOException, YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 2);
GetContainersRequest request =
GetContainersRequest.newInstance(appAttemptId);
GetContainersResponse response =
clientService.getContainers(request);
List<ContainerReport> containers = response.getContainerList();
Assert.assertNotNull(containers);
Assert.assertEquals(containerId, containers.get(0).getContainerId());
Assert.assertEquals(containerId1, containers.get(1).getContainerId());
}
}
| 12,146 | 44.324627 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
public class ApplicationHistoryStoreTestUtils {
protected ApplicationHistoryStore store;
protected void writeApplicationStartData(ApplicationId appId)
throws IOException {
store.applicationStarted(ApplicationStartData.newInstance(appId,
appId.toString(), "test type", "test queue", "test user", 0, 0));
}
protected void writeApplicationFinishData(ApplicationId appId)
throws IOException {
store.applicationFinished(ApplicationFinishData.newInstance(appId, 0,
appId.toString(), FinalApplicationStatus.UNDEFINED,
YarnApplicationState.FINISHED));
}
protected void writeApplicationAttemptStartData(
ApplicationAttemptId appAttemptId) throws IOException {
store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
appAttemptId, appAttemptId.toString(), 0,
ContainerId.newContainerId(appAttemptId, 1)));
}
protected void writeApplicationAttemptFinishData(
ApplicationAttemptId appAttemptId) throws IOException {
store.applicationAttemptFinished(ApplicationAttemptFinishData.newInstance(
appAttemptId, appAttemptId.toString(), "test tracking url",
FinalApplicationStatus.UNDEFINED, YarnApplicationAttemptState.FINISHED));
}
@SuppressWarnings("deprecation")
protected void writeContainerStartData(ContainerId containerId)
throws IOException {
store.containerStarted(ContainerStartData.newInstance(containerId,
Resource.newInstance(0, 0), NodeId.newInstance("localhost", 0),
Priority.newInstance(containerId.getId()), 0));
}
protected void writeContainerFinishData(ContainerId containerId)
throws IOException {
store.containerFinished(ContainerFinishData.newInstance(containerId, 0,
containerId.toString(), 0, ContainerState.COMPLETE));
}
}
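// A minimal usage sketch (illustration only, not part of the utilities): a
// subclass assigns the inherited store field and then calls the write helpers
// in start/finish order, for example:
//
//   store = new MemoryApplicationHistoryStore();
//   ApplicationId appId = ApplicationId.newInstance(0, 1);
//   writeApplicationStartData(appId);
//   writeApplicationFinishData(appId);
//
// TestMemoryApplicationHistoryStore below follows exactly this pattern.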
| 3,867 | 43.976744 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.junit.Before;
import org.junit.Test;
public class TestMemoryApplicationHistoryStore extends
ApplicationHistoryStoreTestUtils {
@Before
public void setup() {
store = new MemoryApplicationHistoryStore();
}
@Test
public void testReadWriteApplicationHistory() throws Exception {
// Out of order
ApplicationId appId = ApplicationId.newInstance(0, 1);
try {
writeApplicationFinishData(appId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains(
"is stored before the start information"));
}
// Normal
int numApps = 5;
for (int i = 1; i <= numApps; ++i) {
appId = ApplicationId.newInstance(0, i);
writeApplicationStartData(appId);
writeApplicationFinishData(appId);
}
Assert.assertEquals(numApps, store.getAllApplications().size());
for (int i = 1; i <= numApps; ++i) {
appId = ApplicationId.newInstance(0, i);
ApplicationHistoryData data = store.getApplication(appId);
Assert.assertNotNull(data);
Assert.assertEquals(appId.toString(), data.getApplicationName());
Assert.assertEquals(appId.toString(), data.getDiagnosticsInfo());
}
// Write again
appId = ApplicationId.newInstance(0, 1);
try {
writeApplicationStartData(appId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeApplicationFinishData(appId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
@Test
public void testReadWriteApplicationAttemptHistory() throws Exception {
// Out of order
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains(
"is stored before the start information"));
}
// Normal
int numAppAttempts = 5;
writeApplicationStartData(appId);
for (int i = 1; i <= numAppAttempts; ++i) {
appAttemptId = ApplicationAttemptId.newInstance(appId, i);
writeApplicationAttemptStartData(appAttemptId);
writeApplicationAttemptFinishData(appAttemptId);
}
Assert.assertEquals(numAppAttempts, store.getApplicationAttempts(appId)
.size());
for (int i = 1; i <= numAppAttempts; ++i) {
appAttemptId = ApplicationAttemptId.newInstance(appId, i);
ApplicationAttemptHistoryData data =
store.getApplicationAttempt(appAttemptId);
Assert.assertNotNull(data);
Assert.assertEquals(appAttemptId.toString(), data.getHost());
Assert.assertEquals(appAttemptId.toString(), data.getDiagnosticsInfo());
}
writeApplicationFinishData(appId);
// Write again
appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
try {
writeApplicationAttemptStartData(appAttemptId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
@SuppressWarnings("deprecation")
@Test
public void testReadWriteContainerHistory() throws Exception {
// Out of order
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
try {
writeContainerFinishData(containerId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains(
"is stored before the start information"));
}
// Normal
writeApplicationAttemptStartData(appAttemptId);
int numContainers = 5;
for (int i = 1; i <= numContainers; ++i) {
containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
    Assert.assertEquals(numContainers,
        store.getContainers(appAttemptId).size());
for (int i = 1; i <= numContainers; ++i) {
containerId = ContainerId.newContainerId(appAttemptId, i);
ContainerHistoryData data = store.getContainer(containerId);
Assert.assertNotNull(data);
Assert.assertEquals(Priority.newInstance(containerId.getId()),
data.getPriority());
Assert.assertEquals(containerId.toString(), data.getDiagnosticsInfo());
}
ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId);
Assert.assertNotNull(masterContainer);
Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
masterContainer.getContainerId());
writeApplicationAttemptFinishData(appAttemptId);
// Write again
containerId = ContainerId.newContainerId(appAttemptId, 1);
try {
writeContainerStartData(containerId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeContainerFinishData(containerId);
Assert.fail();
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
@Test
public void testMassiveWriteContainerHistory() throws IOException {
long mb = 1024 * 1024;
Runtime runtime = Runtime.getRuntime();
long usedMemoryBefore = (runtime.totalMemory() - runtime.freeMemory()) / mb;
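    // Note: this heap-delta check is only a coarse upper bound; GC timing can
    // perturb the measurement, so the threshold below is deliberately loose.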
int numContainers = 100000;
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
for (int i = 1; i <= numContainers; ++i) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}
}
| 7,741 | 36.582524 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.ws.rs.core.MediaType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerOnTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryManagerOnTimelineStore;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Singleton;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.WebAppDescriptor;
@RunWith(Parameterized.class)
public class TestAHSWebServices extends JerseyTestBase {
private static ApplicationHistoryClientService historyClientService;
  private static final String[] USERS = new String[] { "foo", "bar" };
private static final int MAX_APPS = 5;
@BeforeClass
public static void setupClass() throws Exception {
Configuration conf = new YarnConfiguration();
TimelineStore store =
TestApplicationHistoryManagerOnTimelineStore.createStore(MAX_APPS);
TimelineACLsManager aclsManager = new TimelineACLsManager(conf);
TimelineDataManager dataManager =
new TimelineDataManager(store, aclsManager);
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "foo");
dataManager.init(conf);
ApplicationACLsManager appAclsManager = new ApplicationACLsManager(conf);
ApplicationHistoryManagerOnTimelineStore historyManager =
new ApplicationHistoryManagerOnTimelineStore(dataManager, appAclsManager);
historyManager.init(conf);
historyClientService = new ApplicationHistoryClientService(historyManager) {
@Override
protected void serviceStart() throws Exception {
// Do Nothing
}
};
historyClientService.init(conf);
historyClientService.start();
}
@AfterClass
public static void tearDownClass() throws Exception {
if (historyClientService != null) {
historyClientService.stop();
}
}
@Parameterized.Parameters
public static Collection<Object[]> rounds() {
return Arrays.asList(new Object[][] { { 0 }, { 1 } });
}
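  // Round 0 runs requests as "foo", the admin configured in YARN_ADMIN_ACL;
  // round 1 runs them as "bar", who is not authorized. With ACLs enabled,
  // round 1 expects FORBIDDEN on attempt- and container-level resources.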
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
bind(JAXBContextResolver.class);
bind(AHSWebServices.class);
bind(GenericExceptionHandler.class);
bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
serve("/*").with(GuiceContainer.class);
filter("/*").through(TestSimpleAuthFilter.class);
}
});
@Singleton
public static class TestSimpleAuthFilter extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties properties =
super.getConfiguration(configPrefix, filterConfig);
properties.put(AuthenticationFilter.AUTH_TYPE, "simple");
properties.put(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
return properties;
}
}
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
private int round;
public TestAHSWebServices(int round) {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
this.round = round;
}
@Test
public void testInvalidApp() {
ApplicationId appId = ApplicationId.newInstance(0, MAX_APPS + 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals("404 not found expected", Status.NOT_FOUND,
response.getClientResponseStatus());
}
@Test
public void testInvalidAttempt() {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, MAX_APPS + 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString()).path("appattempts")
.path(appAttemptId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
if (round == 1) {
assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
return;
}
assertEquals("404 not found expected", Status.NOT_FOUND,
response.getClientResponseStatus());
}
@Test
public void testInvalidContainer() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId,
MAX_APPS + 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString()).path("appattempts")
.path(appAttemptId.toString()).path("containers")
.path(containerId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
if (round == 1) {
assertEquals(
Status.FORBIDDEN, response.getClientResponseStatus());
return;
}
assertEquals("404 not found expected", Status.NOT_FOUND,
response.getClientResponseStatus());
}
@Test
  public void testInvalidUri() throws Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr =
r.path("ws").path("v1").path("applicationhistory").path("bogus")
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
  public void testInvalidUri2() throws Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
  public void testInvalidAccept() throws Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr =
r.path("ws").path("v1").path("applicationhistory")
.queryParam("user.name", USERS[round])
.accept(MediaType.TEXT_PLAIN).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.INTERNAL_SERVER_ERROR,
response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testAbout() throws Exception {
WebResource r = resource();
ClientResponse response = r
.path("ws").path("v1").path("applicationhistory").path("about")
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineAbout actualAbout = response.getEntity(TimelineAbout.class);
TimelineAbout expectedAbout =
TimelineUtils.createTimelineAbout("Generic History Service API");
Assert.assertNotNull(
"Timeline service about response is null", actualAbout);
Assert.assertEquals(expectedAbout.getAbout(), actualAbout.getAbout());
Assert.assertEquals(expectedAbout.getTimelineServiceVersion(),
actualAbout.getTimelineServiceVersion());
Assert.assertEquals(expectedAbout.getTimelineServiceBuildVersion(),
actualAbout.getTimelineServiceBuildVersion());
Assert.assertEquals(expectedAbout.getTimelineServiceVersionBuiltOn(),
actualAbout.getTimelineServiceVersionBuiltOn());
Assert.assertEquals(expectedAbout.getHadoopVersion(),
actualAbout.getHadoopVersion());
Assert.assertEquals(expectedAbout.getHadoopBuildVersion(),
actualAbout.getHadoopBuildVersion());
Assert.assertEquals(expectedAbout.getHadoopVersionBuiltOn(),
actualAbout.getHadoopVersionBuiltOn());
}
@Test
public void testAppsQuery() throws Exception {
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.queryParam("state", YarnApplicationState.FINISHED.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject apps = json.getJSONObject("apps");
assertEquals("incorrect number of elements", 1, apps.length());
JSONArray array = apps.getJSONArray("app");
assertEquals("incorrect number of elements", 5, array.length());
}
@Test
public void testSingleApp() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject app = json.getJSONObject("app");
assertEquals(appId.toString(), app.getString("appId"));
assertEquals("test app", app.get("name"));
assertEquals(round == 0 ? "test diagnostics info" : "",
app.get("diagnosticsInfo"));
assertEquals("test queue", app.get("queue"));
assertEquals("user1", app.get("user"));
assertEquals("test app type", app.get("type"));
assertEquals(FinalApplicationStatus.UNDEFINED.toString(),
app.get("finalAppStatus"));
assertEquals(YarnApplicationState.FINISHED.toString(), app.get("appState"));
}
@Test
public void testMultipleAttempts() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString()).path("appattempts")
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
if (round == 1) {
assertEquals(
Status.FORBIDDEN, response.getClientResponseStatus());
return;
}
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject appAttempts = json.getJSONObject("appAttempts");
assertEquals("incorrect number of elements", 1, appAttempts.length());
JSONArray array = appAttempts.getJSONArray("appAttempt");
assertEquals("incorrect number of elements", 5, array.length());
}
@Test
public void testSingleAttempt() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString()).path("appattempts")
.path(appAttemptId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
if (round == 1) {
assertEquals(
Status.FORBIDDEN, response.getClientResponseStatus());
return;
}
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject appAttempt = json.getJSONObject("appAttempt");
assertEquals(appAttemptId.toString(), appAttempt.getString("appAttemptId"));
assertEquals("test host", appAttempt.getString("host"));
assertEquals("test diagnostics info",
appAttempt.getString("diagnosticsInfo"));
assertEquals("test tracking url", appAttempt.getString("trackingUrl"));
assertEquals(YarnApplicationAttemptState.FINISHED.toString(),
appAttempt.get("appAttemptState"));
}
@Test
public void testMultipleContainers() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString()).path("appattempts")
.path(appAttemptId.toString()).path("containers")
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
if (round == 1) {
assertEquals(
Status.FORBIDDEN, response.getClientResponseStatus());
return;
}
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject containers = json.getJSONObject("containers");
assertEquals("incorrect number of elements", 1, containers.length());
JSONArray array = containers.getJSONArray("container");
assertEquals("incorrect number of elements", 5, array.length());
}
@Test
public void testSingleContainer() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString()).path("appattempts")
.path(appAttemptId.toString()).path("containers")
.path(containerId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
if (round == 1) {
assertEquals(
Status.FORBIDDEN, response.getClientResponseStatus());
return;
}
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject container = json.getJSONObject("container");
assertEquals(containerId.toString(), container.getString("containerId"));
assertEquals("test diagnostics info", container.getString("diagnosticsInfo"));
assertEquals("-1", container.getString("allocatedMB"));
assertEquals("-1", container.getString("allocatedVCores"));
assertEquals(NodeId.newInstance("test host", 100).toString(),
container.getString("assignedNodeId"));
assertEquals("-1", container.getString("priority"));
Configuration conf = new YarnConfiguration();
assertEquals(WebAppUtils.getHttpSchemePrefix(conf) +
WebAppUtils.getAHSWebAppURLWithoutScheme(conf) +
"/applicationhistory/logs/test host:100/container_0_0001_01_000001/" +
"container_0_0001_01_000001/user1", container.getString("logUrl"));
assertEquals(ContainerState.COMPLETE.toString(),
container.getString("containerState"));
}
}
| 20,209 | 41.637131 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.webapp.Params.TITLE;
import static org.mockito.Mockito.mock;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStoreTestUtils;
import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
import org.apache.hadoop.yarn.util.StringHelper;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Before;
import org.junit.Test;
import com.google.inject.Injector;
public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
public void setApplicationHistoryStore(ApplicationHistoryStore store) {
this.store = store;
}
@Before
public void setup() {
store = new MemoryApplicationHistoryStore();
}
@Test
public void testAppControllerIndex() throws Exception {
ApplicationHistoryManager ahManager = mock(ApplicationHistoryManager.class);
Injector injector =
WebAppTests.createMockInjector(ApplicationHistoryManager.class,
ahManager);
AHSController controller = injector.getInstance(AHSController.class);
controller.index();
    Assert.assertEquals("Application History",
        controller.get(TITLE, "unknown"));
}
@Test
public void testView() throws Exception {
Injector injector =
WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
mockApplicationHistoryClientService(5, 1, 1));
AHSView ahsViewInstance = injector.getInstance(AHSView.class);
ahsViewInstance.render();
WebAppTests.flushOutput(injector);
ahsViewInstance.set(YarnWebParams.APP_STATE,
YarnApplicationState.FAILED.toString());
ahsViewInstance.render();
WebAppTests.flushOutput(injector);
ahsViewInstance.set(YarnWebParams.APP_STATE, StringHelper.cjoin(
YarnApplicationState.FAILED.toString(), YarnApplicationState.KILLED));
ahsViewInstance.render();
WebAppTests.flushOutput(injector);
}
@Test
public void testAboutPage() throws Exception {
Injector injector =
WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
mockApplicationHistoryClientService(0, 0, 0));
AboutPage aboutPageInstance = injector.getInstance(AboutPage.class);
aboutPageInstance.render();
WebAppTests.flushOutput(injector);
aboutPageInstance.render();
WebAppTests.flushOutput(injector);
}
@Test
public void testAppPage() throws Exception {
Injector injector =
WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
mockApplicationHistoryClientService(1, 5, 1));
AppPage appPageInstance = injector.getInstance(AppPage.class);
appPageInstance.render();
WebAppTests.flushOutput(injector);
appPageInstance.set(YarnWebParams.APPLICATION_ID, ApplicationId
.newInstance(0, 1).toString());
appPageInstance.render();
WebAppTests.flushOutput(injector);
}
@Test
public void testAppAttemptPage() throws Exception {
Injector injector =
WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
mockApplicationHistoryClientService(1, 1, 5));
AppAttemptPage appAttemptPageInstance =
injector.getInstance(AppAttemptPage.class);
appAttemptPageInstance.render();
WebAppTests.flushOutput(injector);
appAttemptPageInstance.set(YarnWebParams.APPLICATION_ATTEMPT_ID,
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1)
.toString());
appAttemptPageInstance.render();
WebAppTests.flushOutput(injector);
}
@Test
public void testContainerPage() throws Exception {
Injector injector =
WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
mockApplicationHistoryClientService(1, 1, 1));
ContainerPage containerPageInstance =
injector.getInstance(ContainerPage.class);
containerPageInstance.render();
WebAppTests.flushOutput(injector);
containerPageInstance.set(
YarnWebParams.CONTAINER_ID,
ContainerId
.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1),
1).toString());
containerPageInstance.render();
WebAppTests.flushOutput(injector);
}
ApplicationHistoryClientService mockApplicationHistoryClientService(int numApps,
int numAppAttempts, int numContainers) throws Exception {
ApplicationHistoryManager ahManager =
new MockApplicationHistoryManagerImpl(store);
ApplicationHistoryClientService historyClientService =
new ApplicationHistoryClientService(ahManager);
for (int i = 1; i <= numApps; ++i) {
ApplicationId appId = ApplicationId.newInstance(0, i);
writeApplicationStartData(appId);
for (int j = 1; j <= numAppAttempts; ++j) {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, j);
writeApplicationAttemptStartData(appAttemptId);
for (int k = 1; k <= numContainers; ++k) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
writeApplicationAttemptFinishData(appAttemptId);
}
writeApplicationFinishData(appId);
}
return historyClientService;
}
class MockApplicationHistoryManagerImpl extends ApplicationHistoryManagerImpl {
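    // Note: the store parameter is unused; the outer test's store field is
    // what the overridden createApplicationHistoryStore() below returns.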
public MockApplicationHistoryManagerImpl(ApplicationHistoryStore store) {
super();
init(new YarnConfiguration());
start();
}
@Override
protected ApplicationHistoryStore createApplicationHistoryStore(
Configuration conf) {
return store;
}
  }
}
| 7,392 | 35.965 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import static org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.iq80.leveldb.DBException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TestLeveldbTimelineStore extends TimelineStoreTestUtils {
private FileContext fsContext;
private File fsPath;
private Configuration config = new YarnConfiguration();
@Before
public void setup() throws Exception {
fsContext = FileContext.getLocalFSFileContext();
fsPath = new File("target", this.getClass().getSimpleName() +
"-tmpDir").getAbsoluteFile();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
config.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
fsPath.getAbsolutePath());
config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, false);
store = new LeveldbTimelineStore();
store.init(config);
store.start();
loadTestEntityData();
loadVerificationEntityData();
loadTestDomainData();
}
@After
public void tearDown() throws Exception {
store.stop();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
}
@Test
public void testRootDirPermission() throws IOException {
FileSystem fs = FileSystem.getLocal(new YarnConfiguration());
FileStatus file = fs.getFileStatus(
new Path(fsPath.getAbsolutePath(), LeveldbTimelineStore.FILENAME));
assertNotNull(file);
assertEquals(LeveldbTimelineStore.LEVELDB_DIR_UMASK, file.getPermission());
}
@Test
public void testGetSingleEntity() throws IOException {
super.testGetSingleEntity();
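    // Repeat the same reads with a cold start-time cache.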
((LeveldbTimelineStore)store).clearStartTimeCache();
super.testGetSingleEntity();
loadTestEntityData();
}
@Test
public void testGetEntities() throws IOException {
super.testGetEntities();
}
@Test
public void testGetEntitiesWithFromId() throws IOException {
super.testGetEntitiesWithFromId();
}
@Test
public void testGetEntitiesWithFromTs() throws IOException {
super.testGetEntitiesWithFromTs();
}
@Test
public void testGetEntitiesWithPrimaryFilters() throws IOException {
super.testGetEntitiesWithPrimaryFilters();
}
@Test
public void testGetEntitiesWithSecondaryFilters() throws IOException {
super.testGetEntitiesWithSecondaryFilters();
}
@Test
public void testGetEvents() throws IOException {
super.testGetEvents();
}
@Test
public void testCacheSizes() {
Configuration conf = new Configuration();
assertEquals(10000, LeveldbTimelineStore.getStartTimeReadCacheSize(conf));
assertEquals(10000, LeveldbTimelineStore.getStartTimeWriteCacheSize(conf));
conf.setInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
10001);
assertEquals(10001, LeveldbTimelineStore.getStartTimeReadCacheSize(conf));
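    // Use a fresh configuration so the read-cache override does not leak
    // into the write-cache assertion below.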
conf = new Configuration();
conf.setInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
10002);
assertEquals(10002, LeveldbTimelineStore.getStartTimeWriteCacheSize(conf));
}
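  /**
   * Invokes the store's deleteNextEntity with fresh leveldb iterators,
   * translating any DBException into an IOException.
   */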
private boolean deleteNextEntity(String entityType, byte[] ts)
throws IOException, InterruptedException {
LeveldbIterator iterator = null;
LeveldbIterator pfIterator = null;
try {
iterator = ((LeveldbTimelineStore)store).getDbIterator(false);
pfIterator = ((LeveldbTimelineStore)store).getDbIterator(false);
return ((LeveldbTimelineStore)store).deleteNextEntity(entityType, ts,
iterator, pfIterator, false);
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(null, iterator, pfIterator);
}
}
@Test
public void testGetEntityTypes() throws IOException {
List<String> entityTypes = ((LeveldbTimelineStore)store).getEntityTypes();
assertEquals(7, entityTypes.size());
assertEquals("ACL_ENTITY_TYPE_1", entityTypes.get(0));
assertEquals("OLD_ENTITY_TYPE_1", entityTypes.get(1));
assertEquals(entityType1, entityTypes.get(2));
assertEquals(entityType2, entityTypes.get(3));
assertEquals(entityType4, entityTypes.get(4));
assertEquals(entityType5, entityTypes.get(5));
}
@Test
public void testDeleteEntities() throws IOException, InterruptedException {
assertEquals(3, getEntities("type_1").size());
assertEquals(1, getEntities("type_2").size());
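    // A timestamp earlier than every type_1 start time deletes nothing.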
    assertFalse(deleteNextEntity(entityType1,
        writeReverseOrderedLong(60L)));
assertEquals(3, getEntities("type_1").size());
assertEquals(1, getEntities("type_2").size());
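    // A timestamp at the newest type_1 start time deletes the next
    // matching entity (id_1).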
    assertTrue(deleteNextEntity(entityType1,
        writeReverseOrderedLong(123L)));
List<TimelineEntity> entities = getEntities("type_2");
assertEquals(1, entities.size());
verifyEntityInfo(entityId2, entityType2, events2, Collections.singletonMap(
entityType1, Collections.singleton(entityId1b)), EMPTY_PRIMARY_FILTERS,
EMPTY_MAP, entities.get(0), domainId1);
entities = getEntitiesWithPrimaryFilter("type_1", userFilter);
assertEquals(2, entities.size());
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
// can retrieve entities across domains
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId2);
((LeveldbTimelineStore)store).discardOldEntities(0L);
assertEquals(2, getEntities("type_1").size());
assertEquals(0, getEntities("type_2").size());
assertEquals(6, ((LeveldbTimelineStore)store).getEntityTypes().size());
((LeveldbTimelineStore)store).discardOldEntities(123L);
assertEquals(0, getEntities("type_1").size());
assertEquals(0, getEntities("type_2").size());
assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
assertEquals(0, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
}
@Test
public void testDeleteEntitiesPrimaryFilters()
throws IOException, InterruptedException {
Map<String, Set<Object>> primaryFilter =
Collections.singletonMap("user", Collections.singleton(
(Object) "otheruser"));
TimelineEntities atsEntities = new TimelineEntities();
atsEntities.setEntities(Collections.singletonList(createEntity(entityId1b,
        entityType1, 789L, Collections.singletonList(ev2), null, primaryFilter,
null, domainId1)));
TimelinePutResponse response = store.put(atsEntities);
assertEquals(0, response.getErrors().size());
NameValuePair pfPair = new NameValuePair("user", "otheruser");
List<TimelineEntity> entities = getEntitiesWithPrimaryFilter("type_1",
pfPair);
assertEquals(1, entities.size());
verifyEntityInfo(entityId1b, entityType1, Collections.singletonList(ev2),
EMPTY_REL_ENTITIES, primaryFilter, EMPTY_MAP, entities.get(0),
domainId1);
entities = getEntitiesWithPrimaryFilter("type_1", userFilter);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
((LeveldbTimelineStore)store).discardOldEntities(-123L);
assertEquals(1, getEntitiesWithPrimaryFilter("type_1", pfPair).size());
assertEquals(3, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
((LeveldbTimelineStore)store).discardOldEntities(123L);
assertEquals(0, getEntities("type_1").size());
assertEquals(0, getEntities("type_2").size());
assertEquals(0, ((LeveldbTimelineStore)store).getEntityTypes().size());
assertEquals(0, getEntitiesWithPrimaryFilter("type_1", pfPair).size());
assertEquals(0, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
}
@Test
public void testFromTsWithDeletion()
throws IOException, InterruptedException {
    long fromTs = System.currentTimeMillis();
    assertEquals(3, getEntitiesFromTs("type_1", fromTs).size());
    assertEquals(1, getEntitiesFromTs("type_2", fromTs).size());
    assertEquals(3, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
        fromTs).size());
    ((LeveldbTimelineStore)store).discardOldEntities(123L);
    assertEquals(0, getEntitiesFromTs("type_1", fromTs).size());
    assertEquals(0, getEntitiesFromTs("type_2", fromTs).size());
    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
        fromTs).size());
    assertEquals(0, getEntities("type_1").size());
    assertEquals(0, getEntities("type_2").size());
    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
        fromTs).size());
    // Reloading the test data must not make entities visible to reads
    // anchored at the pre-deletion timestamp, though plain reads see them.
    loadTestEntityData();
    assertEquals(0, getEntitiesFromTs("type_1", fromTs).size());
    assertEquals(0, getEntitiesFromTs("type_2", fromTs).size());
    assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
        fromTs).size());
    assertEquals(3, getEntities("type_1").size());
    assertEquals(1, getEntities("type_2").size());
    assertEquals(3, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
}
@Test
public void testCheckVersion() throws IOException {
LeveldbTimelineStore dbStore = (LeveldbTimelineStore) store;
// default version
Version defaultVersion = dbStore.getCurrentVersion();
Assert.assertEquals(defaultVersion, dbStore.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
dbStore.storeVersion(compatibleVersion);
Assert.assertEquals(compatibleVersion, dbStore.loadVersion());
restartTimelineStore();
dbStore = (LeveldbTimelineStore) store;
// overwrite the compatible version
Assert.assertEquals(defaultVersion, dbStore.loadVersion());
// incompatible version
Version incompatibleVersion = Version.newInstance(
defaultVersion.getMajorVersion() + 1, defaultVersion.getMinorVersion());
dbStore.storeVersion(incompatibleVersion);
try {
restartTimelineStore();
Assert.fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",
e.getMessage().contains("Incompatible version for timeline store"));
}
}
@Test
public void testValidateConfig() throws IOException {
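    // Each invalid value must make service startup fail fast with an
    // IllegalArgumentException naming the offending property.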
Configuration copyConfig = new YarnConfiguration(config);
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS, 0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, 0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, -1);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig
.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert
.assertTrue(e
.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig
.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert
.assertTrue(e
.getMessage()
.contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE));
}
config = copyConfig;
restartTimelineStore();
}
private void restartTimelineStore() throws IOException {
// need to close so leveldb releases database lock
if (store != null) {
store.close();
}
store = new LeveldbTimelineStore();
store.init(config);
store.start();
}
@Test
public void testGetDomain() throws IOException {
super.testGetDomain();
}
@Test
public void testGetDomains() throws IOException {
super.testGetDomains();
}
@Test
public void testRelatingToNonExistingEntity() throws IOException {
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("TEST_ENTITY_TYPE_1");
entityToStore.setEntityId("TEST_ENTITY_ID_1");
entityToStore.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entityToStore.addRelatedEntity("TEST_ENTITY_TYPE_2", "TEST_ENTITY_ID_2");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("TEST_ENTITY_ID_2", "TEST_ENTITY_TYPE_2", null);
Assert.assertNotNull(entityToGet);
Assert.assertEquals("DEFAULT", entityToGet.getDomainId());
Assert.assertEquals("TEST_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
Assert.assertEquals("TEST_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
}
@Test
public void testRelatingToOldEntityWithoutDomainId() throws IOException {
// New entity is put in the default domain
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("NEW_ENTITY_TYPE_1");
entityToStore.setEntityId("NEW_ENTITY_ID_1");
entityToStore.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entityToStore.addRelatedEntity("OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
Assert.assertNotNull(entityToGet);
Assert.assertNull(entityToGet.getDomainId());
Assert.assertEquals("NEW_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
Assert.assertEquals("NEW_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
// New entity is not put in the default domain
entityToStore = new TimelineEntity();
entityToStore.setEntityType("NEW_ENTITY_TYPE_2");
entityToStore.setEntityId("NEW_ENTITY_ID_2");
entityToStore.setDomainId("NON_DEFAULT");
entityToStore.addRelatedEntity("OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1");
entities = new TimelineEntities();
entities.addEntity(entityToStore);
TimelinePutResponse response = store.put(entities);
Assert.assertEquals(1, response.getErrors().size());
Assert.assertEquals(TimelinePutError.FORBIDDEN_RELATION,
response.getErrors().get(0).getErrorCode());
entityToGet =
store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
Assert.assertNotNull(entityToGet);
Assert.assertNull(entityToGet.getDomainId());
// Still have one related entity
Assert.assertEquals(1, entityToGet.getRelatedEntities().keySet().size());
Assert.assertEquals(1, entityToGet.getRelatedEntities().values()
.iterator().next().size());
}
}
| 18,535 | 38.606838 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestTimelineDataManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestTimelineDataManager extends TimelineStoreTestUtils {
private FileContext fsContext;
private File fsPath;
  private TimelineDataManager dataManager;
private static TimelineACLsManager aclsManager;
private static AdminACLsManager adminACLsManager;
@Before
public void setup() throws Exception {
fsPath = new File("target", this.getClass().getSimpleName() +
"-tmpDir").getAbsoluteFile();
fsContext = FileContext.getLocalFSFileContext();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
fsPath.getAbsolutePath());
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, false);
store = new LeveldbTimelineStore();
store.init(conf);
store.start();
loadTestEntityData();
loadVerificationEntityData();
loadTestDomainData();
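    // The ACLs manager is created while ACLs are disabled; the config is
    // then flipped so tests can opt in via setAdminACLsManager().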
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
aclsManager = new TimelineACLsManager(conf);
    dataManager = new TimelineDataManager(store, aclsManager);
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
    dataManager.init(conf);
adminACLsManager = new AdminACLsManager(conf);
}
@After
public void tearDown() throws Exception {
if (store != null) {
store.stop();
}
if (fsContext != null) {
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
}
}
@Test
public void testGetOldEntityWithOutDomainId() throws Exception {
    TimelineEntity entity = dataManager.getEntity(
"OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1", null,
UserGroupInformation.getCurrentUser());
Assert.assertNotNull(entity);
Assert.assertEquals("OLD_ENTITY_ID_1", entity.getEntityId());
Assert.assertEquals("OLD_ENTITY_TYPE_1", entity.getEntityType());
Assert.assertEquals(
TimelineDataManager.DEFAULT_DOMAIN_ID, entity.getDomainId());
}
@Test
public void testGetEntitiesAclEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
aclsManager.setAdminACLsManager(adminACLsManager);
try {
      TimelineEntities entities = dataManager.getEntities(
          "ACL_ENTITY_TYPE_1", null, null, null, null, null, null, 1L, null,
UserGroupInformation.createUserForTesting("owner_1", new String[] {"group1"}));
Assert.assertEquals(1, entities.getEntities().size());
Assert.assertEquals("ACL_ENTITY_ID_11",
entities.getEntities().get(0).getEntityId());
} finally {
aclsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testGetOldEntitiesWithOutDomainId() throws Exception {
    TimelineEntities entities = dataManager.getEntities(
"OLD_ENTITY_TYPE_1", null, null, null, null, null, null, null, null,
UserGroupInformation.getCurrentUser());
Assert.assertEquals(2, entities.getEntities().size());
Assert.assertEquals("OLD_ENTITY_ID_2",
entities.getEntities().get(0).getEntityId());
Assert.assertEquals("OLD_ENTITY_TYPE_1",
entities.getEntities().get(0).getEntityType());
Assert.assertEquals(TimelineDataManager.DEFAULT_DOMAIN_ID,
entities.getEntities().get(0).getDomainId());
Assert.assertEquals("OLD_ENTITY_ID_1",
entities.getEntities().get(1).getEntityId());
Assert.assertEquals("OLD_ENTITY_TYPE_1",
entities.getEntities().get(1).getEntityType());
Assert.assertEquals(TimelineDataManager.DEFAULT_DOMAIN_ID,
entities.getEntities().get(1).getDomainId());
}
@Test
public void testUpdatingOldEntityWithoutDomainId() throws Exception {
// Set the domain to the default domain when updating
TimelineEntity entity = new TimelineEntity();
entity.setEntityType("OLD_ENTITY_TYPE_1");
entity.setEntityId("OLD_ENTITY_ID_1");
entity.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entity.addOtherInfo("NEW_OTHER_INFO_KEY", "NEW_OTHER_INFO_VALUE");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entity);
    TimelinePutResponse response = dataManager.postEntities(
entities, UserGroupInformation.getCurrentUser());
Assert.assertEquals(0, response.getErrors().size());
entity = store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
Assert.assertNotNull(entity);
// Even in leveldb, the domain is updated to the default domain Id
Assert.assertEquals(
TimelineDataManager.DEFAULT_DOMAIN_ID, entity.getDomainId());
Assert.assertEquals(1, entity.getOtherInfo().size());
Assert.assertEquals("NEW_OTHER_INFO_KEY",
entity.getOtherInfo().keySet().iterator().next());
Assert.assertEquals("NEW_OTHER_INFO_VALUE",
entity.getOtherInfo().values().iterator().next());
// Set the domain to the non-default domain when updating
entity = new TimelineEntity();
entity.setEntityType("OLD_ENTITY_TYPE_1");
entity.setEntityId("OLD_ENTITY_ID_2");
entity.setDomainId("NON_DEFAULT");
entity.addOtherInfo("NEW_OTHER_INFO_KEY", "NEW_OTHER_INFO_VALUE");
entities = new TimelineEntities();
entities.addEntity(entity);
    response = dataManager.postEntities(
entities, UserGroupInformation.getCurrentUser());
Assert.assertEquals(1, response.getErrors().size());
Assert.assertEquals(TimelinePutResponse.TimelinePutError.ACCESS_DENIED,
response.getErrors().get(0).getErrorCode());
entity = store.getEntity("OLD_ENTITY_ID_2", "OLD_ENTITY_TYPE_1", null);
Assert.assertNotNull(entity);
// In leveldb, the domain Id is still null
Assert.assertNull(entity.getDomainId());
// Updating is not executed
Assert.assertEquals(0, entity.getOtherInfo().size());
}
}
| 7,344 | 40.732955 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestRollingLevelDB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.iq80.leveldb.DB;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/** Test class for verification of RollingLevelDB. */
public class TestRollingLevelDB {
private Configuration conf = new YarnConfiguration();
private FileSystem lfs;
private MyRollingLevelDB rollingLevelDB;
/** RollingLevelDB for testing that has a setting current time. */
public static class MyRollingLevelDB extends RollingLevelDB {
private long currentTimeMillis;
MyRollingLevelDB() {
super("Test");
this.currentTimeMillis = System.currentTimeMillis();
}
@Override
protected long currentTimeMillis() {
return currentTimeMillis;
}
public void setCurrentTimeMillis(long time) {
this.currentTimeMillis = time;
}
  }
@Before
public void setup() throws Exception {
lfs = FileSystem.getLocal(conf);
File fsPath = new File("target", this.getClass().getSimpleName() +
"-tmpDir").getAbsoluteFile();
conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
fsPath.getAbsolutePath());
lfs.delete(new Path(fsPath.getAbsolutePath()), true);
rollingLevelDB = new MyRollingLevelDB();
}
@Test
public void testInsertAfterRollPeriodRollsDB() throws Exception {
rollingLevelDB.init(conf);
long now = rollingLevelDB.currentTimeMillis();
DB db = rollingLevelDB.getDBForStartTime(now);
long startTime = rollingLevelDB.getStartTimeFor(db);
Assert.assertEquals("Received level db for incorrect start time",
rollingLevelDB.computeCurrentCheckMillis(now),
startTime);
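    // Advance the mock clock past the roll period; a start time in the new
    // period must map to a freshly rolled DB.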
now = rollingLevelDB.getNextRollingTimeMillis();
rollingLevelDB.setCurrentTimeMillis(now);
db = rollingLevelDB.getDBForStartTime(now);
startTime = rollingLevelDB.getStartTimeFor(db);
Assert.assertEquals("Received level db for incorrect start time",
rollingLevelDB.computeCurrentCheckMillis(now),
startTime);
}
@Test
public void testInsertForPreviousPeriodAfterRollPeriodRollsDB()
throws Exception {
rollingLevelDB.init(conf);
long now = rollingLevelDB.currentTimeMillis();
now = rollingLevelDB.computeCurrentCheckMillis(now);
rollingLevelDB.setCurrentTimeMillis(now);
DB db = rollingLevelDB.getDBForStartTime(now - 1);
long startTime = rollingLevelDB.getStartTimeFor(db);
Assert.assertEquals("Received level db for incorrect start time",
rollingLevelDB.computeCurrentCheckMillis(now - 1),
startTime);
}
}
| 3,557 | 34.227723 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestGenericObjectMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TestGenericObjectMapper {
@Test
public void testEncoding() {
testEncoding(Long.MAX_VALUE);
testEncoding(Long.MIN_VALUE);
    testEncoding(0L);
    testEncoding(128L);
    testEncoding(256L);
    testEncoding(512L);
    testEncoding(-256L);
}
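  /**
   * Round-trips a long through the reverse-ordered encoding and checks that
   * byte-wise ordering is the reverse of numeric ordering.
   */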
private static void testEncoding(long l) {
byte[] b = GenericObjectMapper.writeReverseOrderedLong(l);
assertEquals("error decoding", l,
GenericObjectMapper.readReverseOrderedLong(b, 0));
byte[] buf = new byte[16];
System.arraycopy(b, 0, buf, 5, 8);
assertEquals("error decoding at offset", l,
GenericObjectMapper.readReverseOrderedLong(buf, 5));
if (l > Long.MIN_VALUE) {
byte[] a = GenericObjectMapper.writeReverseOrderedLong(l-1);
assertEquals("error preserving ordering", 1,
WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length));
}
if (l < Long.MAX_VALUE) {
byte[] c = GenericObjectMapper.writeReverseOrderedLong(l+1);
assertEquals("error preserving ordering", 1,
WritableComparator.compareBytes(b, 0, b.length, c, 0, c.length));
}
}
private static void verify(Object o) throws IOException {
assertEquals(o, GenericObjectMapper.read(GenericObjectMapper.write(o)));
}
@Test
public void testValueTypes() throws IOException {
verify(Integer.MAX_VALUE);
verify(Integer.MIN_VALUE);
assertEquals(Integer.MAX_VALUE, GenericObjectMapper.read(
GenericObjectMapper.write((long) Integer.MAX_VALUE)));
assertEquals(Integer.MIN_VALUE, GenericObjectMapper.read(
GenericObjectMapper.write((long) Integer.MIN_VALUE)));
    verify((long) Integer.MAX_VALUE + 1L);
    verify((long) Integer.MIN_VALUE - 1L);
    verify(Long.MAX_VALUE);
    verify(Long.MIN_VALUE);
    assertEquals(42, GenericObjectMapper.read(GenericObjectMapper.write(42L)));
verify(42);
verify(1.23);
verify("abc");
verify(true);
List<String> list = new ArrayList<String>();
list.add("123");
list.add("abc");
verify(list);
Map<String,String> map = new HashMap<String,String>();
map.put("k1","v1");
map.put("k2","v2");
verify(map);
}
}
| 3,511 | 33.097087 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestRollingLevelDBTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.records.Version;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.log.Log;
/** Test class to verify RollingLevelDBTimelineStore. */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TestRollingLevelDBTimelineStore extends TimelineStoreTestUtils {
private FileContext fsContext;
private File fsPath;
private Configuration config = new YarnConfiguration();
@Before
public void setup() throws Exception {
fsContext = FileContext.getLocalFSFileContext();
fsPath = new File("target", this.getClass().getSimpleName() +
"-tmpDir").getAbsoluteFile();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
config.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
fsPath.getAbsolutePath());
config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, false);
store = new RollingLevelDBTimelineStore();
store.init(config);
store.start();
loadTestEntityData();
loadVerificationEntityData();
loadTestDomainData();
}
@After
public void tearDown() throws Exception {
store.stop();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
}
@Test
public void testRootDirPermission() throws IOException {
FileSystem fs = FileSystem.getLocal(new YarnConfiguration());
FileStatus file = fs.getFileStatus(new Path(fsPath.getAbsolutePath(),
RollingLevelDBTimelineStore.FILENAME));
assertNotNull(file);
assertEquals(RollingLevelDBTimelineStore.LEVELDB_DIR_UMASK,
file.getPermission());
}
@Test
public void testGetSingleEntity() throws IOException {
super.testGetSingleEntity();
((RollingLevelDBTimelineStore)store).clearStartTimeCache();
super.testGetSingleEntity();
loadTestEntityData();
}
@Test
public void testGetEntities() throws IOException {
super.testGetEntities();
}
@Test
public void testGetEntitiesWithFromId() throws IOException {
super.testGetEntitiesWithFromId();
}
@Test
public void testGetEntitiesWithFromTs() throws IOException {
    // getEntitiesWithFromTs is not supported by the rolling store, so the
    // parent test is intentionally skipped.
}
@Test
public void testGetEntitiesWithPrimaryFilters() throws IOException {
super.testGetEntitiesWithPrimaryFilters();
}
@Test
public void testGetEntitiesWithSecondaryFilters() throws IOException {
super.testGetEntitiesWithSecondaryFilters();
}
@Test
public void testGetEvents() throws IOException {
super.testGetEvents();
}
@Test
public void testCacheSizes() {
Configuration conf = new Configuration();
assertEquals(10000,
RollingLevelDBTimelineStore.getStartTimeReadCacheSize(conf));
assertEquals(10000,
RollingLevelDBTimelineStore.getStartTimeWriteCacheSize(conf));
conf.setInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
10001);
assertEquals(10001,
RollingLevelDBTimelineStore.getStartTimeReadCacheSize(conf));
conf = new Configuration();
conf.setInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
10002);
assertEquals(10002,
RollingLevelDBTimelineStore.getStartTimeWriteCacheSize(conf));
}
@Test
public void testCheckVersion() throws IOException {
RollingLevelDBTimelineStore dbStore = (RollingLevelDBTimelineStore) store;
// default version
Version defaultVersion = dbStore.getCurrentVersion();
Assert.assertEquals(defaultVersion, dbStore.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
dbStore.storeVersion(compatibleVersion);
Assert.assertEquals(compatibleVersion, dbStore.loadVersion());
restartTimelineStore();
dbStore = (RollingLevelDBTimelineStore) store;
// overwrite the compatible version
Assert.assertEquals(defaultVersion, dbStore.loadVersion());
// incompatible version
Version incompatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion() + 1,
defaultVersion.getMinorVersion());
dbStore.storeVersion(incompatibleVersion);
try {
restartTimelineStore();
Assert.fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",
e.getMessage().contains("Incompatible version for timeline store"));
}
}
@Test
public void testValidateConfig() throws IOException {
Configuration copyConfig = new YarnConfiguration(config);
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS, 0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, 0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, -1);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration
.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration
.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
0);
config = newConfig;
restartTimelineStore();
Assert.fail();
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration
.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE));
}
config = copyConfig;
restartTimelineStore();
}
private void restartTimelineStore() throws IOException {
// need to close so leveldb releases database lock
if (store != null) {
store.close();
}
store = new RollingLevelDBTimelineStore();
store.init(config);
store.start();
}
@Test
public void testGetDomain() throws IOException {
super.testGetDomain();
}
@Test
public void testGetDomains() throws IOException {
super.testGetDomains();
}
@Test
public void testRelatingToNonExistingEntity() throws IOException {
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("TEST_ENTITY_TYPE_1");
entityToStore.setEntityId("TEST_ENTITY_ID_1");
entityToStore.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entityToStore.addRelatedEntity("TEST_ENTITY_TYPE_2", "TEST_ENTITY_ID_2");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("TEST_ENTITY_ID_2", "TEST_ENTITY_TYPE_2", null);
Assert.assertNotNull(entityToGet);
Assert.assertEquals("DEFAULT", entityToGet.getDomainId());
Assert.assertEquals("TEST_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
Assert.assertEquals("TEST_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
}
@Test
public void testRelatingToEntityInSamePut() throws IOException {
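    // Both the relating and the related entity arrive in the same put; the
    // related entity must keep its explicitly supplied domain.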
TimelineEntity entityToRelate = new TimelineEntity();
entityToRelate.setEntityType("TEST_ENTITY_TYPE_2");
entityToRelate.setEntityId("TEST_ENTITY_ID_2");
entityToRelate.setDomainId("TEST_DOMAIN");
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("TEST_ENTITY_TYPE_1");
entityToStore.setEntityId("TEST_ENTITY_ID_1");
entityToStore.setDomainId("TEST_DOMAIN");
entityToStore.addRelatedEntity("TEST_ENTITY_TYPE_2", "TEST_ENTITY_ID_2");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
entities.addEntity(entityToRelate);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("TEST_ENTITY_ID_2", "TEST_ENTITY_TYPE_2", null);
Assert.assertNotNull(entityToGet);
Assert.assertEquals("TEST_DOMAIN", entityToGet.getDomainId());
Assert.assertEquals("TEST_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
Assert.assertEquals("TEST_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
}
@Test
public void testRelatingToOldEntityWithoutDomainId() throws IOException {
// New entity is put in the default domain
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("NEW_ENTITY_TYPE_1");
entityToStore.setEntityId("NEW_ENTITY_ID_1");
entityToStore.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entityToStore.addRelatedEntity("OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
Assert.assertNotNull(entityToGet);
Assert.assertEquals("DEFAULT", entityToGet.getDomainId());
Assert.assertEquals("NEW_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
Assert.assertEquals("NEW_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
// New entity is not put in the default domain
entityToStore = new TimelineEntity();
entityToStore.setEntityType("NEW_ENTITY_TYPE_2");
entityToStore.setEntityId("NEW_ENTITY_ID_2");
entityToStore.setDomainId("NON_DEFAULT");
entityToStore.addRelatedEntity("OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1");
entities = new TimelineEntities();
entities.addEntity(entityToStore);
TimelinePutResponse response = store.put(entities);
Assert.assertEquals(1, response.getErrors().size());
Assert.assertEquals(TimelinePutError.FORBIDDEN_RELATION,
response.getErrors().get(0).getErrorCode());
entityToGet =
store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
Assert.assertNotNull(entityToGet);
Assert.assertEquals("DEFAULT", entityToGet.getDomainId());
// Still have one related entity
Assert.assertEquals(1, entityToGet.getRelatedEntities().keySet().size());
Assert.assertEquals(1, entityToGet.getRelatedEntities().values()
.iterator().next().size());
}
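  // Deliberately not annotated with @Test: this micro-benchmark is run
  // manually through main() below.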
public void testStorePerformance() throws IOException {
TimelineEntity entityToStorePrep = new TimelineEntity();
entityToStorePrep.setEntityType("TEST_ENTITY_TYPE_PREP");
entityToStorePrep.setEntityId("TEST_ENTITY_ID_PREP");
entityToStorePrep.setDomainId("TEST_DOMAIN");
entityToStorePrep.addRelatedEntity("TEST_ENTITY_TYPE_2",
"TEST_ENTITY_ID_2");
entityToStorePrep.setStartTime(0L);
TimelineEntities entitiesPrep = new TimelineEntities();
entitiesPrep.addEntity(entityToStorePrep);
store.put(entitiesPrep);
long start = System.currentTimeMillis();
int num = 1000000;
Log.info("Start test for " + num);
final String tezTaskAttemptId = "TEZ_TA";
final String tezEntityId = "attempt_1429158534256_0001_1_00_000000_";
final String tezTaskId = "TEZ_T";
final String tezDomainId = "Tez_ATS_application_1429158534256_0001";
TimelineEntity entityToStore = new TimelineEntity();
TimelineEvent startEvt = new TimelineEvent();
entityToStore.setEntityType(tezTaskAttemptId);
startEvt.setEventType("TASK_ATTEMPT_STARTED");
startEvt.setTimestamp(0);
entityToStore.addEvent(startEvt);
entityToStore.setDomainId(tezDomainId);
entityToStore.addPrimaryFilter("status", "SUCCEEDED");
entityToStore.addPrimaryFilter("applicationId",
"application_1429158534256_0001");
entityToStore.addPrimaryFilter("TEZ_VERTEX_ID",
"vertex_1429158534256_0001_1_00");
entityToStore.addPrimaryFilter("TEZ_DAG_ID", "dag_1429158534256_0001_1");
entityToStore.addPrimaryFilter("TEZ_TASK_ID",
"task_1429158534256_0001_1_00_000000");
entityToStore.setStartTime(0L);
entityToStore.addOtherInfo("startTime", 0);
entityToStore.addOtherInfo("inProgressLogsURL",
"localhost:8042/inProgressLogsURL");
entityToStore.addOtherInfo("completedLogsURL", "");
entityToStore.addOtherInfo("nodeId", "localhost:54450");
entityToStore.addOtherInfo("nodeHttpAddress", "localhost:8042");
entityToStore.addOtherInfo("containerId",
"container_1429158534256_0001_01_000002");
entityToStore.addOtherInfo("status", "RUNNING");
entityToStore.addRelatedEntity(tezTaskId, "TEZ_TASK_ID_1");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
for (int i = 0; i < num; ++i) {
entityToStore.setEntityId(tezEntityId + i);
store.put(entities);
}
long duration = System.currentTimeMillis() - start;
Log.info("Duration for " + num + ": " + duration);
}
public static void main(String[] args) throws Exception {
TestRollingLevelDBTimelineStore store =
new TestRollingLevelDBTimelineStore();
store.setup();
store.testStorePerformance();
store.tearDown();
}
}
| 16,167 | 36.775701 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
public class TimelineStoreTestUtils {
protected static final List<TimelineEvent> EMPTY_EVENTS =
Collections.emptyList();
protected static final Map<String, Object> EMPTY_MAP =
Collections.emptyMap();
protected static final Map<String, Set<Object>> EMPTY_PRIMARY_FILTERS =
Collections.emptyMap();
protected static final Map<String, Set<String>> EMPTY_REL_ENTITIES =
Collections.emptyMap();
protected TimelineStore store;
protected String entityId1;
protected String entityType1;
protected String entityId1b;
protected String entityId2;
protected String entityType2;
protected String entityId4;
protected String entityType4;
protected String entityId5;
protected String entityType5;
protected String entityId6;
protected String entityId7;
protected String entityType7;
protected Map<String, Set<Object>> primaryFilters;
protected Map<String, Object> secondaryFilters;
protected Map<String, Object> allFilters;
protected Map<String, Object> otherInfo;
protected Map<String, Set<String>> relEntityMap;
protected Map<String, Set<String>> relEntityMap2;
protected NameValuePair userFilter;
protected NameValuePair numericFilter1;
protected NameValuePair numericFilter2;
protected NameValuePair numericFilter3;
protected Collection<NameValuePair> goodTestingFilters;
protected Collection<NameValuePair> badTestingFilters;
protected TimelineEvent ev1;
protected TimelineEvent ev2;
protected TimelineEvent ev3;
protected TimelineEvent ev4;
protected Map<String, Object> eventInfo;
protected List<TimelineEvent> events1;
protected List<TimelineEvent> events2;
protected long beforeTs;
protected String domainId1;
protected String domainId2;
/**
   * Load test entity data into the store under test.
*/
protected void loadTestEntityData() throws IOException {
    beforeTs = System.currentTimeMillis() - 1;
TimelineEntities entities = new TimelineEntities();
Map<String, Set<Object>> primaryFilters =
new HashMap<String, Set<Object>>();
Set<Object> l1 = new HashSet<Object>();
l1.add("username");
Set<Object> l2 = new HashSet<Object>();
l2.add(Integer.MAX_VALUE);
Set<Object> l3 = new HashSet<Object>();
l3.add("123abc");
Set<Object> l4 = new HashSet<Object>();
    l4.add((long) Integer.MAX_VALUE + 1L);
primaryFilters.put("user", l1);
primaryFilters.put("appname", l2);
primaryFilters.put("other", l3);
primaryFilters.put("long", l4);
Map<String, Object> secondaryFilters = new HashMap<String, Object>();
secondaryFilters.put("startTime", 123456);
secondaryFilters.put("status", "RUNNING");
Map<String, Object> otherInfo1 = new HashMap<String, Object>();
otherInfo1.put("info1", "val1");
otherInfo1.putAll(secondaryFilters);
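    // Note: the locals below intentionally shadow the instance fields; the
    // fields themselves are populated by loadVerificationEntityData().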
String entityId1 = "id_1";
String entityType1 = "type_1";
String entityId1b = "id_2";
String entityId2 = "id_2";
String entityType2 = "type_2";
String entityId4 = "id_4";
String entityType4 = "type_4";
String entityId5 = "id_5";
String entityType5 = "type_5";
String entityId6 = "id_6";
String entityId7 = "id_7";
String entityType7 = "type_7";
Map<String, Set<String>> relatedEntities =
new HashMap<String, Set<String>>();
relatedEntities.put(entityType2, Collections.singleton(entityId2));
    TimelineEvent ev3 = createEvent(789L, "launch_event", null);
    TimelineEvent ev4 = createEvent(0L, "init_event", null);
List<TimelineEvent> events = new ArrayList<TimelineEvent>();
events.add(ev3);
events.add(ev4);
entities.setEntities(Collections.singletonList(createEntity(entityId2,
entityType2, null, events, null, null, null, "domain_id_1")));
TimelinePutResponse response = store.put(entities);
assertEquals(0, response.getErrors().size());
    TimelineEvent ev1 = createEvent(123L, "start_event", null);
entities.setEntities(Collections.singletonList(createEntity(entityId1,
        entityType1, 123L, Collections.singletonList(ev1),
relatedEntities, primaryFilters, otherInfo1, "domain_id_1")));
response = store.put(entities);
assertEquals(0, response.getErrors().size());
entities.setEntities(Collections.singletonList(createEntity(entityId1b,
entityType1, null, Collections.singletonList(ev1), relatedEntities,
primaryFilters, otherInfo1, "domain_id_1")));
response = store.put(entities);
assertEquals(0, response.getErrors().size());
Map<String, Object> eventInfo = new HashMap<String, Object>();
eventInfo.put("event info 1", "val1");
    TimelineEvent ev2 = createEvent(456L, "end_event", eventInfo);
Map<String, Object> otherInfo2 = new HashMap<String, Object>();
otherInfo2.put("info2", "val2");
entities.setEntities(Collections.singletonList(createEntity(entityId1,
entityType1, null, Collections.singletonList(ev2), null,
primaryFilters, otherInfo2, "domain_id_1")));
response = store.put(entities);
assertEquals(0, response.getErrors().size());
entities.setEntities(Collections.singletonList(createEntity(entityId1b,
        entityType1, 789L, Collections.singletonList(ev2), null,
primaryFilters, otherInfo2, "domain_id_1")));
response = store.put(entities);
assertEquals(0, response.getErrors().size());
entities.setEntities(Collections.singletonList(createEntity(
"badentityid", "badentity", null, null, null, null, otherInfo1,
"domain_id_1")));
response = store.put(entities);
assertEquals(1, response.getErrors().size());
TimelinePutError error = response.getErrors().get(0);
assertEquals("badentityid", error.getEntityId());
assertEquals("badentity", error.getEntityType());
assertEquals(TimelinePutError.NO_START_TIME, error.getErrorCode());
relatedEntities.clear();
relatedEntities.put(entityType5, Collections.singleton(entityId5));
entities.setEntities(Collections.singletonList(createEntity(entityId4,
        entityType4, 42L, null, relatedEntities, null, null,
"domain_id_1")));
response = store.put(entities);
relatedEntities.clear();
otherInfo1.put("info2", "val2");
entities.setEntities(Collections.singletonList(createEntity(entityId6,
        entityType1, 61L, null, relatedEntities, primaryFilters, otherInfo1,
"domain_id_2")));
response = store.put(entities);
relatedEntities.clear();
relatedEntities.put(entityType1, Collections.singleton(entityId1));
entities.setEntities(Collections.singletonList(createEntity(entityId7,
        entityType7, 62L, null, relatedEntities, null, null,
"domain_id_2")));
response = store.put(entities);
assertEquals(1, response.getErrors().size());
assertEquals(entityType7, response.getErrors().get(0).getEntityType());
assertEquals(entityId7, response.getErrors().get(0).getEntityId());
assertEquals(TimelinePutError.FORBIDDEN_RELATION,
response.getErrors().get(0).getErrorCode());
if (store instanceof LeveldbTimelineStore) {
LeveldbTimelineStore leveldb = (LeveldbTimelineStore) store;
      entities.setEntities(Collections.singletonList(createEntity(
          "OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", 63L, null, null, null, null,
          null)));
leveldb.putWithNoDomainId(entities);
      entities.setEntities(Collections.singletonList(createEntity(
          "OLD_ENTITY_ID_2", "OLD_ENTITY_TYPE_1", 64L, null, null, null, null,
          null)));
leveldb.putWithNoDomainId(entities);
}
}
/**
   * Load the expected values used to verify the entity data written by
   * {@link #loadTestEntityData()}.
*/
protected void loadVerificationEntityData() throws Exception {
userFilter = new NameValuePair("user", "username");
numericFilter1 = new NameValuePair("appname", Integer.MAX_VALUE);
    numericFilter2 = new NameValuePair("long", (long) Integer.MAX_VALUE + 1L);
numericFilter3 = new NameValuePair("other", "123abc");
goodTestingFilters = new ArrayList<NameValuePair>();
goodTestingFilters.add(new NameValuePair("appname", Integer.MAX_VALUE));
goodTestingFilters.add(new NameValuePair("status", "RUNNING"));
badTestingFilters = new ArrayList<NameValuePair>();
badTestingFilters.add(new NameValuePair("appname", Integer.MAX_VALUE));
badTestingFilters.add(new NameValuePair("status", "FINISHED"));
primaryFilters = new HashMap<String, Set<Object>>();
Set<Object> l1 = new HashSet<Object>();
l1.add("username");
Set<Object> l2 = new HashSet<Object>();
l2.add(Integer.MAX_VALUE);
Set<Object> l3 = new HashSet<Object>();
l3.add("123abc");
Set<Object> l4 = new HashSet<Object>();
    l4.add((long) Integer.MAX_VALUE + 1L);
primaryFilters.put("user", l1);
primaryFilters.put("appname", l2);
primaryFilters.put("other", l3);
primaryFilters.put("long", l4);
secondaryFilters = new HashMap<String, Object>();
secondaryFilters.put("startTime", 123456);
secondaryFilters.put("status", "RUNNING");
allFilters = new HashMap<String, Object>();
allFilters.putAll(secondaryFilters);
for (Entry<String, Set<Object>> pf : primaryFilters.entrySet()) {
for (Object o : pf.getValue()) {
allFilters.put(pf.getKey(), o);
}
}
otherInfo = new HashMap<String, Object>();
otherInfo.put("info1", "val1");
otherInfo.put("info2", "val2");
otherInfo.putAll(secondaryFilters);
entityId1 = "id_1";
entityType1 = "type_1";
entityId1b = "id_2";
entityId2 = "id_2";
entityType2 = "type_2";
entityId4 = "id_4";
entityType4 = "type_4";
entityId5 = "id_5";
entityType5 = "type_5";
entityId6 = "id_6";
entityId7 = "id_7";
entityType7 = "type_7";
ev1 = createEvent(123l, "start_event", null);
eventInfo = new HashMap<String, Object>();
eventInfo.put("event info 1", "val1");
ev2 = createEvent(456l, "end_event", eventInfo);
events1 = new ArrayList<TimelineEvent>();
events1.add(ev2);
events1.add(ev1);
    relEntityMap = new HashMap<String, Set<String>>();
Set<String> ids = new HashSet<String>();
ids.add(entityId1);
ids.add(entityId1b);
relEntityMap.put(entityType1, ids);
    relEntityMap2 = new HashMap<String, Set<String>>();
relEntityMap2.put(entityType4, Collections.singleton(entityId4));
ev3 = createEvent(789l, "launch_event", null);
ev4 = createEvent(0l, "init_event", null);
events2 = new ArrayList<TimelineEvent>();
events2.add(ev3);
events2.add(ev4);
domainId1 = "domain_id_1";
domainId2 = "domain_id_2";
}
private TimelineDomain domain1;
private TimelineDomain domain2;
private TimelineDomain domain3;
private long elapsedTime;
protected void loadTestDomainData() throws IOException {
domain1 = new TimelineDomain();
domain1.setId("domain_id_1");
domain1.setDescription("description_1");
domain1.setOwner("owner_1");
domain1.setReaders("reader_user_1 reader_group_1");
domain1.setWriters("writer_user_1 writer_group_1");
store.put(domain1);
domain2 = new TimelineDomain();
domain2.setId("domain_id_2");
domain2.setDescription("description_2");
domain2.setOwner("owner_2");
domain2.setReaders("reader_user_2 reader_group_2");
domain2.setWriters("writer_user_2 writer_group_2");
store.put(domain2);
    // Wait a second before updating domain2 so that its modified time
    // differs from its created time
elapsedTime = 1000;
try {
Thread.sleep(elapsedTime);
} catch (InterruptedException e) {
throw new IOException(e);
}
domain2.setDescription("description_3");
domain2.setOwner("owner_3");
domain2.setReaders("reader_user_3 reader_group_3");
domain2.setWriters("writer_user_3 writer_group_3");
store.put(domain2);
domain3 = new TimelineDomain();
domain3.setId("domain_id_4");
domain3.setDescription("description_4");
domain3.setOwner("owner_1");
domain3.setReaders("reader_user_4 reader_group_4");
domain3.setWriters("writer_user_4 writer_group_4");
store.put(domain3);
TimelineEntities entities = new TimelineEntities();
if (store instanceof LeveldbTimelineStore) {
LeveldbTimelineStore leveldb = (LeveldbTimelineStore) store;
entities.setEntities(Collections.singletonList(createEntity(
"ACL_ENTITY_ID_11", "ACL_ENTITY_TYPE_1", 63l, null, null, null, null,
"domain_id_4")));
leveldb.put(entities);
entities.setEntities(Collections.singletonList(createEntity(
"ACL_ENTITY_ID_22", "ACL_ENTITY_TYPE_1", 64l, null, null, null, null,
"domain_id_2")));
leveldb.put(entities);
}
}
public void testGetSingleEntity() throws IOException {
// test getting entity info
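    // "id_1" only exists under type_1, so looking it up under type_2 should
    // come back null (verifyEntityInfo with a null entityId asserts that).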
verifyEntityInfo(null, null, null, null, null, null,
store.getEntity("id_1", "type_2", EnumSet.allOf(Field.class)),
domainId1);
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, 123l, store.getEntity(entityId1,
entityType1, EnumSet.allOf(Field.class)), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, 123l, store.getEntity(entityId1b,
entityType1, EnumSet.allOf(Field.class)), domainId1);
verifyEntityInfo(entityId2, entityType2, events2, relEntityMap,
EMPTY_PRIMARY_FILTERS, EMPTY_MAP, 0l, store.getEntity(entityId2,
entityType2, EnumSet.allOf(Field.class)), domainId1);
verifyEntityInfo(entityId4, entityType4, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
EMPTY_PRIMARY_FILTERS, EMPTY_MAP, 42l, store.getEntity(entityId4,
entityType4, EnumSet.allOf(Field.class)), domainId1);
verifyEntityInfo(entityId5, entityType5, EMPTY_EVENTS, relEntityMap2,
EMPTY_PRIMARY_FILTERS, EMPTY_MAP, 42l, store.getEntity(entityId5,
entityType5, EnumSet.allOf(Field.class)), domainId1);
// test getting single fields
verifyEntityInfo(entityId1, entityType1, events1, null, null, null,
store.getEntity(entityId1, entityType1, EnumSet.of(Field.EVENTS)),
domainId1);
verifyEntityInfo(entityId1, entityType1, Collections.singletonList(ev2),
null, null, null, store.getEntity(entityId1, entityType1,
EnumSet.of(Field.LAST_EVENT_ONLY)), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, store.getEntity(entityId1b, entityType1,
null), domainId1);
verifyEntityInfo(entityId1, entityType1, null, null, primaryFilters, null,
store.getEntity(entityId1, entityType1,
EnumSet.of(Field.PRIMARY_FILTERS)), domainId1);
verifyEntityInfo(entityId1, entityType1, null, null, null, otherInfo,
store.getEntity(entityId1, entityType1, EnumSet.of(Field.OTHER_INFO)),
domainId1);
verifyEntityInfo(entityId2, entityType2, null, relEntityMap, null, null,
store.getEntity(entityId2, entityType2,
EnumSet.of(Field.RELATED_ENTITIES)), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, store.getEntity(entityId6, entityType1,
EnumSet.allOf(Field.class)), domainId2);
    // entityId7 was stored, but its forbidden cross-domain relation to
    // <entityType1, entityId1> was dropped
verifyEntityInfo(entityId7, entityType7, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
EMPTY_PRIMARY_FILTERS, EMPTY_MAP, store.getEntity(entityId7, entityType7,
EnumSet.allOf(Field.class)), domainId2);
}
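  /**
   * Convenience wrappers around the ten-argument store.getEntities() call.
   * Judging by the invocations below, the positional parameters are
   * (entityType, limit, windowStart, windowEnd, fromId, fromTs,
   * primaryFilter, secondaryFilters, fieldsToRetrieve, aclCheck); each
   * wrapper pins everything except the parameters under test.
   */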
protected List<TimelineEntity> getEntities(String entityType)
throws IOException {
return store.getEntities(entityType, null, null, null, null, null,
null, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesWithPrimaryFilter(
String entityType, NameValuePair primaryFilter) throws IOException {
return store.getEntities(entityType, null, null, null, null, null,
primaryFilter, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesFromId(String entityType,
String fromId) throws IOException {
return store.getEntities(entityType, null, null, null, fromId, null,
null, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesFromTs(String entityType,
long fromTs) throws IOException {
return store.getEntities(entityType, null, null, null, null, fromTs,
null, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesFromIdWithPrimaryFilter(
String entityType, NameValuePair primaryFilter, String fromId)
throws IOException {
return store.getEntities(entityType, null, null, null, fromId, null,
primaryFilter, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesFromTsWithPrimaryFilter(
String entityType, NameValuePair primaryFilter, long fromTs)
throws IOException {
return store.getEntities(entityType, null, null, null, null, fromTs,
primaryFilter, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesFromIdWithWindow(String entityType,
Long windowEnd, String fromId) throws IOException {
return store.getEntities(entityType, null, null, windowEnd, fromId, null,
null, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesFromIdWithPrimaryFilterAndWindow(
String entityType, Long windowEnd, String fromId,
NameValuePair primaryFilter) throws IOException {
return store.getEntities(entityType, null, null, windowEnd, fromId, null,
primaryFilter, null, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesWithFilters(String entityType,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters)
throws IOException {
return store.getEntities(entityType, null, null, null, null, null,
primaryFilter, secondaryFilters, null, null).getEntities();
}
protected List<TimelineEntity> getEntitiesWithFilters(String entityType,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fields) throws IOException {
return store.getEntities(entityType, null, null, null, null, null,
primaryFilter, secondaryFilters, fields, null).getEntities();
}
protected List<TimelineEntity> getEntities(String entityType, Long limit,
Long windowStart, Long windowEnd, NameValuePair primaryFilter,
EnumSet<Field> fields) throws IOException {
return store.getEntities(entityType, limit, windowStart, windowEnd, null,
null, primaryFilter, null, fields, null).getEntities();
}
public void testGetEntities() throws IOException {
// test getting entities
assertEquals("nonzero entities size for nonexistent type", 0,
getEntities("type_0").size());
assertEquals("nonzero entities size for nonexistent type", 0,
getEntities("type_3").size());
assertEquals("nonzero entities size for nonexistent type", 0,
getEntities("type_6").size());
assertEquals("nonzero entities size for nonexistent type", 0,
getEntitiesWithPrimaryFilter("type_0", userFilter).size());
assertEquals("nonzero entities size for nonexistent type", 0,
getEntitiesWithPrimaryFilter("type_3", userFilter).size());
assertEquals("nonzero entities size for nonexistent type", 0,
getEntitiesWithPrimaryFilter("type_6", userFilter).size());
List<TimelineEntity> entities = getEntities("type_1");
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntities("type_2");
assertEquals(1, entities.size());
verifyEntityInfo(entityId2, entityType2, events2, relEntityMap,
EMPTY_PRIMARY_FILTERS, EMPTY_MAP, entities.get(0), domainId1);
entities = getEntities("type_1", 1l, null, null, null,
EnumSet.allOf(Field.class));
assertEquals(1, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
entities = getEntities("type_1", 1l, 0l, null, null,
EnumSet.allOf(Field.class));
assertEquals(1, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
entities = getEntities("type_1", null, 234l, null, null,
EnumSet.allOf(Field.class));
assertEquals(0, entities.size());
entities = getEntities("type_1", null, 123l, null, null,
EnumSet.allOf(Field.class));
assertEquals(0, entities.size());
entities = getEntities("type_1", null, 234l, 345l, null,
EnumSet.allOf(Field.class));
assertEquals(0, entities.size());
entities = getEntities("type_1", null, null, 345l, null,
EnumSet.allOf(Field.class));
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntities("type_1", null, null, 123l, null,
EnumSet.allOf(Field.class));
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
}
public void testGetEntitiesWithFromId() throws IOException {
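    // fromId paging is inclusive: the page starts at the given entity and
    // continues through the remaining entities in the store's return order.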
List<TimelineEntity> entities = getEntitiesFromId("type_1", entityId1);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntitiesFromId("type_1", entityId1b);
assertEquals(2, entities.size());
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId2);
entities = getEntitiesFromId("type_1", entityId6);
assertEquals(1, entities.size());
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId2);
entities = getEntitiesFromIdWithWindow("type_1", 0l, entityId6);
assertEquals(0, entities.size());
entities = getEntitiesFromId("type_2", "a");
assertEquals(0, entities.size());
entities = getEntitiesFromId("type_2", entityId2);
assertEquals(1, entities.size());
verifyEntityInfo(entityId2, entityType2, events2, relEntityMap,
EMPTY_PRIMARY_FILTERS, EMPTY_MAP, entities.get(0), domainId1);
entities = getEntitiesFromIdWithWindow("type_2", -456l, null);
assertEquals(0, entities.size());
entities = getEntitiesFromIdWithWindow("type_2", -456l, "a");
assertEquals(0, entities.size());
entities = getEntitiesFromIdWithWindow("type_2", 0l, null);
assertEquals(1, entities.size());
entities = getEntitiesFromIdWithWindow("type_2", 0l, entityId2);
assertEquals(1, entities.size());
// same tests with primary filters
entities = getEntitiesFromIdWithPrimaryFilter("type_1", userFilter,
entityId1);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntitiesFromIdWithPrimaryFilter("type_1", userFilter,
entityId1b);
assertEquals(2, entities.size());
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId2);
entities = getEntitiesFromIdWithPrimaryFilter("type_1", userFilter,
entityId6);
assertEquals(1, entities.size());
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId2);
entities = getEntitiesFromIdWithPrimaryFilterAndWindow("type_1", 0l,
entityId6, userFilter);
assertEquals(0, entities.size());
entities = getEntitiesFromIdWithPrimaryFilter("type_2", userFilter, "a");
assertEquals(0, entities.size());
}
public void testGetEntitiesWithFromTs() throws IOException {
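    // fromTs bounds the insert time: only entities that were already in the
    // store at fromTs are returned, so a timestamp taken before the data was
    // loaded yields nothing.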
assertEquals(0, getEntitiesFromTs("type_1", beforeTs).size());
assertEquals(0, getEntitiesFromTs("type_2", beforeTs).size());
assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
beforeTs).size());
long afterTs = System.currentTimeMillis();
assertEquals(3, getEntitiesFromTs("type_1", afterTs).size());
assertEquals(1, getEntitiesFromTs("type_2", afterTs).size());
assertEquals(3, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
afterTs).size());
assertEquals(3, getEntities("type_1").size());
assertEquals(1, getEntities("type_2").size());
assertEquals(3, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
    // re-put the same entities and check the original insert time is not
    // overwritten
long beforeTs = this.beforeTs;
loadTestEntityData();
assertEquals(0, getEntitiesFromTs("type_1", beforeTs).size());
assertEquals(0, getEntitiesFromTs("type_2", beforeTs).size());
assertEquals(0, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
beforeTs).size());
assertEquals(3, getEntitiesFromTs("type_1", afterTs).size());
assertEquals(1, getEntitiesFromTs("type_2", afterTs).size());
assertEquals(3, getEntitiesFromTsWithPrimaryFilter("type_1", userFilter,
afterTs).size());
}
public void testGetEntitiesWithPrimaryFilters() throws IOException {
// test using primary filter
assertEquals("nonzero entities size for primary filter", 0,
getEntitiesWithPrimaryFilter("type_1",
new NameValuePair("none", "none")).size());
assertEquals("nonzero entities size for primary filter", 0,
getEntitiesWithPrimaryFilter("type_2",
new NameValuePair("none", "none")).size());
assertEquals("nonzero entities size for primary filter", 0,
getEntitiesWithPrimaryFilter("type_3",
new NameValuePair("none", "none")).size());
List<TimelineEntity> entities = getEntitiesWithPrimaryFilter("type_1",
userFilter);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntitiesWithPrimaryFilter("type_1", numericFilter1);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntitiesWithPrimaryFilter("type_1", numericFilter2);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntitiesWithPrimaryFilter("type_1", numericFilter3);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
entities = getEntitiesWithPrimaryFilter("type_2", userFilter);
assertEquals(0, entities.size());
entities = getEntities("type_1", 1l, null, null, userFilter, null);
assertEquals(1, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
entities = getEntities("type_1", 1l, 0l, null, userFilter, null);
assertEquals(1, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
entities = getEntities("type_1", null, 234l, null, userFilter, null);
assertEquals(0, entities.size());
entities = getEntities("type_1", null, 234l, 345l, userFilter, null);
assertEquals(0, entities.size());
entities = getEntities("type_1", null, null, 345l, userFilter, null);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1, EMPTY_EVENTS, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(2), domainId2);
}
public void testGetEntitiesWithSecondaryFilters() throws IOException {
for (int i = 0; i < 4; ++i) {
      // Verify the secondary filter works whether or not other info is
      // included: i == 0 fetches all fields, 1 fetches none, 2 fetches only
      // primary filters, and 3 fetches only other info.
EnumSet<Field> fields = null;
if (i == 1) {
fields = EnumSet.noneOf(Field.class);
} else if (i == 2) {
fields = EnumSet.of(Field.PRIMARY_FILTERS);
} else if (i == 3) {
fields = EnumSet.of(Field.OTHER_INFO);
}
// test using secondary filter
List<TimelineEntity> entities = getEntitiesWithFilters("type_1", null,
goodTestingFilters, fields);
assertEquals(3, entities.size());
verifyEntityInfo(entityId1, entityType1,
(i == 0 ? events1 : null),
(i == 0 ? EMPTY_REL_ENTITIES : null),
(i == 0 || i == 2 ? primaryFilters : null),
(i == 0 || i == 3 ? otherInfo : null), entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1,
(i == 0 ? events1 : null),
(i == 0 ? EMPTY_REL_ENTITIES : null),
(i == 0 || i == 2 ? primaryFilters : null),
(i == 0 || i == 3 ? otherInfo : null), entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1,
(i == 0 ? EMPTY_EVENTS : null),
(i == 0 ? EMPTY_REL_ENTITIES : null),
(i == 0 || i == 2 ? primaryFilters : null),
(i == 0 || i == 3 ? otherInfo : null), entities.get(2), domainId2);
entities =
getEntitiesWithFilters("type_1", userFilter, goodTestingFilters, fields);
assertEquals(3, entities.size());
if (i == 0) {
verifyEntityInfo(entityId1, entityType1,
(i == 0 ? events1 : null),
(i == 0 ? EMPTY_REL_ENTITIES : null),
(i == 0 || i == 2 ? primaryFilters : null),
(i == 0 || i == 3 ? otherInfo : null), entities.get(0), domainId1);
verifyEntityInfo(entityId1b, entityType1,
(i == 0 ? events1 : null),
(i == 0 ? EMPTY_REL_ENTITIES : null),
(i == 0 || i == 2 ? primaryFilters : null),
(i == 0 || i == 3 ? otherInfo : null), entities.get(1), domainId1);
verifyEntityInfo(entityId6, entityType1,
(i == 0 ? EMPTY_EVENTS : null),
(i == 0 ? EMPTY_REL_ENTITIES : null),
(i == 0 || i == 2 ? primaryFilters : null),
(i == 0 || i == 3 ? otherInfo : null), entities.get(2), domainId2);
}
entities = getEntitiesWithFilters("type_1", null,
Collections.singleton(new NameValuePair("user", "none")), fields);
assertEquals(0, entities.size());
entities =
getEntitiesWithFilters("type_1", null, badTestingFilters, fields);
assertEquals(0, entities.size());
entities =
getEntitiesWithFilters("type_1", userFilter, badTestingFilters, fields);
assertEquals(0, entities.size());
entities =
getEntitiesWithFilters("type_5", null, badTestingFilters, fields);
assertEquals(0, entities.size());
}
}
public void testGetEvents() throws IOException {
// test getting entity timelines
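    // getEntityTimelines(entityType, entityIds, limit, windowStart,
    // windowEnd, eventTypes): judging by the assertions below, limit caps
    // the newest events returned per entity, windowStart is exclusive,
    // windowEnd is inclusive, and eventTypes restricts the event types.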
SortedSet<String> sortedSet = new TreeSet<String>();
sortedSet.add(entityId1);
List<EventsOfOneEntity> timelines =
store.getEntityTimelines(entityType1, sortedSet, null, null,
null, null).getAllEvents();
assertEquals(1, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2, ev1);
sortedSet.add(entityId1b);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2, ev1);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2, ev1);
timelines = store.getEntityTimelines(entityType1, sortedSet, 1l,
null, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
345l, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
123l, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, 345l, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev1);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev1);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, 123l, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev1);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev1);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, null, Collections.singleton("end_event")).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entityId1b, entityType1, ev2);
sortedSet.add(entityId2);
timelines = store.getEntityTimelines(entityType2, sortedSet, null,
null, null, null).getAllEvents();
assertEquals(1, timelines.size());
verifyEntityTimeline(timelines.get(0), entityId2, entityType2, ev3, ev4);
}
/**
* Verify a single entity and its start time
*/
protected static void verifyEntityInfo(String entityId, String entityType,
List<TimelineEvent> events, Map<String, Set<String>> relatedEntities,
Map<String, Set<Object>> primaryFilters, Map<String, Object> otherInfo,
Long startTime, TimelineEntity retrievedEntityInfo, String domainId) {
verifyEntityInfo(entityId, entityType, events, relatedEntities,
primaryFilters, otherInfo, retrievedEntityInfo, domainId);
assertEquals(startTime, retrievedEntityInfo.getStartTime());
}
/**
* Verify a single entity
*/
protected static void verifyEntityInfo(String entityId, String entityType,
List<TimelineEvent> events, Map<String, Set<String>> relatedEntities,
Map<String, Set<Object>> primaryFilters, Map<String, Object> otherInfo,
TimelineEntity retrievedEntityInfo, String domainId) {
if (entityId == null) {
assertNull(retrievedEntityInfo);
return;
}
assertEquals(entityId, retrievedEntityInfo.getEntityId());
assertEquals(entityType, retrievedEntityInfo.getEntityType());
assertEquals(domainId, retrievedEntityInfo.getDomainId());
if (events == null) {
assertNull(retrievedEntityInfo.getEvents());
} else {
assertEquals(events, retrievedEntityInfo.getEvents());
}
if (relatedEntities == null) {
assertNull(retrievedEntityInfo.getRelatedEntities());
} else {
assertEquals(relatedEntities, retrievedEntityInfo.getRelatedEntities());
}
if (primaryFilters == null) {
assertNull(retrievedEntityInfo.getPrimaryFilters());
} else {
assertTrue(primaryFilters.equals(
retrievedEntityInfo.getPrimaryFilters()));
}
if (otherInfo == null) {
assertNull(retrievedEntityInfo.getOtherInfo());
} else {
assertTrue(otherInfo.equals(retrievedEntityInfo.getOtherInfo()));
}
}
/**
* Verify timeline events
*/
private static void verifyEntityTimeline(
EventsOfOneEntity retrievedEvents, String entityId, String entityType,
TimelineEvent... actualEvents) {
assertEquals(entityId, retrievedEvents.getEntityId());
assertEquals(entityType, retrievedEvents.getEntityType());
assertEquals(actualEvents.length, retrievedEvents.getEvents().size());
for (int i = 0; i < actualEvents.length; i++) {
assertEquals(actualEvents[i], retrievedEvents.getEvents().get(i));
}
}
/**
* Create a test entity
*/
protected static TimelineEntity createEntity(String entityId, String entityType,
Long startTime, List<TimelineEvent> events,
Map<String, Set<String>> relatedEntities,
Map<String, Set<Object>> primaryFilters,
Map<String, Object> otherInfo, String domainId) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityId(entityId);
entity.setEntityType(entityType);
entity.setStartTime(startTime);
entity.setEvents(events);
if (relatedEntities != null) {
for (Entry<String, Set<String>> e : relatedEntities.entrySet()) {
for (String v : e.getValue()) {
entity.addRelatedEntity(e.getKey(), v);
}
}
} else {
entity.setRelatedEntities(null);
}
entity.setPrimaryFilters(primaryFilters);
entity.setOtherInfo(otherInfo);
entity.setDomainId(domainId);
return entity;
}
/**
* Create a test event
*/
  private static TimelineEvent createEvent(long timestamp, String type,
      Map<String, Object> info) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(timestamp);
event.setEventType(type);
event.setEventInfo(info);
return event;
}
public void testGetDomain() throws IOException {
TimelineDomain actualDomain1 =
store.getDomain(domain1.getId());
verifyDomainInfo(domain1, actualDomain1);
assertTrue(actualDomain1.getCreatedTime() > 0);
assertTrue(actualDomain1.getModifiedTime() > 0);
assertEquals(
actualDomain1.getCreatedTime(), actualDomain1.getModifiedTime());
TimelineDomain actualDomain2 =
store.getDomain(domain2.getId());
verifyDomainInfo(domain2, actualDomain2);
assertEquals("domain_id_2", actualDomain2.getId());
assertTrue(actualDomain2.getCreatedTime() > 0);
assertTrue(actualDomain2.getModifiedTime() > 0);
assertTrue(
actualDomain2.getCreatedTime() < actualDomain2.getModifiedTime());
}
public void testGetDomains() throws IOException {
TimelineDomains actualDomains =
store.getDomains("owner_1");
assertEquals(2, actualDomains.getDomains().size());
verifyDomainInfo(domain3, actualDomains.getDomains().get(0));
verifyDomainInfo(domain1, actualDomains.getDomains().get(1));
// owner without any domain
actualDomains = store.getDomains("owner_4");
assertEquals(0, actualDomains.getDomains().size());
}
private static void verifyDomainInfo(
TimelineDomain expected, TimelineDomain actual) {
assertEquals(expected.getId(), actual.getId());
assertEquals(expected.getDescription(), actual.getDescription());
assertEquals(expected.getOwner(), actual.getOwner());
assertEquals(expected.getReaders(), actual.getReaders());
assertEquals(expected.getWriters(), actual.getWriters());
}
}
| 44,626 | 42.117874 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestMemoryTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
public class TestMemoryTimelineStore extends TimelineStoreTestUtils {
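  // Runs the shared TimelineStoreTestUtils suite against the in-memory
  // store implementation; the @Test overrides below just delegate upward.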
@Before
public void setup() throws Exception {
store = new MemoryTimelineStore();
store.init(new YarnConfiguration());
store.start();
loadTestEntityData();
loadVerificationEntityData();
loadTestDomainData();
}
@After
public void tearDown() throws Exception {
store.stop();
}
public TimelineStore getTimelineStore() {
return store;
}
@Test
public void testGetSingleEntity() throws IOException {
super.testGetSingleEntity();
}
@Test
public void testGetEntities() throws IOException {
super.testGetEntities();
}
@Test
public void testGetEntitiesWithFromId() throws IOException {
super.testGetEntitiesWithFromId();
}
@Test
public void testGetEntitiesWithFromTs() throws IOException {
super.testGetEntitiesWithFromTs();
}
@Test
public void testGetEntitiesWithPrimaryFilters() throws IOException {
super.testGetEntitiesWithPrimaryFilters();
}
@Test
public void testGetEntitiesWithSecondaryFilters() throws IOException {
super.testGetEntitiesWithSecondaryFilters();
}
@Test
public void testGetEvents() throws IOException {
super.testGetEvents();
}
@Test
public void testGetDomain() throws IOException {
super.testGetDomain();
}
@Test
public void testGetDomains() throws IOException {
super.testGetDomains();
}
}
| 2,567 | 25.474227 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
import org.mockito.Mockito;
public class TestTimelineAuthenticationFilterInitializer {
@Test
public void testProxyUserConfiguration() {
FilterContainer container = Mockito.mock(FilterContainer.class);
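    // Each iteration seeds the proxyuser settings from a different prefix;
    // the initializer is expected to surface them all under the plain
    // proxyuser.* keys in its filter config, as asserted below.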
for (int i = 0; i < 3; ++i) {
Configuration conf = new YarnConfiguration();
switch (i) {
case 0:
// hadoop.proxyuser prefix
conf.set("hadoop.proxyuser.foo.hosts", "*");
conf.set("hadoop.proxyuser.foo.users", "*");
conf.set("hadoop.proxyuser.foo.groups", "*");
break;
case 1:
// yarn.timeline-service.http-authentication.proxyuser prefix
conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.hosts",
"*");
conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.users",
"*");
conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.groups",
"*");
break;
case 2:
// hadoop.proxyuser prefix has been overwritten by
// yarn.timeline-service.http-authentication.proxyuser prefix
conf.set("hadoop.proxyuser.foo.hosts", "bar");
conf.set("hadoop.proxyuser.foo.users", "bar");
conf.set("hadoop.proxyuser.foo.groups", "bar");
conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.hosts",
"*");
conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.users",
"*");
conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.groups",
"*");
break;
default:
break;
}
TimelineAuthenticationFilterInitializer initializer =
new TimelineAuthenticationFilterInitializer();
initializer.initFilter(container, conf);
Assert.assertEquals(
"*", initializer.filterConfig.get("proxyuser.foo.hosts"));
Assert.assertEquals(
"*", initializer.filterConfig.get("proxyuser.foo.users"));
Assert.assertEquals(
"*", initializer.filterConfig.get("proxyuser.foo.groups"));
}
}
}
| 3,191 | 37.926829 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineACLsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.junit.Assert;
import org.junit.Test;
public class TestTimelineACLsManager {
private static TimelineDomain domain;
static {
domain = new TimelineDomain();
domain.setId("domain_id_1");
domain.setOwner("owner");
domain.setReaders("reader");
domain.setWriters("writer");
}
@Test
public void testYarnACLsNotEnabledForEntity() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
timelineACLsManager.setTimelineStore(new TestTimelineStore());
TimelineEntity entity = new TimelineEntity();
entity.addPrimaryFilter(
TimelineStore.SystemFilter.ENTITY_OWNER
.toString(), "owner");
entity.setDomainId("domain_id_1");
Assert.assertTrue(
"Always true when ACLs are not enabled",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("user"),
ApplicationAccessType.VIEW_APP, entity));
Assert.assertTrue(
"Always true when ACLs are not enabled",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("user"),
ApplicationAccessType.MODIFY_APP, entity));
}
@Test
public void testYarnACLsEnabledForEntity() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
timelineACLsManager.setTimelineStore(new TestTimelineStore());
TimelineEntity entity = new TimelineEntity();
entity.addPrimaryFilter(
TimelineStore.SystemFilter.ENTITY_OWNER
.toString(), "owner");
entity.setDomainId("domain_id_1");
Assert.assertTrue(
"Owner should be allowed to view",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("owner"),
ApplicationAccessType.VIEW_APP, entity));
Assert.assertTrue(
"Reader should be allowed to view",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("reader"),
ApplicationAccessType.VIEW_APP, entity));
Assert.assertFalse(
"Other shouldn't be allowed to view",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("other"),
ApplicationAccessType.VIEW_APP, entity));
Assert.assertTrue(
"Admin should be allowed to view",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("admin"),
ApplicationAccessType.VIEW_APP, entity));
Assert.assertTrue(
"Owner should be allowed to modify",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("owner"),
ApplicationAccessType.MODIFY_APP, entity));
Assert.assertTrue(
"Writer should be allowed to modify",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("writer"),
ApplicationAccessType.MODIFY_APP, entity));
Assert.assertFalse(
"Other shouldn't be allowed to modify",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("other"),
ApplicationAccessType.MODIFY_APP, entity));
Assert.assertTrue(
"Admin should be allowed to modify",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("admin"),
ApplicationAccessType.MODIFY_APP, entity));
}
@Test
public void testCorruptedOwnerInfoForEntity() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "owner");
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
timelineACLsManager.setTimelineStore(new TestTimelineStore());
TimelineEntity entity = new TimelineEntity();
try {
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("owner"),
ApplicationAccessType.VIEW_APP, entity);
Assert.fail("Exception is expected");
} catch (YarnException e) {
Assert.assertTrue("It's not the exact expected exception", e.getMessage()
.contains("doesn't exist."));
}
}
@Test
public void testYarnACLsNotEnabledForDomain() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
TimelineDomain domain = new TimelineDomain();
domain.setOwner("owner");
Assert.assertTrue(
"Always true when ACLs are not enabled",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("user"), domain));
}
@Test
public void testYarnACLsEnabledForDomain() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
TimelineDomain domain = new TimelineDomain();
domain.setOwner("owner");
Assert.assertTrue(
"Owner should be allowed to access",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("owner"), domain));
Assert.assertFalse(
"Other shouldn't be allowed to access",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("other"), domain));
Assert.assertTrue(
"Admin should be allowed to access",
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("admin"), domain));
}
@Test
public void testCorruptedOwnerInfoForDomain() throws Exception {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "owner");
TimelineACLsManager timelineACLsManager =
new TimelineACLsManager(conf);
TimelineDomain domain = new TimelineDomain();
try {
timelineACLsManager.checkAccess(
UserGroupInformation.createRemoteUser("owner"), domain);
Assert.fail("Exception is expected");
} catch (YarnException e) {
Assert.assertTrue("It's not the exact expected exception", e.getMessage()
.contains("is corrupted."));
}
}
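  // Stub store that hands back the fixed test domain for any non-null id,
  // letting the ACL checks above resolve the domain's owner/readers/writers.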
private static class TestTimelineStore extends MemoryTimelineStore {
@Override
public TimelineDomain getDomain(
String domainId) throws IOException {
if (domainId == null) {
return null;
} else {
return domain;
}
}
}
}
| 8,420 | 38.350467 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.Callable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public class TestTimelineAuthenticationFilter {
private static final String FOO_USER = "foo";
private static final String BAR_USER = "bar";
private static final String HTTP_USER = "HTTP";
private static final File testRootDir = new File(
System.getProperty("test.build.dir", "target/test-dir"),
TestTimelineAuthenticationFilter.class.getName() + "-root");
private static File httpSpnegoKeytabFile = new File(
KerberosTestUtils.getKeytabFile());
private static String httpSpnegoPrincipal =
KerberosTestUtils.getServerPrincipal();
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/"
+ TestTimelineAuthenticationFilter.class.getSimpleName();
@Parameterized.Parameters
public static Collection<Object[]> withSsl() {
return Arrays.asList(new Object[][] { { false }, { true } });
}
private static MiniKdc testMiniKDC;
private static String keystoresDir;
private static String sslConfDir;
private static ApplicationHistoryServer testTimelineServer;
private static Configuration conf;
private static boolean withSsl;
public TestTimelineAuthenticationFilter(boolean withSsl) {
TestTimelineAuthenticationFilter.withSsl = withSsl;
}
@BeforeClass
public static void setup() {
try {
testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
testMiniKDC.start();
testMiniKDC.createPrincipal(
httpSpnegoKeytabFile, HTTP_USER + "/localhost");
} catch (Exception e) {
assertTrue("Couldn't setup MiniKDC", false);
}
try {
testTimelineServer = new ApplicationHistoryServer();
conf = new Configuration(false);
conf.setStrings(TimelineAuthenticationFilterInitializer.PREFIX + "type",
"kerberos");
conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
KerberosAuthenticationHandler.PRINCIPAL, httpSpnegoPrincipal);
conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
KerberosAuthenticationHandler.KEYTAB,
httpSpnegoKeytabFile.getAbsolutePath());
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
conf.set(YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL,
httpSpnegoPrincipal);
conf.set(YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
httpSpnegoKeytabFile.getAbsolutePath());
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
MemoryTimelineStore.class, TimelineStore.class);
conf.set(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
"localhost:10200");
conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
"localhost:8188");
conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
"localhost:8190");
conf.set("hadoop.proxyuser.HTTP.hosts", "*");
conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 1);
if (withSsl) {
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
HttpConfig.Policy.HTTPS_ONLY.name());
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir =
KeyStoreTestUtil.getClasspathDir(TestTimelineAuthenticationFilter.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
}
UserGroupInformation.setConfiguration(conf);
testTimelineServer.init(conf);
testTimelineServer.start();
} catch (Exception e) {
assertTrue("Couldn't setup TimelineServer", false);
}
}
private TimelineClient createTimelineClientForUGI() {
TimelineClient client = TimelineClient.createTimelineClient();
client.init(conf);
client.start();
return client;
}
@AfterClass
public static void tearDown() throws Exception {
if (testMiniKDC != null) {
testMiniKDC.stop();
}
if (testTimelineServer != null) {
testTimelineServer.stop();
}
if (withSsl) {
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
}
}
@Test
public void testPutTimelineEntities() throws Exception {
KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
@Override
public Void call() throws Exception {
TimelineClient client = createTimelineClientForUGI();
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType(
TestTimelineAuthenticationFilter.class.getName());
entityToStore.setEntityId("entity1");
entityToStore.setStartTime(0L);
TimelinePutResponse putResponse = client.putEntities(entityToStore);
Assert.assertEquals(0, putResponse.getErrors().size());
TimelineEntity entityToRead =
testTimelineServer.getTimelineStore().getEntity(
"entity1", TestTimelineAuthenticationFilter.class.getName(), null);
Assert.assertNotNull(entityToRead);
return null;
}
});
}
@Test
public void testPutDomains() throws Exception {
KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
@Override
public Void call() throws Exception {
TimelineClient client = createTimelineClientForUGI();
TimelineDomain domainToStore = new TimelineDomain();
domainToStore.setId(TestTimelineAuthenticationFilter.class.getName());
domainToStore.setReaders("*");
domainToStore.setWriters("*");
client.putDomain(domainToStore);
TimelineDomain domainToRead =
testTimelineServer.getTimelineStore().getDomain(
TestTimelineAuthenticationFilter.class.getName());
Assert.assertNotNull(domainToRead);
return null;
}
});
}
@Test
public void testDelegationTokenOperations() throws Exception {
TimelineClient httpUserClient =
KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<TimelineClient>() {
@Override
public TimelineClient call() throws Exception {
return createTimelineClientForUGI();
}
});
UserGroupInformation httpUser =
KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<UserGroupInformation>() {
@Override
public UserGroupInformation call() throws Exception {
return UserGroupInformation.getCurrentUser();
}
});
    // Let the HTTP user get a delegation token for itself
Token<TimelineDelegationTokenIdentifier> token =
httpUserClient.getDelegationToken(httpUser.getShortUserName());
Assert.assertNotNull(token);
TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
Assert.assertNotNull(tDT);
Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
// Renew token
Assert.assertFalse(token.getService().toString().isEmpty());
// Renew the token from the token service address
long renewTime1 = httpUserClient.renewDelegationToken(token);
Thread.sleep(100);
token.setService(new Text());
Assert.assertTrue(token.getService().toString().isEmpty());
    // If the token service address is not available, the token can still be
    // renewed from the configured address
long renewTime2 = httpUserClient.renewDelegationToken(token);
Assert.assertTrue(renewTime1 < renewTime2);
// Cancel token
Assert.assertTrue(token.getService().toString().isEmpty());
    // If the token service address is not available, the token can still be
    // canceled from the configured address
httpUserClient.cancelDelegationToken(token);
// Renew should not be successful because the token is canceled
try {
httpUserClient.renewDelegationToken(token);
Assert.fail();
} catch (Exception e) {
Assert.assertTrue(e.getMessage().contains(
"Renewal request for unknown token"));
}
    // Let the HTTP user get a delegation token for the FOO user
UserGroupInformation fooUgi = UserGroupInformation.createProxyUser(
FOO_USER, httpUser);
TimelineClient fooUserClient = fooUgi.doAs(
new PrivilegedExceptionAction<TimelineClient>() {
@Override
public TimelineClient run() throws Exception {
return createTimelineClientForUGI();
}
});
token = fooUserClient.getDelegationToken(httpUser.getShortUserName());
Assert.assertNotNull(token);
tDT = token.decodeIdentifier();
Assert.assertNotNull(tDT);
Assert.assertEquals(new Text(FOO_USER), tDT.getOwner());
Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser());
// Renew token as the renewer
final Token<TimelineDelegationTokenIdentifier> tokenToRenew = token;
renewTime1 = httpUserClient.renewDelegationToken(tokenToRenew);
renewTime2 = httpUserClient.renewDelegationToken(tokenToRenew);
Assert.assertTrue(renewTime1 < renewTime2);
// Cancel token
Assert.assertFalse(tokenToRenew.getService().toString().isEmpty());
// Cancel the token from the token service address
fooUserClient.cancelDelegationToken(tokenToRenew);
// Renew should not be successful because the token is canceled
try {
httpUserClient.renewDelegationToken(tokenToRenew);
Assert.fail();
} catch (Exception e) {
Assert.assertTrue(
e.getMessage().contains("Renewal request for unknown token"));
}
    // Let the HTTP user get a delegation token for the BAR user
UserGroupInformation barUgi = UserGroupInformation.createProxyUser(
BAR_USER, httpUser);
TimelineClient barUserClient = barUgi.doAs(
new PrivilegedExceptionAction<TimelineClient>() {
@Override
public TimelineClient run() {
return createTimelineClientForUGI();
}
});
try {
barUserClient.getDelegationToken(httpUser.getShortUserName());
Assert.fail();
} catch (Exception e) {
      Assert.assertTrue(e.getCause() instanceof AuthorizationException
          || e.getCause() instanceof AuthenticationException);
}
}
}
| 12,891 | 38.790123 | 123 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
public class TestCrossOriginFilterInitializer {
@Test
public void testGetFilterParameters() {
// Initialize configuration object
Configuration conf = new Configuration();
conf.set(CrossOriginFilterInitializer.PREFIX + "rootparam", "rootvalue");
conf.set(CrossOriginFilterInitializer.PREFIX + "nested.param",
"nestedvalue");
conf.set("outofscopeparam", "outofscopevalue");
// call function under test
Map<String, String> filterParameters =
CrossOriginFilterInitializer.getFilterParameters(conf);
// retrieve values
String rootvalue = filterParameters.get("rootparam");
String nestedvalue = filterParameters.get("nested.param");
String outofscopeparam = filterParameters.get("outofscopeparam");
// verify expected values are in place
Assert.assertEquals("Could not find filter parameter", "rootvalue",
rootvalue);
Assert.assertEquals("Could not find filter parameter", "nestedvalue",
nestedvalue);
Assert.assertNull("Found unexpected value in filter parameters",
outofscopeparam);
}
}
| 2,076 | 34.810345 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.io.IOException;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.junit.Assert;
import org.junit.Test;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
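/**
 * Unit tests for {@link CrossOriginFilter}: same-origin pass-through,
 * wildcard and pattern-based origin matching, header encoding, preflight
 * validation, and the CORS headers set for allowed requests.
 */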
public class TestCrossOriginFilter {
@Test
public void testSameOrigin() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "");
FilterConfig filterConfig = new FilterConfigTest(conf);
// Origin is not specified for same origin requests
HttpServletRequest mockReq = mock(HttpServletRequest.class);
when(mockReq.getHeader(CrossOriginFilter.ORIGIN)).thenReturn(null);
// Objects to verify interactions based on request
HttpServletResponse mockRes = mock(HttpServletResponse.class);
FilterChain mockChain = mock(FilterChain.class);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
filter.doFilter(mockReq, mockRes, mockChain);
verifyZeroInteractions(mockRes);
verify(mockChain).doFilter(mockReq, mockRes);
}
@Test
public void testAllowAllOrigins() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "*");
FilterConfig filterConfig = new FilterConfigTest(conf);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
Assert.assertTrue(filter.areOriginsAllowed("example.com"));
}
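  // An illustrative sketch that was not part of the original suite: it
  // assumes the wildcard configuration makes areOriginsAllowed accept any
  // origin value, including a space-separated origin list.
  @Test
  public void testAllowAllOriginsWithList() throws ServletException,
      IOException {
    // Setup the configuration settings of the server
    Map<String, String> conf = new HashMap<String, String>();
    conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "*");
    FilterConfig filterConfig = new FilterConfigTest(conf);
    // Object under test
    CrossOriginFilter filter = new CrossOriginFilter();
    filter.init(filterConfig);
    Assert.assertTrue(
        filter.areOriginsAllowed("foo.example.com bar.example.org"));
  }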
@Test
public void testEncodeHeaders() {
String validOrigin = "http://localhost:12345";
String encodedValidOrigin = CrossOriginFilter.encodeHeader(validOrigin);
Assert.assertEquals("Valid origin encoding should match exactly",
validOrigin, encodedValidOrigin);
String httpResponseSplitOrigin = validOrigin + " \nSecondHeader: value";
String encodedResponseSplitOrigin =
CrossOriginFilter.encodeHeader(httpResponseSplitOrigin);
Assert.assertEquals("Http response split origin should be protected against",
validOrigin, encodedResponseSplitOrigin);
// Test Origin List
String validOriginList = "http://foo.example.com:12345 http://bar.example.com:12345";
String encodedValidOriginList = CrossOriginFilter.encodeHeader(validOriginList);
Assert.assertEquals("Valid origin list encoding should match exactly",
validOriginList, encodedValidOriginList);
}
@Test
public void testPatternMatchingOrigins() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "*.example.com");
FilterConfig filterConfig = new FilterConfigTest(conf);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
    // The wildcard matches sub-domains at any depth, but not the bare domain
Assert.assertFalse(filter.areOriginsAllowed("example.com"));
Assert.assertFalse(filter.areOriginsAllowed("foo:example.com"));
Assert.assertTrue(filter.areOriginsAllowed("foo.example.com"));
Assert.assertTrue(filter.areOriginsAllowed("foo.bar.example.com"));
// First origin is allowed
Assert.assertTrue(filter.areOriginsAllowed("foo.example.com foo.nomatch.com"));
// Second origin is allowed
Assert.assertTrue(filter.areOriginsAllowed("foo.nomatch.com foo.example.com"));
// No origin in list is allowed
Assert.assertFalse(filter.areOriginsAllowed("foo.nomatch1.com foo.nomatch2.com"));
}
@Test
public void testDisallowedOrigin() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "example.com");
FilterConfig filterConfig = new FilterConfigTest(conf);
    // The request origin does not match the allowed origin
HttpServletRequest mockReq = mock(HttpServletRequest.class);
when(mockReq.getHeader(CrossOriginFilter.ORIGIN)).thenReturn("example.org");
// Objects to verify interactions based on request
HttpServletResponse mockRes = mock(HttpServletResponse.class);
FilterChain mockChain = mock(FilterChain.class);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
filter.doFilter(mockReq, mockRes, mockChain);
verifyZeroInteractions(mockRes);
verify(mockChain).doFilter(mockReq, mockRes);
}
@Test
public void testDisallowedMethod() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "example.com");
FilterConfig filterConfig = new FilterConfigTest(conf);
    // The origin is allowed but the requested method is not
HttpServletRequest mockReq = mock(HttpServletRequest.class);
when(mockReq.getHeader(CrossOriginFilter.ORIGIN)).thenReturn("example.com");
when(mockReq.getHeader(CrossOriginFilter.ACCESS_CONTROL_REQUEST_METHOD))
.thenReturn("DISALLOWED_METHOD");
// Objects to verify interactions based on request
HttpServletResponse mockRes = mock(HttpServletResponse.class);
FilterChain mockChain = mock(FilterChain.class);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
filter.doFilter(mockReq, mockRes, mockChain);
verifyZeroInteractions(mockRes);
verify(mockChain).doFilter(mockReq, mockRes);
}
@Test
public void testDisallowedHeader() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "example.com");
FilterConfig filterConfig = new FilterConfigTest(conf);
    // The origin and method are allowed but a requested header is not
HttpServletRequest mockReq = mock(HttpServletRequest.class);
when(mockReq.getHeader(CrossOriginFilter.ORIGIN)).thenReturn("example.com");
when(mockReq.getHeader(CrossOriginFilter.ACCESS_CONTROL_REQUEST_METHOD))
.thenReturn("GET");
when(mockReq.getHeader(CrossOriginFilter.ACCESS_CONTROL_REQUEST_HEADERS))
.thenReturn("Disallowed-Header");
// Objects to verify interactions based on request
HttpServletResponse mockRes = mock(HttpServletResponse.class);
FilterChain mockChain = mock(FilterChain.class);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
filter.doFilter(mockReq, mockRes, mockChain);
verifyZeroInteractions(mockRes);
verify(mockChain).doFilter(mockReq, mockRes);
}
@Test
public void testCrossOriginFilter() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "example.com");
FilterConfig filterConfig = new FilterConfigTest(conf);
    // The origin, method and headers are all allowed for this request
HttpServletRequest mockReq = mock(HttpServletRequest.class);
when(mockReq.getHeader(CrossOriginFilter.ORIGIN)).thenReturn("example.com");
when(mockReq.getHeader(CrossOriginFilter.ACCESS_CONTROL_REQUEST_METHOD))
.thenReturn("GET");
when(mockReq.getHeader(CrossOriginFilter.ACCESS_CONTROL_REQUEST_HEADERS))
.thenReturn("X-Requested-With");
// Objects to verify interactions based on request
HttpServletResponse mockRes = mock(HttpServletResponse.class);
FilterChain mockChain = mock(FilterChain.class);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
filter.doFilter(mockReq, mockRes, mockChain);
verify(mockRes).setHeader(CrossOriginFilter.ACCESS_CONTROL_ALLOW_ORIGIN,
"example.com");
verify(mockRes).setHeader(
CrossOriginFilter.ACCESS_CONTROL_ALLOW_CREDENTIALS,
Boolean.TRUE.toString());
verify(mockRes).setHeader(CrossOriginFilter.ACCESS_CONTROL_ALLOW_METHODS,
filter.getAllowedMethodsHeader());
verify(mockRes).setHeader(CrossOriginFilter.ACCESS_CONTROL_ALLOW_HEADERS,
filter.getAllowedHeadersHeader());
verify(mockChain).doFilter(mockReq, mockRes);
}
@Test
public void testCrossOriginFilterAfterRestart() throws ServletException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "example.com");
conf.put(CrossOriginFilter.ALLOWED_HEADERS, "X-Requested-With,Accept");
conf.put(CrossOriginFilter.ALLOWED_METHODS, "GET,POST");
FilterConfig filterConfig = new FilterConfigTest(conf);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
//verify filter values
Assert.assertTrue("Allowed headers do not match",
filter.getAllowedHeadersHeader()
.compareTo("X-Requested-With,Accept") == 0);
Assert.assertTrue("Allowed methods do not match",
filter.getAllowedMethodsHeader()
.compareTo("GET,POST") == 0);
Assert.assertTrue(filter.areOriginsAllowed("example.com"));
//destroy filter values and clear conf
filter.destroy();
conf.clear();
// Setup the configuration settings of the server
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "newexample.com");
conf.put(CrossOriginFilter.ALLOWED_HEADERS, "Content-Type,Origin");
conf.put(CrossOriginFilter.ALLOWED_METHODS, "GET,HEAD");
filterConfig = new FilterConfigTest(conf);
//initialize filter
filter.init(filterConfig);
//verify filter values
Assert.assertTrue("Allowed headers do not match",
filter.getAllowedHeadersHeader()
.compareTo("Content-Type,Origin") == 0);
Assert.assertTrue("Allowed methods do not match",
filter.getAllowedMethodsHeader()
.compareTo("GET,HEAD") == 0);
Assert.assertTrue(filter.areOriginsAllowed("newexample.com"));
//destroy filter values
filter.destroy();
}
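  // Minimal FilterConfig implementation backed by a map, so tests can
  // supply filter init parameters without a servlet container.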
private static class FilterConfigTest implements FilterConfig {
final Map<String, String> map;
FilterConfigTest(Map<String, String> map) {
this.map = map;
}
@Override
public String getFilterName() {
return "test-filter";
}
@Override
public String getInitParameter(String key) {
return map.get(key);
}
@Override
public Enumeration<String> getInitParameterNames() {
return Collections.enumeration(map.keySet());
}
@Override
public ServletContext getServletContext() {
return null;
}
}
}
| 12,224 | 36.615385 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServicesWithSSL.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.io.File;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import com.sun.jersey.api.client.ClientResponse;
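/**
 * Verifies that the timeline web services accept entities over HTTPS when
 * the HTTP policy is HTTPS_ONLY.
 */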
public class TestTimelineWebServicesWithSSL {
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/"
+ TestTimelineWebServicesWithSSL.class.getSimpleName();
private static String keystoresDir;
private static String sslConfDir;
private static ApplicationHistoryServer timelineServer;
private static TimelineStore store;
private static Configuration conf;
@BeforeClass
public static void setupServer() throws Exception {
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
MemoryTimelineStore.class, TimelineStore.class);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, "HTTPS_ONLY");
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir =
KeyStoreTestUtil.getClasspathDir(TestTimelineWebServicesWithSSL.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
conf.addResource("ssl-server.xml");
conf.addResource("ssl-client.xml");
timelineServer = new ApplicationHistoryServer();
timelineServer.init(conf);
timelineServer.start();
store = timelineServer.getTimelineStore();
}
@AfterClass
public static void tearDownServer() throws Exception {
if (timelineServer != null) {
timelineServer.stop();
}
}
@Test
public void testPutEntities() throws Exception {
TestTimelineClient client = new TestTimelineClient();
try {
client.init(conf);
client.start();
TimelineEntity expectedEntity = new TimelineEntity();
expectedEntity.setEntityType("test entity type");
expectedEntity.setEntityId("test entity id");
expectedEntity.setDomainId("test domain id");
TimelineEvent event = new TimelineEvent();
event.setEventType("test event type");
event.setTimestamp(0L);
expectedEntity.addEvent(event);
TimelinePutResponse response = client.putEntities(expectedEntity);
Assert.assertEquals(0, response.getErrors().size());
Assert.assertTrue(client.resp.toString().contains("https"));
TimelineEntity actualEntity = store.getEntity(
expectedEntity.getEntityId(), expectedEntity.getEntityType(),
EnumSet.allOf(Field.class));
Assert.assertNotNull(actualEntity);
Assert.assertEquals(
expectedEntity.getEntityId(), actualEntity.getEntityId());
Assert.assertEquals(
expectedEntity.getEntityType(), actualEntity.getEntityType());
} finally {
client.stop();
client.close();
}
}
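  // Subclass that records the last ClientResponse so the test can verify
  // the put actually went over https.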
private static class TestTimelineClient extends TimelineClientImpl {
private ClientResponse resp;
@Override
public ClientResponse doPostingObject(Object obj, String path) {
resp = super.doPostingObject(obj, path);
return resp;
}
}
}
| 4,896 | 35.544776 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.timeline.TestMemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.junit.Assert;
import org.junit.Test;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.WebAppDescriptor;
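/**
 * REST-level tests for the timeline web services: entity, event and domain
 * queries, puts, and the corresponding behavior with YARN ACLs enabled.
 */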
public class TestTimelineWebServices extends JerseyTestBase {
private static TimelineStore store;
private static TimelineACLsManager timelineACLsManager;
private static AdminACLsManager adminACLsManager;
private long beforeTime;
private Injector injector = Guice.createInjector(new ServletModule() {
@SuppressWarnings("unchecked")
@Override
protected void configureServlets() {
bind(YarnJacksonJaxbJsonProvider.class);
bind(TimelineWebServices.class);
bind(GenericExceptionHandler.class);
try {
store = mockTimelineStore();
} catch (Exception e) {
Assert.fail();
}
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
timelineACLsManager = new TimelineACLsManager(conf);
timelineACLsManager.setTimelineStore(store);
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
adminACLsManager = new AdminACLsManager(conf);
TimelineDataManager timelineDataManager =
new TimelineDataManager(store, timelineACLsManager);
timelineDataManager.init(conf);
timelineDataManager.start();
bind(TimelineDataManager.class).toInstance(timelineDataManager);
serve("/*").with(GuiceContainer.class);
TimelineAuthenticationFilter taFilter =
new TimelineAuthenticationFilter();
FilterConfig filterConfig = mock(FilterConfig.class);
when(filterConfig.getInitParameter(AuthenticationFilter.CONFIG_PREFIX))
.thenReturn(null);
when(filterConfig.getInitParameter(AuthenticationFilter.AUTH_TYPE))
.thenReturn("simple");
when(filterConfig.getInitParameter(
PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)).thenReturn("true");
ServletContext context = mock(ServletContext.class);
when(filterConfig.getServletContext()).thenReturn(context);
Enumeration<Object> names = mock(Enumeration.class);
when(names.hasMoreElements()).thenReturn(true, true, true, false);
when(names.nextElement()).thenReturn(
AuthenticationFilter.AUTH_TYPE,
PseudoAuthenticationHandler.ANONYMOUS_ALLOWED,
DelegationTokenAuthenticationHandler.TOKEN_KIND);
when(filterConfig.getInitParameterNames()).thenReturn(names);
when(filterConfig.getInitParameter(
DelegationTokenAuthenticationHandler.TOKEN_KIND)).thenReturn(
TimelineDelegationTokenIdentifier.KIND_NAME.toString());
try {
taFilter.init(filterConfig);
} catch (ServletException e) {
Assert.fail("Unable to initialize TimelineAuthenticationFilter: " +
e.getMessage());
}
taFilter = spy(taFilter);
try {
doNothing().when(taFilter).init(any(FilterConfig.class));
} catch (ServletException e) {
Assert.fail("Unable to initialize TimelineAuthenticationFilter: " +
e.getMessage());
}
filter("/*").through(taFilter);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
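  // Seeds a memory-backed store via TestMemoryTimelineStore; beforeTime is
  // captured first so the fromTs tests can bracket the seeded data.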
private TimelineStore mockTimelineStore()
throws Exception {
beforeTime = System.currentTimeMillis() - 1;
TestMemoryTimelineStore store =
new TestMemoryTimelineStore();
store.setup();
return store.getTimelineStore();
}
public TestTimelineWebServices() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter")
.servletPath("/")
.clientConfig(
new DefaultClientConfig(YarnJacksonJaxbJsonProvider.class))
.build());
}
@Test
public void testAbout() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineAbout actualAbout = response.getEntity(TimelineAbout.class);
TimelineAbout expectedAbout =
TimelineUtils.createTimelineAbout("Timeline API");
Assert.assertNotNull(
"Timeline service about response is null", actualAbout);
Assert.assertEquals(expectedAbout.getAbout(), actualAbout.getAbout());
Assert.assertEquals(expectedAbout.getTimelineServiceVersion(),
actualAbout.getTimelineServiceVersion());
Assert.assertEquals(expectedAbout.getTimelineServiceBuildVersion(),
actualAbout.getTimelineServiceBuildVersion());
Assert.assertEquals(expectedAbout.getTimelineServiceVersionBuiltOn(),
actualAbout.getTimelineServiceVersionBuiltOn());
Assert.assertEquals(expectedAbout.getHadoopVersion(),
actualAbout.getHadoopVersion());
Assert.assertEquals(expectedAbout.getHadoopBuildVersion(),
actualAbout.getHadoopBuildVersion());
Assert.assertEquals(expectedAbout.getHadoopVersionBuiltOn(),
actualAbout.getHadoopVersionBuiltOn());
}
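  // Asserts the shape of the three "type_1" entities seeded by
  // TestMemoryTimelineStore, returned newest start time first.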
private static void verifyEntities(TimelineEntities entities) {
Assert.assertNotNull(entities);
Assert.assertEquals(3, entities.getEntities().size());
TimelineEntity entity1 = entities.getEntities().get(0);
Assert.assertNotNull(entity1);
Assert.assertEquals("id_1", entity1.getEntityId());
Assert.assertEquals("type_1", entity1.getEntityType());
    Assert.assertEquals(123L, entity1.getStartTime().longValue());
    Assert.assertEquals(2, entity1.getEvents().size());
    Assert.assertEquals(4, entity1.getPrimaryFilters().size());
    Assert.assertEquals(4, entity1.getOtherInfo().size());
    TimelineEntity entity2 = entities.getEntities().get(1);
    Assert.assertNotNull(entity2);
    Assert.assertEquals("id_2", entity2.getEntityId());
    Assert.assertEquals("type_1", entity2.getEntityType());
    Assert.assertEquals(123L, entity2.getStartTime().longValue());
    Assert.assertEquals(2, entity2.getEvents().size());
    Assert.assertEquals(4, entity2.getPrimaryFilters().size());
    Assert.assertEquals(4, entity2.getOtherInfo().size());
    TimelineEntity entity3 = entities.getEntities().get(2);
    Assert.assertNotNull(entity3);
    Assert.assertEquals("id_6", entity3.getEntityId());
    Assert.assertEquals("type_1", entity3.getEntityType());
    Assert.assertEquals(61L, entity3.getStartTime().longValue());
Assert.assertEquals(0, entity3.getEvents().size());
Assert.assertEquals(4, entity3.getPrimaryFilters().size());
Assert.assertEquals(4, entity3.getOtherInfo().size());
}
@Test
public void testGetEntities() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
verifyEntities(response.getEntity(TimelineEntities.class));
}
@Test
public void testFromId() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("fromId", "id_2")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(2, response.getEntity(TimelineEntities.class).getEntities()
.size());
response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("fromId", "id_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(3, response.getEntity(TimelineEntities.class).getEntities()
.size());
}
@Test
public void testFromTs() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("fromTs", Long.toString(beforeTime))
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(0, response.getEntity(TimelineEntities.class).getEntities()
.size());
response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("fromTs", Long.toString(
System.currentTimeMillis()))
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(3, response.getEntity(TimelineEntities.class).getEntities()
.size());
}
@Test
public void testPrimaryFilterString() {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("primaryFilter", "user:username")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
verifyEntities(response.getEntity(TimelineEntities.class));
}
@Test
public void testPrimaryFilterInteger() {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("primaryFilter",
"appname:" + Integer.toString(Integer.MAX_VALUE))
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
verifyEntities(response.getEntity(TimelineEntities.class));
}
@Test
public void testPrimaryFilterLong() {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("primaryFilter",
"long:" + Long.toString((long) Integer.MAX_VALUE + 1l))
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
verifyEntities(response.getEntity(TimelineEntities.class));
}
@Test
public void testPrimaryFilterNumericString() {
// without quotes, 123abc is interpreted as the number 123,
// which finds no entities
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("primaryFilter", "other:123abc")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(0, response.getEntity(TimelineEntities.class).getEntities()
.size());
}
@Test
public void testPrimaryFilterNumericStringWithQuotes() {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").queryParam("primaryFilter", "other:\"123abc\"")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
verifyEntities(response.getEntity(TimelineEntities.class));
}
@Test
public void testSecondaryFilters() {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1")
.queryParam("secondaryFilter",
"user:username,appname:" + Integer.toString(Integer.MAX_VALUE))
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
verifyEntities(response.getEntity(TimelineEntities.class));
}
@Test
public void testGetEntity() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").path("id_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineEntity entity = response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1", entity.getEntityId());
Assert.assertEquals("type_1", entity.getEntityType());
    Assert.assertEquals(123L, entity.getStartTime().longValue());
Assert.assertEquals(2, entity.getEvents().size());
Assert.assertEquals(4, entity.getPrimaryFilters().size());
Assert.assertEquals(4, entity.getOtherInfo().size());
}
@Test
public void testGetEntityFields1() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").path("id_1").queryParam("fields", "events,otherinfo")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineEntity entity = response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1", entity.getEntityId());
Assert.assertEquals("type_1", entity.getEntityType());
    Assert.assertEquals(123L, entity.getStartTime().longValue());
Assert.assertEquals(2, entity.getEvents().size());
Assert.assertEquals(0, entity.getPrimaryFilters().size());
Assert.assertEquals(4, entity.getOtherInfo().size());
}
@Test
public void testGetEntityFields2() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").path("id_1").queryParam("fields", "lasteventonly," +
"primaryfilters,relatedentities")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineEntity entity = response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1", entity.getEntityId());
Assert.assertEquals("type_1", entity.getEntityType());
    Assert.assertEquals(123L, entity.getStartTime().longValue());
Assert.assertEquals(1, entity.getEvents().size());
Assert.assertEquals(4, entity.getPrimaryFilters().size());
Assert.assertEquals(0, entity.getOtherInfo().size());
}
@Test
public void testGetEvents() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1").path("events")
.queryParam("entityId", "id_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineEvents events = response.getEntity(TimelineEvents.class);
Assert.assertNotNull(events);
Assert.assertEquals(1, events.getAllEvents().size());
TimelineEvents.EventsOfOneEntity partEvents = events.getAllEvents().get(0);
Assert.assertEquals(2, partEvents.getEvents().size());
TimelineEvent event1 = partEvents.getEvents().get(0);
    Assert.assertEquals(456L, event1.getTimestamp());
    Assert.assertEquals("end_event", event1.getEventType());
    Assert.assertEquals(1, event1.getEventInfo().size());
    TimelineEvent event2 = partEvents.getEvents().get(1);
    Assert.assertEquals(123L, event2.getTimestamp());
Assert.assertEquals("start_event", event2.getEventType());
Assert.assertEquals(0, event2.getEventInfo().size());
}
@Test
public void testPostEntitiesWithPrimaryFilter() throws Exception {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
Map<String, Set<Object>> filters = new HashMap<String, Set<Object>>();
filters.put(TimelineStore.SystemFilter.ENTITY_OWNER.toString(),
new HashSet<Object>());
entity.setPrimaryFilters(filters);
entity.setEntityId("test id 6");
entity.setEntityType("test type 6");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
    TimelinePutResponse putResponse =
        response.getEntity(TimelinePutResponse.class);
    Assert.assertEquals(0, putResponse.getErrors().size());
}
@Test
public void testPostEntities() throws Exception {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 1");
entity.setEntityType("test type 1");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_1");
entities.addEntity(entity);
WebResource r = resource();
// No owner, will be rejected
ClientResponse response = r.path("ws").path("v1").path("timeline")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(ClientResponse.Status.FORBIDDEN,
response.getClientResponseStatus());
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    TimelinePutResponse putResponse =
        response.getEntity(TimelinePutResponse.class);
    Assert.assertNotNull(putResponse);
    Assert.assertEquals(0, putResponse.getErrors().size());
// verify the entity exists in the store
response = r.path("ws").path("v1").path("timeline")
.path("test type 1").path("test id 1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("test id 1", entity.getEntityId());
Assert.assertEquals("test type 1", entity.getEntityType());
}
@Test
public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 2");
entity.setEntityType("test type 2");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_1");
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_1")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelinePutResponse putResponse =
response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(0, putResponse.getErrors().size());
      // Override/append timeline data in the same entity with a different user
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_3")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
putResponse = response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(1, putResponse.getErrors().size());
Assert.assertEquals(TimelinePutResponse.TimelinePutError.ACCESS_DENIED,
putResponse.getErrors().get(0).getErrorCode());
// Cross domain relationship will be rejected
entities = new TimelineEntities();
entity = new TimelineEntity();
entity.setEntityId("test id 3");
entity.setEntityType("test type 2");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_2");
entity.setRelatedEntities(Collections.singletonMap(
"test type 2", Collections.singleton("test id 2")));
entities.addEntity(entity);
r = resource();
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_3")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
putResponse = response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(1, putResponse.getErrors().size());
Assert.assertEquals(TimelinePutError.FORBIDDEN_RELATION,
putResponse.getErrors().get(0).getErrorCode());
      // Make sure the entity has been added anyway even though the
      // cross-domain relationship has been excluded
response = r.path("ws").path("v1").path("timeline")
.path("test type 2").path("test id 3")
.queryParam("user.name", "reader_user_3")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("test id 3", entity.getEntityId());
Assert.assertEquals("test type 2", entity.getEntityType());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testPostEntitiesToDefaultDomain() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 7");
entity.setEntityType("test type 7");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "anybody_1")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      TimelinePutResponse putResponse =
          response.getEntity(TimelinePutResponse.class);
      Assert.assertNotNull(putResponse);
      Assert.assertEquals(0, putResponse.getErrors().size());
// verify the entity exists in the store
response = r.path("ws").path("v1").path("timeline")
.path("test type 7").path("test id 7")
.queryParam("user.name", "any_body_2")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("test id 7", entity.getEntityId());
Assert.assertEquals("test type 7", entity.getEntityType());
Assert.assertEquals(TimelineDataManager.DEFAULT_DOMAIN_ID,
entity.getDomainId());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testGetEntityWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 3");
entity.setEntityType("test type 3");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_1");
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_1")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelinePutResponse putResponse =
response.getEntity(TimelinePutResponse.class);
Assert.assertEquals(0, putResponse.getErrors().size());
// verify the system data will not be exposed
// 1. No field specification
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("user.name", "reader_user_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(
TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// 2. other field
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("fields", "relatedentities")
.queryParam("user.name", "reader_user_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(
TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// 3. primaryfilters field
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("fields", "primaryfilters")
.queryParam("user.name", "reader_user_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(
TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// get entity with other user
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("user.name", "reader_user_2")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(ClientResponse.Status.NOT_FOUND,
response.getClientResponseStatus());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testGetEntitiesWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
// Put entity [4, 4] in domain 1
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 4");
entity.setEntityType("test type 4");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_1");
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_1")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelinePutResponse putResponse =
response.getEntity(TimelinePutResponse.class);
Assert.assertEquals(0, putResponse.getErrors().size());
// Put entity [4, 5] in domain 2
entities = new TimelineEntities();
entity = new TimelineEntity();
entity.setEntityId("test id 5");
entity.setEntityType("test type 4");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_2");
entities.addEntity(entity);
r = resource();
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_3")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
putResponse = response.getEntity(TimelinePutResponse.class);
Assert.assertEquals(0, putResponse.getErrors().size());
// Query entities of type 4
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "reader_user_1")
.path("test type 4")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entities = response.getEntity(TimelineEntities.class);
      // Reader 1 should only have access to entity [4, 4]
assertEquals(1, entities.getEntities().size());
assertEquals("test type 4", entities.getEntities().get(0).getEntityType());
assertEquals("test id 4", entities.getEntities().get(0).getEntityId());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testGetEventsWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
// Put entity [5, 5] in domain 1
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 5");
entity.setEntityType("test type 5");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_1");
TimelineEvent event = new TimelineEvent();
event.setEventType("event type 1");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_1")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelinePutResponse putResponse =
response.getEntity(TimelinePutResponse.class);
Assert.assertEquals(0, putResponse.getErrors().size());
// Put entity [5, 6] in domain 2
entities = new TimelineEntities();
entity = new TimelineEntity();
entity.setEntityId("test id 6");
entity.setEntityType("test type 5");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_2");
event = new TimelineEvent();
event.setEventType("event type 2");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);
entities.addEntity(entity);
r = resource();
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_3")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
putResponse = response.getEntity(TimelinePutResponse.class);
Assert.assertEquals(0, putResponse.getErrors().size());
      // Query events belonging to the entities of type 5
response = r.path("ws").path("v1").path("timeline")
.path("test type 5").path("events")
.queryParam("user.name", "reader_user_1")
.queryParam("entityId", "test id 5,test id 6")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineEvents events = response.getEntity(TimelineEvents.class);
      // Reader 1 should only have access to the events of entity [5, 5]
assertEquals(1, events.getAllEvents().size());
assertEquals("test id 5", events.getAllEvents().get(0).getEntityId());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testGetDomain() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("domain").path("domain_id_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineDomain domain = response.getEntity(TimelineDomain.class);
verifyDomain(domain, "domain_id_1");
}
@Test
public void testGetDomainYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("domain").path("domain_id_1")
.queryParam("user.name", "owner_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineDomain domain = response.getEntity(TimelineDomain.class);
verifyDomain(domain, "domain_id_1");
response = r.path("ws").path("v1").path("timeline")
.path("domain").path("domain_id_1")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
Assert.assertEquals(ClientResponse.Status.NOT_FOUND,
response.getClientResponseStatus());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testGetDomains() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("domain")
.queryParam("owner", "owner_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineDomains domains = response.getEntity(TimelineDomains.class);
Assert.assertEquals(2, domains.getDomains().size());
for (int i = 0; i < domains.getDomains().size(); ++i) {
verifyDomain(domains.getDomains().get(i),
i == 0 ? "domain_id_4" : "domain_id_1");
}
}
@Test
public void testGetDomainsYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("domain")
.queryParam("user.name", "owner_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelineDomains domains = response.getEntity(TimelineDomains.class);
Assert.assertEquals(2, domains.getDomains().size());
for (int i = 0; i < domains.getDomains().size(); ++i) {
verifyDomain(domains.getDomains().get(i),
i == 0 ? "domain_id_4" : "domain_id_1");
}
response = r.path("ws").path("v1").path("timeline")
.path("domain")
.queryParam("owner", "owner_1")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
domains = response.getEntity(TimelineDomains.class);
Assert.assertEquals(0, domains.getDomains().size());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
@Test
public void testPutDomain() throws Exception {
TimelineDomain domain = new TimelineDomain();
domain.setId("test_domain_id");
WebResource r = resource();
// No owner, will be rejected
ClientResponse response = r.path("ws").path("v1")
.path("timeline").path("domain")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, domain);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(ClientResponse.Status.FORBIDDEN,
response.getClientResponseStatus());
response = r.path("ws").path("v1")
.path("timeline").path("domain")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, domain);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
// Verify the domain exists
response = r.path("ws").path("v1").path("timeline")
.path("domain").path("test_domain_id")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
domain = response.getEntity(TimelineDomain.class);
Assert.assertNotNull(domain);
Assert.assertEquals("test_domain_id", domain.getId());
Assert.assertEquals("tester", domain.getOwner());
    Assert.assertNull(domain.getDescription());
// Update the domain
domain.setDescription("test_description");
response = r.path("ws").path("v1")
.path("timeline").path("domain")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, domain);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
// Verify the domain is updated
response = r.path("ws").path("v1").path("timeline")
.path("domain").path("test_domain_id")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
Assert.assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
domain = response.getEntity(TimelineDomain.class);
Assert.assertNotNull(domain);
Assert.assertEquals("test_domain_id", domain.getId());
Assert.assertEquals("test_description", domain.getDescription());
}
@Test
public void testPutDomainYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineDomain domain = new TimelineDomain();
domain.setId("test_domain_id_acl");
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1")
.path("timeline").path("domain")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, domain);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
// Update the domain by another user
response = r.path("ws").path("v1")
.path("timeline").path("domain")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, domain);
assertEquals(Status.FORBIDDEN.getStatusCode(), response.getStatus());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
private static void verifyDomain(TimelineDomain domain, String domainId) {
Assert.assertNotNull(domain);
Assert.assertEquals(domainId, domain.getId());
// The specific values have been verified in TestMemoryTimelineStore
Assert.assertNotNull(domain.getDescription());
Assert.assertNotNull(domain.getOwner());
Assert.assertNotNull(domain.getReaders());
Assert.assertNotNull(domain.getWriters());
Assert.assertNotNull(domain.getCreatedTime());
Assert.assertNotNull(domain.getModifiedTime());
}
}
| 44,344 | 43.080517 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/recovery/TestLeveldbTimelineStateStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.recovery;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestLeveldbTimelineStateStore {
private FileContext fsContext;
private File fsPath;
private Configuration conf;
private TimelineStateStore store;
@Before
public void setup() throws Exception {
fsPath = new File("target", getClass().getSimpleName() +
"-tmpDir").getAbsoluteFile();
fsContext = FileContext.getLocalFSFileContext();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_RECOVERY_ENABLED, true);
conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
LeveldbTimelineStateStore.class,
TimelineStateStore.class);
conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH,
fsPath.getAbsolutePath());
}
@After
public void tearDown() throws Exception {
if (store != null) {
store.stop();
}
if (fsContext != null) {
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
}
}
private LeveldbTimelineStateStore initAndStartTimelineServiceStateStoreService() {
store = new LeveldbTimelineStateStore();
store.init(conf);
store.start();
return (LeveldbTimelineStateStore) store;
}
@Test
public void testTokenStore() throws Exception {
initAndStartTimelineServiceStateStoreService();
TimelineServiceState state = store.loadState();
assertTrue("token state not empty", state.tokenState.isEmpty());
assertTrue("key state not empty", state.tokenMasterKeyState.isEmpty());
final DelegationKey key1 = new DelegationKey(1, 2, "keyData1".getBytes());
final TimelineDelegationTokenIdentifier token1 =
new TimelineDelegationTokenIdentifier(new Text("tokenOwner1"),
new Text("tokenRenewer1"), new Text("tokenUser1"));
token1.setSequenceNumber(1);
token1.getBytes();
final Long tokenDate1 = 1L;
final TimelineDelegationTokenIdentifier token2 =
new TimelineDelegationTokenIdentifier(new Text("tokenOwner2"),
new Text("tokenRenewer2"), new Text("tokenUser2"));
token2.setSequenceNumber(12345678);
token2.getBytes();
final Long tokenDate2 = 87654321L;
store.storeTokenMasterKey(key1);
try {
store.storeTokenMasterKey(key1);
fail("redundant store of key undetected");
} catch (IOException e) {
// expected
}
store.storeToken(token1, tokenDate1);
store.storeToken(token2, tokenDate2);
try {
store.storeToken(token1, tokenDate1);
fail("redundant store of token undetected");
} catch (IOException e) {
// expected
}
store.close();
initAndStartTimelineServiceStateStoreService();
state = store.loadState();
assertEquals("incorrect loaded token count", 2, state.tokenState.size());
assertTrue("missing token 1", state.tokenState.containsKey(token1));
assertEquals("incorrect token 1 date", tokenDate1,
state.tokenState.get(token1));
assertTrue("missing token 2", state.tokenState.containsKey(token2));
assertEquals("incorrect token 2 date", tokenDate2,
state.tokenState.get(token2));
assertEquals("incorrect master key count", 1,
state.tokenMasterKeyState.size());
assertTrue("missing master key 1",
state.tokenMasterKeyState.contains(key1));
assertEquals("incorrect latest sequence number", 12345678,
state.getLatestSequenceNumber());
final DelegationKey key2 = new DelegationKey(3, 4, "keyData2".getBytes());
final DelegationKey key3 = new DelegationKey(5, 6, "keyData3".getBytes());
final TimelineDelegationTokenIdentifier token3 =
new TimelineDelegationTokenIdentifier(new Text("tokenOwner3"),
new Text("tokenRenewer3"), new Text("tokenUser3"));
token3.setSequenceNumber(12345679);
token3.getBytes();
final Long tokenDate3 = 87654321L;
store.removeToken(token1);
store.storeTokenMasterKey(key2);
final Long newTokenDate2 = 975318642L;
store.updateToken(token2, newTokenDate2);
store.removeTokenMasterKey(key1);
store.storeTokenMasterKey(key3);
store.storeToken(token3, tokenDate3);
store.close();
initAndStartTimelineServiceStateStoreService();
state = store.loadState();
assertEquals("incorrect loaded token count", 2, state.tokenState.size());
assertFalse("token 1 not removed", state.tokenState.containsKey(token1));
assertTrue("missing token 2", state.tokenState.containsKey(token2));
assertEquals("incorrect token 2 date", newTokenDate2,
state.tokenState.get(token2));
assertTrue("missing token 3", state.tokenState.containsKey(token3));
assertEquals("incorrect token 3 date", tokenDate3,
state.tokenState.get(token3));
assertEquals("incorrect master key count", 2,
state.tokenMasterKeyState.size());
assertFalse("master key 1 not removed",
state.tokenMasterKeyState.contains(key1));
assertTrue("missing master key 2",
state.tokenMasterKeyState.contains(key2));
assertTrue("missing master key 3",
state.tokenMasterKeyState.contains(key3));
assertEquals("incorrect latest sequence number", 12345679,
state.getLatestSequenceNumber());
store.close();
}
@Test
public void testCheckVersion() throws IOException {
LeveldbTimelineStateStore store =
initAndStartTimelineServiceStateStoreService();
// default version
Version defaultVersion = store.getCurrentVersion();
Assert.assertEquals(defaultVersion, store.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
store.storeVersion(compatibleVersion);
Assert.assertEquals(compatibleVersion, store.loadVersion());
store.stop();
// overwrite the compatible version
store = initAndStartTimelineServiceStateStoreService();
Assert.assertEquals(defaultVersion, store.loadVersion());
// incompatible version
Version incompatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion() + 1,
defaultVersion.getMinorVersion());
store.storeVersion(incompatibleVersion);
store.stop();
try {
initAndStartTimelineServiceStateStoreService();
Assert.fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",
e.getMessage().contains("Incompatible version for timeline state store"));
}
}
}
| 8,319 | 37.878505 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
import com.google.common.base.Preconditions;
public class ApplicationHistoryClientService extends AbstractService implements
ApplicationHistoryProtocol {
private static final Log LOG = LogFactory
.getLog(ApplicationHistoryClientService.class);
private ApplicationHistoryManager history;
private Server server;
private InetSocketAddress bindAddress;
public ApplicationHistoryClientService(ApplicationHistoryManager history) {
super("ApplicationHistoryClientService");
this.history = history;
}
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
YarnRPC rpc = YarnRPC.create(conf);
InetSocketAddress address = conf.getSocketAddr(
YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
Preconditions.checkArgument(conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT);
server =
rpc.getServer(ApplicationHistoryProtocol.class, this,
address, conf, null, conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
// Enable service authorization?
if (conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
refreshServiceAcls(conf, new TimelinePolicyProvider());
}
server.start();
this.bindAddress =
conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
server.getListenerAddress());
LOG.info("Instantiated ApplicationHistoryClientService at "
+ this.bindAddress);
super.serviceStart();
}
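  // A minimal client-side sketch (assumed hostname; the address must resolve
  // to the same yarn.timeline-service.address this server binds above, whose
  // default port is DEFAULT_TIMELINE_SERVICE_PORT):
  //   Configuration conf = new YarnConfiguration();
  //   conf.set(YarnConfiguration.TIMELINE_SERVICE_ADDRESS, "ahs-host:10200");
  //   ApplicationHistoryProtocol client = (ApplicationHistoryProtocol)
  //       YarnRPC.create(conf).getProxy(ApplicationHistoryProtocol.class,
  //           conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
  //               YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
  //               YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT), conf);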
@Override
protected void serviceStop() throws Exception {
if (server != null) {
server.stop();
}
super.serviceStop();
}
@Private
public InetSocketAddress getBindAddress() {
return this.bindAddress;
}
private void refreshServiceAcls(Configuration configuration,
PolicyProvider policyProvider) {
this.server.refreshServiceAcl(configuration, policyProvider);
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request) throws YarnException,
IOException {
ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
try {
GetApplicationAttemptReportResponse response =
GetApplicationAttemptReportResponse.newInstance(history
.getApplicationAttempt(appAttemptId));
return response;
} catch (IOException e) {
LOG.error(e.getMessage(), e);
throw e;
}
}
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException, IOException {
GetApplicationAttemptsResponse response =
GetApplicationAttemptsResponse
.newInstance(new ArrayList<ApplicationAttemptReport>(history
.getApplicationAttempts(request.getApplicationId()).values()));
return response;
}
@Override
public GetApplicationReportResponse getApplicationReport(
GetApplicationReportRequest request) throws YarnException, IOException {
ApplicationId applicationId = request.getApplicationId();
try {
GetApplicationReportResponse response =
GetApplicationReportResponse.newInstance(history
.getApplication(applicationId));
return response;
} catch (IOException e) {
LOG.error(e.getMessage(), e);
throw e;
}
}
@Override
public GetApplicationsResponse
getApplications(GetApplicationsRequest request) throws YarnException,
IOException {
long startedBegin =
request.getStartRange() == null ? 0L : request.getStartRange()
.getMinimumLong();
long startedEnd =
request.getStartRange() == null ? Long.MAX_VALUE : request
.getStartRange().getMaximumLong();
GetApplicationsResponse response =
GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
history.getApplications(request.getLimit(), startedBegin, startedEnd)
.values()));
return response;
}
@Override
public GetContainerReportResponse getContainerReport(
GetContainerReportRequest request) throws YarnException, IOException {
ContainerId containerId = request.getContainerId();
try {
GetContainerReportResponse response =
GetContainerReportResponse.newInstance(history
.getContainer(containerId));
return response;
} catch (IOException e) {
LOG.error(e.getMessage(), e);
throw e;
}
}
@Override
public GetContainersResponse getContainers(GetContainersRequest request)
throws YarnException, IOException {
GetContainersResponse response =
GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
history.getContainers(request.getApplicationAttemptId()).values()));
return response;
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws YarnException, IOException {
return null;
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws YarnException, IOException {
return null;
}
}
| 9,519 | 39.338983 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
/**
 * The interface for writing application history data, exposing the methods to
 * write {@link ApplicationStartData}, {@link ApplicationFinishData},
 * {@link ApplicationAttemptStartData}, {@link ApplicationAttemptFinishData},
 * {@link ContainerStartData} and {@link ContainerFinishData}.
*/
@Private
@Unstable
public interface ApplicationHistoryWriter {
/**
* This method writes the information of <code>RMApp</code> that is available
* when it starts.
*
* @param appStart
* the record of the information of <code>RMApp</code> that is
* available when it starts
* @throws IOException
*/
void applicationStarted(ApplicationStartData appStart) throws IOException;
/**
* This method writes the information of <code>RMApp</code> that is available
* when it finishes.
*
* @param appFinish
* the record of the information of <code>RMApp</code> that is
* available when it finishes
* @throws IOException
*/
void applicationFinished(ApplicationFinishData appFinish) throws IOException;
/**
* This method writes the information of <code>RMAppAttempt</code> that is
* available when it starts.
*
* @param appAttemptStart
* the record of the information of <code>RMAppAttempt</code> that is
* available when it starts
* @throws IOException
*/
void applicationAttemptStarted(ApplicationAttemptStartData appAttemptStart)
throws IOException;
/**
* This method writes the information of <code>RMAppAttempt</code> that is
* available when it finishes.
*
* @param appAttemptFinish
* the record of the information of <code>RMAppAttempt</code> that is
* available when it finishes
* @throws IOException
*/
  void applicationAttemptFinished(
      ApplicationAttemptFinishData appAttemptFinish) throws IOException;
/**
* This method writes the information of <code>RMContainer</code> that is
* available when it starts.
*
* @param containerStart
* the record of the information of <code>RMContainer</code> that is
* available when it starts
* @throws IOException
*/
void containerStarted(ContainerStartData containerStart) throws IOException;
/**
* This method writes the information of <code>RMContainer</code> that is
* available when it finishes.
*
* @param containerFinish
* the record of the information of <code>RMContainer</code> that is
* available when it finishes
* @throws IOException
*/
void containerFinished(ContainerFinishData containerFinish)
throws IOException;
}
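/**
 * A minimal usage sketch, not part of the original interface: it shows the
 * expected invocation order against a writer instance. The record arguments
 * are assumed to be built by the caller.
 */
class ApplicationHistoryWriterUsageSketch {
  static void writeLifecycle(ApplicationHistoryWriter writer,
      ApplicationStartData appStart,
      ApplicationAttemptStartData attemptStart,
      ContainerStartData containerStart,
      ContainerFinishData containerFinish,
      ApplicationAttemptFinishData attemptFinish,
      ApplicationFinishData appFinish) throws IOException {
    writer.applicationStarted(appStart);              // first write for the app
    writer.applicationAttemptStarted(attemptStart);
    writer.containerStarted(containerStart);
    writer.containerFinished(containerFinish);
    writer.applicationAttemptFinished(attemptFinish);
    writer.applicationFinished(appFinish);            // last write for the app
  }
}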
| 4,292 | 36.99115 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.service.Service;
/**
 * This interface is the abstraction of the storage of application history
 * data. It is a {@link Service}, so that implementations can make use of the
 * service life cycle to initialize and clean up the storage. Users can
 * access the storage via the {@link ApplicationHistoryReader} and
 * {@link ApplicationHistoryWriter} interfaces.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public interface ApplicationHistoryStore extends Service,
ApplicationHistoryReader, ApplicationHistoryWriter {
}
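/**
 * A minimal lifecycle sketch, not part of the original interface: since the
 * store is a {@link Service}, callers drive init/start/stop around reads and
 * writes. The store instance and configuration are assumed to be supplied by
 * the caller (FileSystemApplicationHistoryStore is one concrete choice).
 */
class ApplicationHistoryStoreLifecycleSketch {
  static void run(ApplicationHistoryStore store,
      org.apache.hadoop.conf.Configuration conf) throws Exception {
    store.init(conf);  // initialize the storage via the service life cycle
    store.start();
    try {
      store.getAllApplications();  // reads come from ApplicationHistoryReader
    } finally {
      store.stop();    // clean up the storage via the service life cycle
    }
  }
}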
| 1,571 | 40.368421 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.timeline.NameValuePair;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
implements
ApplicationHistoryManager {
private static final Log LOG = LogFactory
.getLog(ApplicationHistoryManagerOnTimelineStore.class);
@VisibleForTesting
static final String UNAVAILABLE = "N/A";
private TimelineDataManager timelineDataManager;
private ApplicationACLsManager aclsManager;
private String serverHttpAddress;
private long maxLoadedApplications;
public ApplicationHistoryManagerOnTimelineStore(
TimelineDataManager timelineDataManager,
ApplicationACLsManager aclsManager) {
super(ApplicationHistoryManagerOnTimelineStore.class.getName());
this.timelineDataManager = timelineDataManager;
this.aclsManager = aclsManager;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
serverHttpAddress = WebAppUtils.getHttpSchemePrefix(conf) +
WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
maxLoadedApplications =
conf.getLong(YarnConfiguration.APPLICATION_HISTORY_MAX_APPS,
YarnConfiguration.DEFAULT_APPLICATION_HISTORY_MAX_APPS);
super.serviceInit(conf);
}
@Override
public ApplicationReport getApplication(ApplicationId appId)
throws YarnException, IOException {
return getApplication(appId, ApplicationReportField.ALL).appReport;
}
@Override
public Map<ApplicationId, ApplicationReport> getApplications(long appsNum,
long appStartedTimeBegin, long appStartedTimeEnd) throws YarnException,
IOException {
TimelineEntities entities =
timelineDataManager.getEntities(
ApplicationMetricsConstants.ENTITY_TYPE, null, null,
appStartedTimeBegin, appStartedTimeEnd, null, null,
appsNum == Long.MAX_VALUE ? this.maxLoadedApplications : appsNum,
EnumSet.allOf(Field.class), UserGroupInformation.getLoginUser());
Map<ApplicationId, ApplicationReport> apps =
new LinkedHashMap<ApplicationId, ApplicationReport>();
if (entities != null && entities.getEntities() != null) {
for (TimelineEntity entity : entities.getEntities()) {
try {
ApplicationReportExt app =
generateApplicationReport(entity, ApplicationReportField.ALL);
apps.put(app.appReport.getApplicationId(), app.appReport);
} catch (Exception e) {
LOG.error("Error on generating application report for " +
entity.getEntityId(), e);
}
}
}
return apps;
}
@Override
public Map<ApplicationAttemptId, ApplicationAttemptReport>
getApplicationAttempts(ApplicationId appId)
throws YarnException, IOException {
ApplicationReportExt app = getApplication(
appId, ApplicationReportField.USER_AND_ACLS);
checkAccess(app);
TimelineEntities entities = timelineDataManager.getEntities(
AppAttemptMetricsConstants.ENTITY_TYPE,
new NameValuePair(
AppAttemptMetricsConstants.PARENT_PRIMARY_FILTER, appId
.toString()), null, null, null, null, null,
Long.MAX_VALUE, EnumSet.allOf(Field.class),
UserGroupInformation.getLoginUser());
Map<ApplicationAttemptId, ApplicationAttemptReport> appAttempts =
new LinkedHashMap<ApplicationAttemptId, ApplicationAttemptReport>();
for (TimelineEntity entity : entities.getEntities()) {
ApplicationAttemptReport appAttempt =
convertToApplicationAttemptReport(entity);
appAttempts.put(appAttempt.getApplicationAttemptId(), appAttempt);
}
return appAttempts;
}
@Override
public ApplicationAttemptReport getApplicationAttempt(
ApplicationAttemptId appAttemptId) throws YarnException, IOException {
return getApplicationAttempt(appAttemptId, true);
}
private ApplicationAttemptReport getApplicationAttempt(
ApplicationAttemptId appAttemptId, boolean checkACLs)
throws YarnException, IOException {
if (checkACLs) {
ApplicationReportExt app = getApplication(
appAttemptId.getApplicationId(),
ApplicationReportField.USER_AND_ACLS);
checkAccess(app);
}
TimelineEntity entity = timelineDataManager.getEntity(
AppAttemptMetricsConstants.ENTITY_TYPE,
appAttemptId.toString(), EnumSet.allOf(Field.class),
UserGroupInformation.getLoginUser());
if (entity == null) {
throw new ApplicationAttemptNotFoundException(
"The entity for application attempt " + appAttemptId +
" doesn't exist in the timeline store");
} else {
return convertToApplicationAttemptReport(entity);
}
}
@Override
public ContainerReport getContainer(ContainerId containerId)
throws YarnException, IOException {
ApplicationReportExt app = getApplication(
containerId.getApplicationAttemptId().getApplicationId(),
ApplicationReportField.USER_AND_ACLS);
checkAccess(app);
TimelineEntity entity = timelineDataManager.getEntity(
ContainerMetricsConstants.ENTITY_TYPE,
containerId.toString(), EnumSet.allOf(Field.class),
UserGroupInformation.getLoginUser());
if (entity == null) {
throw new ContainerNotFoundException(
"The entity for container " + containerId +
" doesn't exist in the timeline store");
} else {
return convertToContainerReport(
entity, serverHttpAddress, app.appReport.getUser());
}
}
@Override
public ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
throws YarnException, IOException {
ApplicationAttemptReport appAttempt =
getApplicationAttempt(appAttemptId, false);
return getContainer(appAttempt.getAMContainerId());
}
@Override
public Map<ContainerId, ContainerReport> getContainers(
ApplicationAttemptId appAttemptId) throws YarnException, IOException {
ApplicationReportExt app = getApplication(
appAttemptId.getApplicationId(), ApplicationReportField.USER_AND_ACLS);
checkAccess(app);
TimelineEntities entities = timelineDataManager.getEntities(
ContainerMetricsConstants.ENTITY_TYPE,
new NameValuePair(
ContainerMetricsConstants.PARENT_PRIMARIY_FILTER,
appAttemptId.toString()), null, null, null,
null, null, Long.MAX_VALUE, EnumSet.allOf(Field.class),
UserGroupInformation.getLoginUser());
Map<ContainerId, ContainerReport> containers =
new LinkedHashMap<ContainerId, ContainerReport>();
if (entities != null && entities.getEntities() != null) {
for (TimelineEntity entity : entities.getEntities()) {
ContainerReport container = convertToContainerReport(
entity, serverHttpAddress, app.appReport.getUser());
containers.put(container.getContainerId(), container);
}
}
return containers;
}
private static ApplicationReportExt convertToApplicationReport(
TimelineEntity entity, ApplicationReportField field) {
String user = null;
String queue = null;
String name = null;
String type = null;
boolean unmanagedApplication = false;
long createdTime = 0;
long finishedTime = 0;
float progress = 0.0f;
ApplicationAttemptId latestApplicationAttemptId = null;
String diagnosticsInfo = null;
FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
YarnApplicationState state = YarnApplicationState.ACCEPTED;
ApplicationResourceUsageReport appResources = null;
Set<String> appTags = null;
Map<ApplicationAccessType, String> appViewACLs =
new HashMap<ApplicationAccessType, String>();
Map<String, Object> entityInfo = entity.getOtherInfo();
if (entityInfo != null) {
if (entityInfo.containsKey(ApplicationMetricsConstants.USER_ENTITY_INFO)) {
user =
entityInfo.get(ApplicationMetricsConstants.USER_ENTITY_INFO)
.toString();
}
if (entityInfo.containsKey(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO)) {
String appViewACLsStr = entityInfo.get(
ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO).toString();
if (appViewACLsStr.length() > 0) {
appViewACLs.put(ApplicationAccessType.VIEW_APP, appViewACLsStr);
}
}
if (field == ApplicationReportField.USER_AND_ACLS) {
return new ApplicationReportExt(ApplicationReport.newInstance(
ConverterUtils.toApplicationId(entity.getEntityId()),
latestApplicationAttemptId, user, queue, name, null, -1, null, state,
diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null,
null, progress, type, null, appTags,
unmanagedApplication), appViewACLs);
}
if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
queue =
entityInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)
.toString();
}
if (entityInfo.containsKey(ApplicationMetricsConstants.NAME_ENTITY_INFO)) {
name =
entityInfo.get(ApplicationMetricsConstants.NAME_ENTITY_INFO)
.toString();
}
if (entityInfo.containsKey(ApplicationMetricsConstants.TYPE_ENTITY_INFO)) {
type =
entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO)
.toString();
}
if (entityInfo
.containsKey(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO)) {
unmanagedApplication =
Boolean.parseBoolean(entityInfo.get(
ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO)
.toString());
}
if (entityInfo.containsKey(ApplicationMetricsConstants.APP_CPU_METRICS)) {
        long vcoreSeconds = Long.parseLong(entityInfo.get(
            ApplicationMetricsConstants.APP_CPU_METRICS).toString());
        long memorySeconds = Long.parseLong(entityInfo.get(
            ApplicationMetricsConstants.APP_MEM_METRICS).toString());
        appResources = ApplicationResourceUsageReport
            .newInstance(0, 0, null, null, null, memorySeconds, vcoreSeconds);
}
if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
appTags = new HashSet<String>();
Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
        if (obj instanceof Collection<?>) {
for(Object o : (Collection<?>)obj) {
if (o != null) {
appTags.add(o.toString());
}
}
}
}
}
List<TimelineEvent> events = entity.getEvents();
if (events != null) {
for (TimelineEvent event : events) {
if (event.getEventType().equals(
ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
createdTime = event.getTimestamp();
} else if (event.getEventType().equals(
ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
progress=1.0F;
finishedTime = event.getTimestamp();
Map<String, Object> eventInfo = event.getEventInfo();
if (eventInfo == null) {
continue;
}
if (eventInfo
.containsKey(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO)) {
latestApplicationAttemptId =
ConverterUtils
.toApplicationAttemptId(
eventInfo
.get(
ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO)
.toString());
}
if (eventInfo
.containsKey(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) {
diagnosticsInfo =
eventInfo.get(
ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)
.toString();
}
if (eventInfo
.containsKey(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)) {
finalStatus =
FinalApplicationStatus.valueOf(eventInfo.get(
ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)
.toString());
}
if (eventInfo
.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
state =
YarnApplicationState.valueOf(eventInfo.get(
ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
}
}
}
}
return new ApplicationReportExt(ApplicationReport.newInstance(
ConverterUtils.toApplicationId(entity.getEntityId()),
latestApplicationAttemptId, user, queue, name, null, -1, null, state,
diagnosticsInfo, null, createdTime, finishedTime, finalStatus, appResources,
null, progress, type, null, appTags, unmanagedApplication), appViewACLs);
}
private static ApplicationAttemptReport convertToApplicationAttemptReport(
TimelineEntity entity) {
String host = null;
int rpcPort = -1;
ContainerId amContainerId = null;
String trackingUrl = null;
String originalTrackingUrl = null;
String diagnosticsInfo = null;
YarnApplicationAttemptState state = null;
List<TimelineEvent> events = entity.getEvents();
if (events != null) {
for (TimelineEvent event : events) {
if (event.getEventType().equals(
AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE)) {
Map<String, Object> eventInfo = event.getEventInfo();
if (eventInfo == null) {
continue;
}
if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) {
host =
eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO)
.toString();
}
if (eventInfo
.containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) {
rpcPort = (Integer) eventInfo.get(
AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO);
}
if (eventInfo
.containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) {
amContainerId =
ConverterUtils.toContainerId(eventInfo.get(
AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)
.toString());
}
} else if (event.getEventType().equals(
AppAttemptMetricsConstants.FINISHED_EVENT_TYPE)) {
Map<String, Object> eventInfo = event.getEventInfo();
if (eventInfo == null) {
continue;
}
if (eventInfo
.containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) {
trackingUrl =
eventInfo.get(
AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)
.toString();
}
if (eventInfo
.containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) {
originalTrackingUrl =
eventInfo
.get(
AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)
.toString();
}
if (eventInfo
.containsKey(AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) {
diagnosticsInfo =
eventInfo.get(
AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)
.toString();
}
if (eventInfo
.containsKey(AppAttemptMetricsConstants.STATE_EVENT_INFO)) {
state =
YarnApplicationAttemptState.valueOf(eventInfo.get(
AppAttemptMetricsConstants.STATE_EVENT_INFO)
.toString());
}
}
}
}
return ApplicationAttemptReport.newInstance(
ConverterUtils.toApplicationAttemptId(entity.getEntityId()),
host, rpcPort, trackingUrl, originalTrackingUrl, diagnosticsInfo,
state, amContainerId);
}
private static ContainerReport convertToContainerReport(
TimelineEntity entity, String serverHttpAddress, String user) {
int allocatedMem = 0;
int allocatedVcore = 0;
String allocatedHost = null;
int allocatedPort = -1;
int allocatedPriority = 0;
long createdTime = 0;
long finishedTime = 0;
String diagnosticsInfo = null;
int exitStatus = ContainerExitStatus.INVALID;
ContainerState state = null;
String nodeHttpAddress = null;
Map<String, Object> entityInfo = entity.getOtherInfo();
if (entityInfo != null) {
if (entityInfo
.containsKey(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO)) {
allocatedMem = (Integer) entityInfo.get(
ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO);
}
if (entityInfo
.containsKey(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO)) {
allocatedVcore = (Integer) entityInfo.get(
ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO);
}
if (entityInfo
.containsKey(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO)) {
allocatedHost =
entityInfo
.get(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO)
.toString();
}
if (entityInfo
.containsKey(ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO)) {
allocatedPort = (Integer) entityInfo.get(
ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO);
}
if (entityInfo
.containsKey(ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO)) {
allocatedPriority = (Integer) entityInfo.get(
ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO);
}
if (entityInfo.containsKey(
ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO)) {
nodeHttpAddress =
(String) entityInfo
.get(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO);
}
}
List<TimelineEvent> events = entity.getEvents();
if (events != null) {
for (TimelineEvent event : events) {
if (event.getEventType().equals(
ContainerMetricsConstants.CREATED_EVENT_TYPE)) {
createdTime = event.getTimestamp();
} else if (event.getEventType().equals(
ContainerMetricsConstants.FINISHED_EVENT_TYPE)) {
finishedTime = event.getTimestamp();
Map<String, Object> eventInfo = event.getEventInfo();
if (eventInfo == null) {
continue;
}
if (eventInfo
.containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) {
diagnosticsInfo =
eventInfo.get(
ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)
.toString();
}
if (eventInfo
.containsKey(ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO)) {
exitStatus = (Integer) eventInfo.get(
ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO);
}
if (eventInfo
.containsKey(ContainerMetricsConstants.STATE_EVENT_INFO)) {
state =
ContainerState.valueOf(eventInfo.get(
ContainerMetricsConstants.STATE_EVENT_INFO).toString());
}
}
}
}
NodeId allocatedNode = NodeId.newInstance(allocatedHost, allocatedPort);
ContainerId containerId =
ConverterUtils.toContainerId(entity.getEntityId());
String logUrl = WebAppUtils.getAggregatedLogURL(
serverHttpAddress,
allocatedNode.toString(),
containerId.toString(),
containerId.toString(),
user);
    return ContainerReport.newInstance(containerId,
        Resource.newInstance(allocatedMem, allocatedVcore), allocatedNode,
        Priority.newInstance(allocatedPriority),
        createdTime, finishedTime, diagnosticsInfo, logUrl, exitStatus, state,
        nodeHttpAddress);
}
private ApplicationReportExt generateApplicationReport(TimelineEntity entity,
ApplicationReportField field) throws YarnException, IOException {
ApplicationReportExt app = convertToApplicationReport(entity, field);
// If only user and acls are pulled to check attempt(s)/container(s) access
// control, we can return immediately
if (field == ApplicationReportField.USER_AND_ACLS) {
return app;
}
try {
checkAccess(app);
if (app.appReport.getCurrentApplicationAttemptId() != null) {
ApplicationAttemptReport appAttempt = getApplicationAttempt(
app.appReport.getCurrentApplicationAttemptId(), false);
app.appReport.setHost(appAttempt.getHost());
app.appReport.setRpcPort(appAttempt.getRpcPort());
app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
}
} catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
      // AuthorizationException is thrown when the user doesn't have access;
      // ApplicationAttemptNotFoundException when the app finished before the
      // first attempt was created.
app.appReport.setDiagnostics(null);
app.appReport.setCurrentApplicationAttemptId(null);
}
if (app.appReport.getCurrentApplicationAttemptId() == null) {
app.appReport.setCurrentApplicationAttemptId(
ApplicationAttemptId.newInstance(app.appReport.getApplicationId(), -1));
}
if (app.appReport.getHost() == null) {
app.appReport.setHost(UNAVAILABLE);
}
if (app.appReport.getRpcPort() < 0) {
app.appReport.setRpcPort(-1);
}
if (app.appReport.getTrackingUrl() == null) {
app.appReport.setTrackingUrl(UNAVAILABLE);
}
if (app.appReport.getOriginalTrackingUrl() == null) {
app.appReport.setOriginalTrackingUrl(UNAVAILABLE);
}
if (app.appReport.getDiagnostics() == null) {
app.appReport.setDiagnostics("");
}
return app;
}
private ApplicationReportExt getApplication(ApplicationId appId,
ApplicationReportField field) throws YarnException, IOException {
TimelineEntity entity = timelineDataManager.getEntity(
ApplicationMetricsConstants.ENTITY_TYPE,
appId.toString(), EnumSet.allOf(Field.class),
UserGroupInformation.getLoginUser());
if (entity == null) {
throw new ApplicationNotFoundException("The entity for application " +
appId + " doesn't exist in the timeline store");
} else {
return generateApplicationReport(entity, field);
}
}
private void checkAccess(ApplicationReportExt app)
throws YarnException, IOException {
if (app.appViewACLs != null) {
aclsManager.addApplication(
app.appReport.getApplicationId(), app.appViewACLs);
try {
if (!aclsManager.checkAccess(UserGroupInformation.getCurrentUser(),
ApplicationAccessType.VIEW_APP, app.appReport.getUser(),
app.appReport.getApplicationId())) {
throw new AuthorizationException("User "
+ UserGroupInformation.getCurrentUser().getShortUserName()
+ " does not have privilage to see this application "
+ app.appReport.getApplicationId());
}
} finally {
aclsManager.removeApplication(app.appReport.getApplicationId());
}
}
}
  private enum ApplicationReportField {
ALL, // retrieve all the fields
USER_AND_ACLS // retrieve user and ACLs info only
}
private static class ApplicationReportExt {
private ApplicationReport appReport;
private Map<ApplicationAccessType, String> appViewACLs;
public ApplicationReportExt(
ApplicationReport appReport,
Map<ApplicationAccessType, String> appViewACLs) {
this.appReport = appReport;
this.appViewACLs = appViewACLs;
}
}
}
| 27,711 | 41.179604 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.file.tfile.TFile;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptFinishDataPBImpl;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptStartDataPBImpl;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationFinishDataPBImpl;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationStartDataPBImpl;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ContainerFinishDataPBImpl;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ContainerStartDataPBImpl;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.protobuf.InvalidProtocolBufferException;
/**
 * File system implementation of {@link ApplicationHistoryStore}. In this
 * implementation, each application has exactly one file in the file system,
 * which contains all the history data of the application and of its attempts
 * and containers. {@link #applicationStarted(ApplicationStartData)} must be
 * invoked first, before any other history data of the application is written,
 * and it opens the file; {@link #applicationFinished(ApplicationFinishData)}
 * is expected to be the last write operation and closes the file.
*/
@Public
@Unstable
public class FileSystemApplicationHistoryStore extends AbstractService
implements ApplicationHistoryStore {
private static final Log LOG = LogFactory
.getLog(FileSystemApplicationHistoryStore.class);
private static final String ROOT_DIR_NAME = "ApplicationHistoryDataRoot";
private static final int MIN_BLOCK_SIZE = 256 * 1024;
private static final String START_DATA_SUFFIX = "_start";
private static final String FINISH_DATA_SUFFIX = "_finish";
private static final FsPermission ROOT_DIR_UMASK = FsPermission
.createImmutable((short) 0740);
private static final FsPermission HISTORY_FILE_UMASK = FsPermission
.createImmutable((short) 0640);
private FileSystem fs;
private Path rootDirPath;
private ConcurrentMap<ApplicationId, HistoryFileWriter> outstandingWriters =
new ConcurrentHashMap<ApplicationId, HistoryFileWriter>();
public FileSystemApplicationHistoryStore() {
super(FileSystemApplicationHistoryStore.class.getName());
}
protected FileSystem getFileSystem(Path path, Configuration conf) throws Exception {
return path.getFileSystem(conf);
}
@Override
public void serviceStart() throws Exception {
Configuration conf = getConfig();
Path fsWorkingPath =
new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
conf.get("hadoop.tmp.dir") + "/yarn/timeline/generic-history"));
rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME);
try {
fs = getFileSystem(fsWorkingPath, conf);
if (!fs.isDirectory(rootDirPath)) {
fs.mkdirs(rootDirPath);
fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
}
} catch (IOException e) {
LOG.error("Error when initializing FileSystemHistoryStorage", e);
throw e;
}
super.serviceStart();
}
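  // A minimal configuration sketch (the HDFS URI is an assumption; any
  // Hadoop-compatible file system works, and the default computed above lands
  // under hadoop.tmp.dir):
  //   Configuration conf = new YarnConfiguration();
  //   conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
  //       "hdfs://namenode:8020/yarn/timeline/generic-history");
  //   ApplicationHistoryStore store = new FileSystemApplicationHistoryStore();
  //   store.init(conf);
  //   store.start();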
@Override
public void serviceStop() throws Exception {
try {
for (Entry<ApplicationId, HistoryFileWriter> entry : outstandingWriters
.entrySet()) {
entry.getValue().close();
}
outstandingWriters.clear();
} finally {
IOUtils.cleanup(LOG, fs);
}
super.serviceStop();
}
@Override
public ApplicationHistoryData getApplication(ApplicationId appId)
throws IOException {
HistoryFileReader hfReader = getHistoryFileReader(appId);
try {
boolean readStartData = false;
boolean readFinishData = false;
ApplicationHistoryData historyData =
ApplicationHistoryData.newInstance(appId, null, null, null, null,
Long.MIN_VALUE, Long.MIN_VALUE, Long.MAX_VALUE, null,
FinalApplicationStatus.UNDEFINED, null);
while ((!readStartData || !readFinishData) && hfReader.hasNext()) {
HistoryFileReader.Entry entry = hfReader.next();
if (entry.key.id.equals(appId.toString())) {
if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
ApplicationStartData startData =
parseApplicationStartData(entry.value);
mergeApplicationHistoryData(historyData, startData);
readStartData = true;
} else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
ApplicationFinishData finishData =
parseApplicationFinishData(entry.value);
mergeApplicationHistoryData(historyData, finishData);
readFinishData = true;
}
}
}
if (!readStartData && !readFinishData) {
return null;
}
if (!readStartData) {
LOG.warn("Start information is missing for application " + appId);
}
if (!readFinishData) {
LOG.warn("Finish information is missing for application " + appId);
}
LOG.info("Completed reading history information of application " + appId);
return historyData;
} catch (IOException e) {
LOG.error("Error when reading history file of application " + appId, e);
throw e;
} finally {
hfReader.close();
}
}
@Override
public Map<ApplicationId, ApplicationHistoryData> getAllApplications()
throws IOException {
Map<ApplicationId, ApplicationHistoryData> historyDataMap =
new HashMap<ApplicationId, ApplicationHistoryData>();
FileStatus[] files = fs.listStatus(rootDirPath);
for (FileStatus file : files) {
ApplicationId appId =
ConverterUtils.toApplicationId(file.getPath().getName());
try {
ApplicationHistoryData historyData = getApplication(appId);
if (historyData != null) {
historyDataMap.put(appId, historyData);
}
} catch (IOException e) {
        // Swallow the exception so that fetching the next
        // ApplicationHistoryData is not disturbed
LOG.error("History information of application " + appId
+ " is not included into the result due to the exception", e);
}
}
return historyDataMap;
}
@Override
public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
getApplicationAttempts(ApplicationId appId) throws IOException {
Map<ApplicationAttemptId, ApplicationAttemptHistoryData> historyDataMap =
new HashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>();
HistoryFileReader hfReader = getHistoryFileReader(appId);
try {
while (hfReader.hasNext()) {
HistoryFileReader.Entry entry = hfReader.next();
if (entry.key.id.startsWith(
ConverterUtils.APPLICATION_ATTEMPT_PREFIX)) {
ApplicationAttemptId appAttemptId =
ConverterUtils.toApplicationAttemptId(entry.key.id);
if (appAttemptId.getApplicationId().equals(appId)) {
ApplicationAttemptHistoryData historyData =
historyDataMap.get(appAttemptId);
if (historyData == null) {
historyData = ApplicationAttemptHistoryData.newInstance(
appAttemptId, null, -1, null, null, null,
FinalApplicationStatus.UNDEFINED, null);
historyDataMap.put(appAttemptId, historyData);
}
if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
mergeApplicationAttemptHistoryData(historyData,
parseApplicationAttemptStartData(entry.value));
} else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
mergeApplicationAttemptHistoryData(historyData,
parseApplicationAttemptFinishData(entry.value));
}
}
}
}
LOG.info("Completed reading history information of all application"
+ " attempts of application " + appId);
} catch (IOException e) {
LOG.info("Error when reading history information of some application"
+ " attempts of application " + appId);
} finally {
hfReader.close();
}
return historyDataMap;
}
@Override
public ApplicationAttemptHistoryData getApplicationAttempt(
ApplicationAttemptId appAttemptId) throws IOException {
HistoryFileReader hfReader =
getHistoryFileReader(appAttemptId.getApplicationId());
try {
boolean readStartData = false;
boolean readFinishData = false;
ApplicationAttemptHistoryData historyData =
ApplicationAttemptHistoryData.newInstance(appAttemptId, null, -1,
null, null, null, FinalApplicationStatus.UNDEFINED, null);
while ((!readStartData || !readFinishData) && hfReader.hasNext()) {
HistoryFileReader.Entry entry = hfReader.next();
if (entry.key.id.equals(appAttemptId.toString())) {
if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
ApplicationAttemptStartData startData =
parseApplicationAttemptStartData(entry.value);
mergeApplicationAttemptHistoryData(historyData, startData);
readStartData = true;
} else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
ApplicationAttemptFinishData finishData =
parseApplicationAttemptFinishData(entry.value);
mergeApplicationAttemptHistoryData(historyData, finishData);
readFinishData = true;
}
}
}
if (!readStartData && !readFinishData) {
return null;
}
if (!readStartData) {
LOG.warn("Start information is missing for application attempt "
+ appAttemptId);
}
if (!readFinishData) {
LOG.warn("Finish information is missing for application attempt "
+ appAttemptId);
}
LOG.info("Completed reading history information of application attempt "
+ appAttemptId);
return historyData;
} catch (IOException e) {
LOG.error("Error when reading history file of application attempt"
+ appAttemptId, e);
throw e;
} finally {
hfReader.close();
}
}
@Override
public ContainerHistoryData getContainer(ContainerId containerId)
throws IOException {
HistoryFileReader hfReader =
getHistoryFileReader(containerId.getApplicationAttemptId()
.getApplicationId());
try {
boolean readStartData = false;
boolean readFinishData = false;
ContainerHistoryData historyData =
ContainerHistoryData
.newInstance(containerId, null, null, null, Long.MIN_VALUE,
Long.MAX_VALUE, null, Integer.MAX_VALUE, null);
while ((!readStartData || !readFinishData) && hfReader.hasNext()) {
HistoryFileReader.Entry entry = hfReader.next();
if (entry.key.id.equals(containerId.toString())) {
if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
ContainerStartData startData = parseContainerStartData(entry.value);
mergeContainerHistoryData(historyData, startData);
readStartData = true;
} else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
ContainerFinishData finishData =
parseContainerFinishData(entry.value);
mergeContainerHistoryData(historyData, finishData);
readFinishData = true;
}
}
}
if (!readStartData && !readFinishData) {
return null;
}
if (!readStartData) {
LOG.warn("Start information is missing for container " + containerId);
}
if (!readFinishData) {
LOG.warn("Finish information is missing for container " + containerId);
}
LOG.info("Completed reading history information of container "
+ containerId);
return historyData;
} catch (IOException e) {
LOG.error("Error when reading history file of container " + containerId, e);
throw e;
} finally {
hfReader.close();
}
}
@Override
public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
throws IOException {
ApplicationAttemptHistoryData attemptHistoryData =
getApplicationAttempt(appAttemptId);
if (attemptHistoryData == null
|| attemptHistoryData.getMasterContainerId() == null) {
return null;
}
return getContainer(attemptHistoryData.getMasterContainerId());
}
@Override
public Map<ContainerId, ContainerHistoryData> getContainers(
ApplicationAttemptId appAttemptId) throws IOException {
Map<ContainerId, ContainerHistoryData> historyDataMap =
new HashMap<ContainerId, ContainerHistoryData>();
HistoryFileReader hfReader =
getHistoryFileReader(appAttemptId.getApplicationId());
try {
while (hfReader.hasNext()) {
HistoryFileReader.Entry entry = hfReader.next();
if (entry.key.id.startsWith(ConverterUtils.CONTAINER_PREFIX)) {
ContainerId containerId =
ConverterUtils.toContainerId(entry.key.id);
if (containerId.getApplicationAttemptId().equals(appAttemptId)) {
ContainerHistoryData historyData =
historyDataMap.get(containerId);
if (historyData == null) {
historyData = ContainerHistoryData.newInstance(
containerId, null, null, null, Long.MIN_VALUE,
Long.MAX_VALUE, null, Integer.MAX_VALUE, null);
historyDataMap.put(containerId, historyData);
}
if (entry.key.suffix.equals(START_DATA_SUFFIX)) {
mergeContainerHistoryData(historyData,
parseContainerStartData(entry.value));
} else if (entry.key.suffix.equals(FINISH_DATA_SUFFIX)) {
mergeContainerHistoryData(historyData,
parseContainerFinishData(entry.value));
}
}
}
}
LOG.info("Completed reading history information of all conatiners"
+ " of application attempt " + appAttemptId);
} catch (IOException e) {
LOG.info("Error when reading history information of some containers"
+ " of application attempt " + appAttemptId);
} finally {
hfReader.close();
}
return historyDataMap;
}
@Override
public void applicationStarted(ApplicationStartData appStart)
throws IOException {
HistoryFileWriter hfWriter =
outstandingWriters.get(appStart.getApplicationId());
if (hfWriter == null) {
Path applicationHistoryFile =
new Path(rootDirPath, appStart.getApplicationId().toString());
try {
hfWriter = new HistoryFileWriter(applicationHistoryFile);
LOG.info("Opened history file of application "
+ appStart.getApplicationId());
} catch (IOException e) {
LOG.error("Error when openning history file of application "
+ appStart.getApplicationId(), e);
throw e;
}
outstandingWriters.put(appStart.getApplicationId(), hfWriter);
} else {
throw new IOException("History file of application "
+ appStart.getApplicationId() + " is already opened");
}
assert appStart instanceof ApplicationStartDataPBImpl;
try {
hfWriter.writeHistoryData(new HistoryDataKey(appStart.getApplicationId()
.toString(), START_DATA_SUFFIX),
((ApplicationStartDataPBImpl) appStart).getProto().toByteArray());
LOG.info("Start information of application "
+ appStart.getApplicationId() + " is written");
} catch (IOException e) {
LOG.error("Error when writing start information of application "
+ appStart.getApplicationId(), e);
throw e;
}
}
@Override
public void applicationFinished(ApplicationFinishData appFinish)
throws IOException {
HistoryFileWriter hfWriter =
getHistoryFileWriter(appFinish.getApplicationId());
assert appFinish instanceof ApplicationFinishDataPBImpl;
try {
hfWriter.writeHistoryData(new HistoryDataKey(appFinish.getApplicationId()
.toString(), FINISH_DATA_SUFFIX),
((ApplicationFinishDataPBImpl) appFinish).getProto().toByteArray());
LOG.info("Finish information of application "
+ appFinish.getApplicationId() + " is written");
} catch (IOException e) {
LOG.error("Error when writing finish information of application "
+ appFinish.getApplicationId(), e);
throw e;
} finally {
hfWriter.close();
outstandingWriters.remove(appFinish.getApplicationId());
}
}
@Override
public void applicationAttemptStarted(
ApplicationAttemptStartData appAttemptStart) throws IOException {
HistoryFileWriter hfWriter =
getHistoryFileWriter(appAttemptStart.getApplicationAttemptId()
.getApplicationId());
assert appAttemptStart instanceof ApplicationAttemptStartDataPBImpl;
try {
hfWriter.writeHistoryData(new HistoryDataKey(appAttemptStart
.getApplicationAttemptId().toString(), START_DATA_SUFFIX),
((ApplicationAttemptStartDataPBImpl) appAttemptStart).getProto()
.toByteArray());
LOG.info("Start information of application attempt "
+ appAttemptStart.getApplicationAttemptId() + " is written");
} catch (IOException e) {
LOG.error("Error when writing start information of application attempt "
+ appAttemptStart.getApplicationAttemptId(), e);
throw e;
}
}
@Override
public void applicationAttemptFinished(
ApplicationAttemptFinishData appAttemptFinish) throws IOException {
HistoryFileWriter hfWriter =
getHistoryFileWriter(appAttemptFinish.getApplicationAttemptId()
.getApplicationId());
assert appAttemptFinish instanceof ApplicationAttemptFinishDataPBImpl;
try {
hfWriter.writeHistoryData(new HistoryDataKey(appAttemptFinish
.getApplicationAttemptId().toString(), FINISH_DATA_SUFFIX),
((ApplicationAttemptFinishDataPBImpl) appAttemptFinish).getProto()
.toByteArray());
LOG.info("Finish information of application attempt "
+ appAttemptFinish.getApplicationAttemptId() + " is written");
} catch (IOException e) {
LOG.error("Error when writing finish information of application attempt "
+ appAttemptFinish.getApplicationAttemptId(), e);
throw e;
}
}
@Override
public void containerStarted(ContainerStartData containerStart)
throws IOException {
HistoryFileWriter hfWriter =
getHistoryFileWriter(containerStart.getContainerId()
.getApplicationAttemptId().getApplicationId());
assert containerStart instanceof ContainerStartDataPBImpl;
try {
hfWriter.writeHistoryData(new HistoryDataKey(containerStart
.getContainerId().toString(), START_DATA_SUFFIX),
((ContainerStartDataPBImpl) containerStart).getProto().toByteArray());
LOG.info("Start information of container "
+ containerStart.getContainerId() + " is written");
} catch (IOException e) {
LOG.error("Error when writing start information of container "
+ containerStart.getContainerId(), e);
throw e;
}
}
@Override
public void containerFinished(ContainerFinishData containerFinish)
throws IOException {
HistoryFileWriter hfWriter =
getHistoryFileWriter(containerFinish.getContainerId()
.getApplicationAttemptId().getApplicationId());
assert containerFinish instanceof ContainerFinishDataPBImpl;
try {
hfWriter.writeHistoryData(new HistoryDataKey(containerFinish
.getContainerId().toString(), FINISH_DATA_SUFFIX),
((ContainerFinishDataPBImpl) containerFinish).getProto().toByteArray());
LOG.info("Finish information of container "
+ containerFinish.getContainerId() + " is written");
    } catch (IOException e) {
      LOG.error("Error when writing finish information of container "
          + containerFinish.getContainerId(), e);
      // Rethrow so callers observe the failure, consistent with the other
      // write methods above
      throw e;
    }
}
private static ApplicationStartData parseApplicationStartData(byte[] value)
throws InvalidProtocolBufferException {
return new ApplicationStartDataPBImpl(
ApplicationStartDataProto.parseFrom(value));
}
private static ApplicationFinishData parseApplicationFinishData(byte[] value)
throws InvalidProtocolBufferException {
return new ApplicationFinishDataPBImpl(
ApplicationFinishDataProto.parseFrom(value));
}
private static ApplicationAttemptStartData parseApplicationAttemptStartData(
byte[] value) throws InvalidProtocolBufferException {
return new ApplicationAttemptStartDataPBImpl(
ApplicationAttemptStartDataProto.parseFrom(value));
}
private static ApplicationAttemptFinishData
parseApplicationAttemptFinishData(byte[] value)
throws InvalidProtocolBufferException {
return new ApplicationAttemptFinishDataPBImpl(
ApplicationAttemptFinishDataProto.parseFrom(value));
}
private static ContainerStartData parseContainerStartData(byte[] value)
throws InvalidProtocolBufferException {
return new ContainerStartDataPBImpl(
ContainerStartDataProto.parseFrom(value));
}
private static ContainerFinishData parseContainerFinishData(byte[] value)
throws InvalidProtocolBufferException {
return new ContainerFinishDataPBImpl(
ContainerFinishDataProto.parseFrom(value));
}
private static void mergeApplicationHistoryData(
ApplicationHistoryData historyData, ApplicationStartData startData) {
historyData.setApplicationName(startData.getApplicationName());
historyData.setApplicationType(startData.getApplicationType());
historyData.setQueue(startData.getQueue());
historyData.setUser(startData.getUser());
historyData.setSubmitTime(startData.getSubmitTime());
historyData.setStartTime(startData.getStartTime());
}
private static void mergeApplicationHistoryData(
ApplicationHistoryData historyData, ApplicationFinishData finishData) {
historyData.setFinishTime(finishData.getFinishTime());
historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo());
historyData.setFinalApplicationStatus(finishData
.getFinalApplicationStatus());
historyData.setYarnApplicationState(finishData.getYarnApplicationState());
}
private static void mergeApplicationAttemptHistoryData(
ApplicationAttemptHistoryData historyData,
ApplicationAttemptStartData startData) {
historyData.setHost(startData.getHost());
historyData.setRPCPort(startData.getRPCPort());
historyData.setMasterContainerId(startData.getMasterContainerId());
}
private static void mergeApplicationAttemptHistoryData(
ApplicationAttemptHistoryData historyData,
ApplicationAttemptFinishData finishData) {
historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo());
historyData.setTrackingURL(finishData.getTrackingURL());
historyData.setFinalApplicationStatus(finishData
.getFinalApplicationStatus());
historyData.setYarnApplicationAttemptState(finishData
.getYarnApplicationAttemptState());
}
private static void mergeContainerHistoryData(
ContainerHistoryData historyData, ContainerStartData startData) {
historyData.setAllocatedResource(startData.getAllocatedResource());
historyData.setAssignedNode(startData.getAssignedNode());
historyData.setPriority(startData.getPriority());
historyData.setStartTime(startData.getStartTime());
}
private static void mergeContainerHistoryData(
ContainerHistoryData historyData, ContainerFinishData finishData) {
historyData.setFinishTime(finishData.getFinishTime());
historyData.setDiagnosticsInfo(finishData.getDiagnosticsInfo());
historyData.setContainerExitStatus(finishData.getContainerExitStatus());
historyData.setContainerState(finishData.getContainerState());
}
private HistoryFileWriter getHistoryFileWriter(ApplicationId appId)
throws IOException {
HistoryFileWriter hfWriter = outstandingWriters.get(appId);
if (hfWriter == null) {
throw new IOException("History file of application " + appId
+ " is not opened");
}
return hfWriter;
}
private HistoryFileReader getHistoryFileReader(ApplicationId appId)
throws IOException {
Path applicationHistoryFile = new Path(rootDirPath, appId.toString());
if (!fs.exists(applicationHistoryFile)) {
throw new IOException("History file for application " + appId
+ " is not found");
}
    // The history file is still being written
if (outstandingWriters.containsKey(appId)) {
throw new IOException("History file for application " + appId
+ " is under writing");
}
return new HistoryFileReader(applicationHistoryFile);
}
private class HistoryFileReader {
private class Entry {
private HistoryDataKey key;
private byte[] value;
public Entry(HistoryDataKey key, byte[] value) {
this.key = key;
this.value = value;
}
}
private TFile.Reader reader;
private TFile.Reader.Scanner scanner;
FSDataInputStream fsdis;
public HistoryFileReader(Path historyFile) throws IOException {
fsdis = fs.open(historyFile);
reader =
new TFile.Reader(fsdis, fs.getFileStatus(historyFile).getLen(),
getConfig());
reset();
}
public boolean hasNext() {
return !scanner.atEnd();
}
public Entry next() throws IOException {
TFile.Reader.Scanner.Entry entry = scanner.entry();
DataInputStream dis = entry.getKeyStream();
HistoryDataKey key = new HistoryDataKey();
key.readFields(dis);
dis = entry.getValueStream();
byte[] value = new byte[entry.getValueLength()];
      // read(byte[]) may return fewer bytes than requested; readFully
      // guarantees the entire value is consumed
      dis.readFully(value);
scanner.advance();
return new Entry(key, value);
}
public void reset() throws IOException {
IOUtils.cleanup(LOG, scanner);
scanner = reader.createScanner();
}
public void close() {
IOUtils.cleanup(LOG, scanner, reader, fsdis);
}
}
private class HistoryFileWriter {
private FSDataOutputStream fsdos;
private TFile.Writer writer;
public HistoryFileWriter(Path historyFile) throws IOException {
if (fs.exists(historyFile)) {
fsdos = fs.append(historyFile);
} else {
fsdos = fs.create(historyFile);
}
fs.setPermission(historyFile, HISTORY_FILE_UMASK);
writer =
new TFile.Writer(fsdos, MIN_BLOCK_SIZE, getConfig().get(
YarnConfiguration.FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE,
YarnConfiguration.DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE), null,
getConfig());
}
public synchronized void close() {
IOUtils.cleanup(LOG, writer, fsdos);
}
public synchronized void writeHistoryData(HistoryDataKey key, byte[] value)
throws IOException {
DataOutputStream dos = null;
try {
dos = writer.prepareAppendKey(-1);
key.write(dos);
} finally {
IOUtils.cleanup(LOG, dos);
}
try {
dos = writer.prepareAppendValue(value.length);
dos.write(value);
} finally {
IOUtils.cleanup(LOG, dos);
}
}
}
private static class HistoryDataKey implements Writable {
private String id;
private String suffix;
public HistoryDataKey() {
this(null, null);
}
public HistoryDataKey(String id, String suffix) {
this.id = id;
this.suffix = suffix;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(id);
out.writeUTF(suffix);
}
@Override
public void readFields(DataInput in) throws IOException {
id = in.readUTF();
suffix = in.readUTF();
}
}
}
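// Illustrative sketch, not part of the original source: the store above keeps
// one TFile per application, and every entry is keyed by a HistoryDataKey,
// i.e. an entity id plus a start/finish suffix serialized with writeUTF, as
// HistoryDataKey.write() shows. The self-contained snippet below reproduces
// that key wire format with plain JDK streams; the class name, the sample id,
// and the "_start" suffix value are assumptions for illustration only.
class HistoryDataKeyLayoutSketch {
  public static void main(String[] args) throws java.io.IOException {
    java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
    java.io.DataOutputStream dos = new java.io.DataOutputStream(bos);
    // Same layout as HistoryDataKey.write(): UTF-encoded id, then suffix.
    dos.writeUTF("appattempt_1400000000000_0001_000001");
    dos.writeUTF("_start"); // assumed value of START_DATA_SUFFIX
    System.out.println("serialized key is " + bos.size() + " bytes");
  }
}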
| 31,490 | 38.561558 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public interface ApplicationHistoryReader {
/**
   * This method returns the {@link ApplicationHistoryData} for the
* specified {@link ApplicationId}.
*
* @param appId
*
* @return {@link ApplicationHistoryData} for the ApplicationId.
* @throws IOException
*/
ApplicationHistoryData getApplication(ApplicationId appId) throws IOException;
/**
   * This method returns the {@link ApplicationHistoryData}s of all
   * applications.
*
* @return map of {@link ApplicationId} to {@link ApplicationHistoryData}s.
* @throws IOException
*/
Map<ApplicationId, ApplicationHistoryData> getAllApplications()
throws IOException;
/**
   * An application can have multiple application attempts
   * ({@link ApplicationAttemptHistoryData}). This method returns all the
   * {@link ApplicationAttemptHistoryData}s for the Application.
*
* @param appId
*
* @return all {@link ApplicationAttemptHistoryData}s for the Application.
* @throws IOException
*/
Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
getApplicationAttempts(ApplicationId appId) throws IOException;
/**
   * This method returns the {@link ApplicationAttemptHistoryData} for the
   * specified {@link ApplicationAttemptId}.
*
* @param appAttemptId
* {@link ApplicationAttemptId}
* @return {@link ApplicationAttemptHistoryData} for ApplicationAttemptId
* @throws IOException
*/
ApplicationAttemptHistoryData getApplicationAttempt(
ApplicationAttemptId appAttemptId) throws IOException;
/**
   * This method returns the {@link ContainerHistoryData} for the specified
   * {@link ContainerId}.
*
* @param containerId
* {@link ContainerId}
* @return {@link ContainerHistoryData} for ContainerId
* @throws IOException
*/
ContainerHistoryData getContainer(ContainerId containerId) throws IOException;
/**
   * This method returns the {@link ContainerHistoryData} of the AM container
   * for the specified {@link ApplicationAttemptId}.
*
* @param appAttemptId
* {@link ApplicationAttemptId}
* @return {@link ContainerHistoryData} for ApplicationAttemptId
* @throws IOException
*/
ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
throws IOException;
/**
   * This method returns a map of {@link ContainerId} to
   * {@link ContainerHistoryData} for the specified {@link ApplicationAttemptId}.
*
* @param appAttemptId
* {@link ApplicationAttemptId}
   * @return a map of {@link ContainerId} to {@link ContainerHistoryData} for
   *         the ApplicationAttemptId
* @throws IOException
*/
Map<ContainerId, ContainerHistoryData> getContainers(
ApplicationAttemptId appAttemptId) throws IOException;
}
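// Illustrative usage sketch, not part of the original interface: how a caller
// might walk the attempts of one application through any
// ApplicationHistoryReader implementation. The helper class and method names
// are hypothetical, and the reader is assumed to be already initialized.
class ApplicationHistoryReaderUsageSketch {
  static void printAttemptHosts(ApplicationHistoryReader reader,
      ApplicationId appId) throws IOException {
    Map<ApplicationAttemptId, ApplicationAttemptHistoryData> attempts =
        reader.getApplicationAttempts(appId);
    for (Map.Entry<ApplicationAttemptId, ApplicationAttemptHistoryData> entry
        : attempts.entrySet()) {
      // Each attempt's history data carries, among other fields, the AM host.
      System.out.println(entry.getKey() + " -> " + entry.getValue().getHost());
    }
  }
}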
| 4,285 | 35.322034 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
public interface ApplicationHistoryManager {
/**
   * This method returns the {@link ApplicationReport} for the specified
* {@link ApplicationId}.
*
* @param appId
*
* @return {@link ApplicationReport} for the ApplicationId.
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
ApplicationReport getApplication(ApplicationId appId) throws YarnException,
IOException;
/**
   * This method returns at most the given number of applications
   * ({@link ApplicationReport}s) started within the given time period.
   *
* @param appsNum
* @param appStartedTimeBegin
* @param appStartedTimeEnd
*
* @return map of {@link ApplicationId} to {@link ApplicationReport}s.
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
Map<ApplicationId, ApplicationReport> getApplications(long appsNum,
long appStartedTimeBegin, long appStartedTimeEnd) throws YarnException,
IOException;
/**
   * An application can have multiple application attempts
   * ({@link ApplicationAttemptReport}). This method returns all the
   * {@link ApplicationAttemptReport}s for the Application.
*
* @param appId
*
* @return all {@link ApplicationAttemptReport}s for the Application.
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
Map<ApplicationAttemptId, ApplicationAttemptReport> getApplicationAttempts(
ApplicationId appId) throws YarnException, IOException;
/**
   * This method returns the {@link ApplicationAttemptReport} for the
   * specified {@link ApplicationAttemptId}.
*
* @param appAttemptId
* {@link ApplicationAttemptId}
* @return {@link ApplicationAttemptReport} for ApplicationAttemptId
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
ApplicationAttemptReport getApplicationAttempt(
ApplicationAttemptId appAttemptId) throws YarnException, IOException;
/**
   * This method returns the {@link ContainerReport} for the specified
   * {@link ContainerId}.
*
* @param containerId
* {@link ContainerId}
* @return {@link ContainerReport} for ContainerId
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
ContainerReport getContainer(ContainerId containerId) throws YarnException,
IOException;
/**
   * This method returns the {@link ContainerReport} of the AM container for
   * the specified {@link ApplicationAttemptId}.
*
* @param appAttemptId
* {@link ApplicationAttemptId}
* @return {@link ContainerReport} for ApplicationAttemptId
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
throws YarnException, IOException;
/**
   * This method returns a map of {@link ContainerId} to {@link ContainerReport}
   * for the specified {@link ApplicationAttemptId}.
*
* @param appAttemptId
* {@link ApplicationAttemptId}
* @return Map of {@link ContainerId} to {@link ContainerReport} for
* ApplicationAttemptId
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
Map<ContainerId, ContainerReport> getContainers(
ApplicationAttemptId appAttemptId) throws YarnException, IOException;
}
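// Illustrative sketch, not part of the original interface: fetching reports
// for applications started within the last day through any
// ApplicationHistoryManager implementation. The helper class, its method, and
// the one-day window are hypothetical.
class ApplicationHistoryManagerUsageSketch {
  static Map<ApplicationId, ApplicationReport> reportsFromLastDay(
      ApplicationHistoryManager manager, long appsNum)
      throws YarnException, IOException {
    long now = System.currentTimeMillis();
    // appsNum caps the number of reports; the two longs bound the
    // application start-time window described in the Javadoc above.
    return manager.getApplications(appsNum, now - 24L * 60 * 60 * 1000, now);
  }
}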
| 4,865 | 31.225166 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/MemoryApplicationHistoryStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
/**
 * In-memory implementation of {@link ApplicationHistoryStore}. This
 * implementation is for test purposes only. If it is instantiated more than
 * once, history data may be written to and read from different in-memory
 * stores.
*
*/
@Private
@Unstable
public class MemoryApplicationHistoryStore extends AbstractService implements
ApplicationHistoryStore {
private final ConcurrentMap<ApplicationId, ApplicationHistoryData> applicationData =
new ConcurrentHashMap<ApplicationId, ApplicationHistoryData>();
private final ConcurrentMap<ApplicationId, ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>> applicationAttemptData =
new ConcurrentHashMap<ApplicationId, ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>>();
private final ConcurrentMap<ApplicationAttemptId, ConcurrentMap<ContainerId, ContainerHistoryData>> containerData =
new ConcurrentHashMap<ApplicationAttemptId, ConcurrentMap<ContainerId, ContainerHistoryData>>();
public MemoryApplicationHistoryStore() {
super(MemoryApplicationHistoryStore.class.getName());
}
@Override
public Map<ApplicationId, ApplicationHistoryData> getAllApplications() {
return new HashMap<ApplicationId, ApplicationHistoryData>(applicationData);
}
@Override
public ApplicationHistoryData getApplication(ApplicationId appId) {
return applicationData.get(appId);
}
@Override
public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
getApplicationAttempts(ApplicationId appId) {
ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
applicationAttemptData.get(appId);
if (subMap == null) {
return Collections
.<ApplicationAttemptId, ApplicationAttemptHistoryData> emptyMap();
} else {
return new HashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>(
subMap);
}
}
@Override
public ApplicationAttemptHistoryData getApplicationAttempt(
ApplicationAttemptId appAttemptId) {
ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
applicationAttemptData.get(appAttemptId.getApplicationId());
if (subMap == null) {
return null;
} else {
return subMap.get(appAttemptId);
}
}
@Override
public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId) {
ApplicationAttemptHistoryData appAttempt =
getApplicationAttempt(appAttemptId);
if (appAttempt == null || appAttempt.getMasterContainerId() == null) {
return null;
} else {
return getContainer(appAttempt.getMasterContainerId());
}
}
@Override
public ContainerHistoryData getContainer(ContainerId containerId) {
Map<ContainerId, ContainerHistoryData> subMap =
containerData.get(containerId.getApplicationAttemptId());
if (subMap == null) {
return null;
} else {
return subMap.get(containerId);
}
}
@Override
public Map<ContainerId, ContainerHistoryData> getContainers(
ApplicationAttemptId appAttemptId) throws IOException {
ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
containerData.get(appAttemptId);
if (subMap == null) {
return Collections.<ContainerId, ContainerHistoryData> emptyMap();
} else {
return new HashMap<ContainerId, ContainerHistoryData>(subMap);
}
}
@Override
public void applicationStarted(ApplicationStartData appStart)
throws IOException {
ApplicationHistoryData oldData =
applicationData.putIfAbsent(appStart.getApplicationId(),
ApplicationHistoryData.newInstance(appStart.getApplicationId(),
appStart.getApplicationName(), appStart.getApplicationType(),
appStart.getQueue(), appStart.getUser(), appStart.getSubmitTime(),
appStart.getStartTime(), Long.MAX_VALUE, null, null, null));
if (oldData != null) {
throw new IOException("The start information of application "
+ appStart.getApplicationId() + " is already stored.");
}
}
@Override
public void applicationFinished(ApplicationFinishData appFinish)
throws IOException {
ApplicationHistoryData data =
applicationData.get(appFinish.getApplicationId());
if (data == null) {
throw new IOException("The finish information of application "
+ appFinish.getApplicationId() + " is stored before the start"
+ " information.");
}
// Make the assumption that YarnApplicationState should not be null if
// the finish information is already recorded
if (data.getYarnApplicationState() != null) {
throw new IOException("The finish information of application "
+ appFinish.getApplicationId() + " is already stored.");
}
data.setFinishTime(appFinish.getFinishTime());
data.setDiagnosticsInfo(appFinish.getDiagnosticsInfo());
data.setFinalApplicationStatus(appFinish.getFinalApplicationStatus());
data.setYarnApplicationState(appFinish.getYarnApplicationState());
}
@Override
public void applicationAttemptStarted(
ApplicationAttemptStartData appAttemptStart) throws IOException {
ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
getSubMap(appAttemptStart.getApplicationAttemptId().getApplicationId());
ApplicationAttemptHistoryData oldData =
subMap.putIfAbsent(appAttemptStart.getApplicationAttemptId(),
ApplicationAttemptHistoryData.newInstance(
appAttemptStart.getApplicationAttemptId(),
appAttemptStart.getHost(), appAttemptStart.getRPCPort(),
appAttemptStart.getMasterContainerId(), null, null, null, null));
if (oldData != null) {
throw new IOException("The start information of application attempt "
+ appAttemptStart.getApplicationAttemptId() + " is already stored.");
}
}
@Override
public void applicationAttemptFinished(
ApplicationAttemptFinishData appAttemptFinish) throws IOException {
ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData> subMap =
getSubMap(appAttemptFinish.getApplicationAttemptId().getApplicationId());
ApplicationAttemptHistoryData data =
subMap.get(appAttemptFinish.getApplicationAttemptId());
if (data == null) {
throw new IOException("The finish information of application attempt "
+ appAttemptFinish.getApplicationAttemptId() + " is stored before"
+ " the start information.");
}
// Make the assumption that YarnApplicationAttemptState should not be null
// if the finish information is already recorded
if (data.getYarnApplicationAttemptState() != null) {
throw new IOException("The finish information of application attempt "
+ appAttemptFinish.getApplicationAttemptId() + " is already stored.");
}
data.setTrackingURL(appAttemptFinish.getTrackingURL());
data.setDiagnosticsInfo(appAttemptFinish.getDiagnosticsInfo());
data
.setFinalApplicationStatus(appAttemptFinish.getFinalApplicationStatus());
data.setYarnApplicationAttemptState(appAttemptFinish
.getYarnApplicationAttemptState());
}
private ConcurrentMap<ApplicationAttemptId, ApplicationAttemptHistoryData>
getSubMap(ApplicationId appId) {
applicationAttemptData
.putIfAbsent(
appId,
new ConcurrentHashMap<ApplicationAttemptId, ApplicationAttemptHistoryData>());
return applicationAttemptData.get(appId);
}
@Override
public void containerStarted(ContainerStartData containerStart)
throws IOException {
ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
getSubMap(containerStart.getContainerId().getApplicationAttemptId());
ContainerHistoryData oldData =
subMap.putIfAbsent(containerStart.getContainerId(),
ContainerHistoryData.newInstance(containerStart.getContainerId(),
containerStart.getAllocatedResource(),
containerStart.getAssignedNode(), containerStart.getPriority(),
containerStart.getStartTime(), Long.MAX_VALUE, null,
Integer.MAX_VALUE, null));
if (oldData != null) {
throw new IOException("The start information of container "
+ containerStart.getContainerId() + " is already stored.");
}
}
@Override
public void containerFinished(ContainerFinishData containerFinish)
throws IOException {
ConcurrentMap<ContainerId, ContainerHistoryData> subMap =
getSubMap(containerFinish.getContainerId().getApplicationAttemptId());
ContainerHistoryData data = subMap.get(containerFinish.getContainerId());
if (data == null) {
throw new IOException("The finish information of container "
+ containerFinish.getContainerId() + " is stored before"
+ " the start information.");
}
// Make the assumption that ContainerState should not be null if
// the finish information is already recorded
if (data.getContainerState() != null) {
throw new IOException("The finish information of container "
+ containerFinish.getContainerId() + " is already stored.");
}
data.setFinishTime(containerFinish.getFinishTime());
data.setDiagnosticsInfo(containerFinish.getDiagnosticsInfo());
data.setContainerExitStatus(containerFinish.getContainerExitStatus());
data.setContainerState(containerFinish.getContainerState());
}
private ConcurrentMap<ContainerId, ContainerHistoryData> getSubMap(
ApplicationAttemptId appAttemptId) {
containerData.putIfAbsent(appAttemptId,
new ConcurrentHashMap<ContainerId, ContainerHistoryData>());
return containerData.get(appAttemptId);
}
}
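// Illustrative sketch, hypothetical and for tests only (as the class Javadoc
// notes): writing application history and reading it back through a single
// MemoryApplicationHistoryStore instance. The start/finish records are
// assumed to be built elsewhere, e.g. via their newInstance factories.
class MemoryApplicationHistoryStoreUsageSketch {
  static ApplicationHistoryData roundTrip(MemoryApplicationHistoryStore store,
      ApplicationStartData start, ApplicationFinishData finish)
      throws IOException {
    store.applicationStarted(start);   // a second call for the same id throws
    store.applicationFinished(finish); // must follow applicationStarted
    return store.getApplication(start.getApplicationId());
  }
}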
| 12,001 | 42.643636 | 137 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/NullApplicationHistoryStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
/**
* Dummy implementation of {@link ApplicationHistoryStore}. If this
* implementation is used, no history data will be persisted.
*
*/
@Unstable
@Private
public class NullApplicationHistoryStore extends AbstractService implements
ApplicationHistoryStore {
public NullApplicationHistoryStore() {
super(NullApplicationHistoryStore.class.getName());
}
@Override
public void applicationStarted(ApplicationStartData appStart)
throws IOException {
}
@Override
public void applicationFinished(ApplicationFinishData appFinish)
throws IOException {
}
@Override
public void applicationAttemptStarted(
ApplicationAttemptStartData appAttemptStart) throws IOException {
}
@Override
public void applicationAttemptFinished(
ApplicationAttemptFinishData appAttemptFinish) throws IOException {
}
@Override
public void containerStarted(ContainerStartData containerStart)
throws IOException {
}
@Override
public void containerFinished(ContainerFinishData containerFinish)
throws IOException {
}
@Override
public ApplicationHistoryData getApplication(ApplicationId appId)
throws IOException {
return null;
}
@Override
public Map<ApplicationId, ApplicationHistoryData> getAllApplications()
throws IOException {
return Collections.emptyMap();
}
@Override
public Map<ApplicationAttemptId, ApplicationAttemptHistoryData>
getApplicationAttempts(ApplicationId appId) throws IOException {
return Collections.emptyMap();
}
@Override
public ApplicationAttemptHistoryData getApplicationAttempt(
ApplicationAttemptId appAttemptId) throws IOException {
return null;
}
@Override
public ContainerHistoryData getContainer(ContainerId containerId)
throws IOException {
return null;
}
@Override
public ContainerHistoryData getAMContainer(ApplicationAttemptId appAttemptId)
throws IOException {
return null;
}
@Override
public Map<ContainerId, ContainerHistoryData> getContainers(
ApplicationAttemptId appAttemptId) throws IOException {
return Collections.emptyMap();
}
}
| 4,330 | 33.102362 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService;
import org.apache.hadoop.yarn.server.timeline.webapp.CrossOriginFilterInitializer;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* History server that keeps track of all types of history in the cluster.
 * Application-specific history to start with.
*/
public class ApplicationHistoryServer extends CompositeService {
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
private static final Log LOG = LogFactory
.getLog(ApplicationHistoryServer.class);
private ApplicationHistoryClientService ahsClientService;
private ApplicationACLsManager aclsManager;
private ApplicationHistoryManager historyManager;
private TimelineStore timelineStore;
private TimelineDelegationTokenSecretManagerService secretManagerService;
private TimelineDataManager timelineDataManager;
private WebApp webApp;
public ApplicationHistoryServer() {
super(ApplicationHistoryServer.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
// init timeline services first
timelineStore = createTimelineStore(conf);
addIfService(timelineStore);
secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
addService(secretManagerService);
timelineDataManager = createTimelineDataManager(conf);
addService(timelineDataManager);
// init generic history service afterwards
aclsManager = createApplicationACLsManager(conf);
historyManager = createApplicationHistoryManager(conf);
ahsClientService = createApplicationHistoryClientService(historyManager);
addService(ahsClientService);
addService((Service) historyManager);
DefaultMetricsSystem.initialize("ApplicationHistoryServer");
JvmMetrics.initSingleton("ApplicationHistoryServer", null);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
try {
doSecureLogin(getConfig());
} catch(IOException ie) {
throw new YarnRuntimeException("Failed to login", ie);
}
super.serviceStart();
startWebApp();
}
@Override
protected void serviceStop() throws Exception {
if (webApp != null) {
webApp.stop();
}
DefaultMetricsSystem.shutdown();
super.serviceStop();
}
@Private
@VisibleForTesting
ApplicationHistoryClientService getClientService() {
return this.ahsClientService;
}
/**
* @return ApplicationTimelineStore
*/
@Private
@VisibleForTesting
public TimelineStore getTimelineStore() {
return timelineStore;
}
@Private
@VisibleForTesting
ApplicationHistoryManager getApplicationHistoryManager() {
return this.historyManager;
}
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
Thread
.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
LOG);
ApplicationHistoryServer appHistoryServer = null;
try {
appHistoryServer = new ApplicationHistoryServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(appHistoryServer),
SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration();
new GenericOptionsParser(conf, args);
appHistoryServer.init(conf);
appHistoryServer.start();
} catch (Throwable t) {
LOG.fatal("Error starting ApplicationHistoryServer", t);
ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
}
return appHistoryServer;
}
public static void main(String[] args) {
launchAppHistoryServer(args);
}
private ApplicationHistoryClientService
createApplicationHistoryClientService(
ApplicationHistoryManager historyManager) {
return new ApplicationHistoryClientService(historyManager);
}
private ApplicationACLsManager createApplicationACLsManager(
Configuration conf) {
return new ApplicationACLsManager(conf);
}
private ApplicationHistoryManager createApplicationHistoryManager(
Configuration conf) {
    // Backward compatibility:
    // if APPLICATION_HISTORY_STORE is neither null nor empty, the user has
    // enabled it explicitly.
if (conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE) == null ||
conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE).length() == 0 ||
conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE).equals(
NullApplicationHistoryStore.class.getName())) {
return new ApplicationHistoryManagerOnTimelineStore(
timelineDataManager, aclsManager);
} else {
LOG.warn("The filesystem based application history store is deprecated.");
return new ApplicationHistoryManagerImpl();
}
}
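  // Example (illustrative): explicitly configuring the deprecated filesystem
  // store in yarn-site.xml makes the branch above fall back to
  // ApplicationHistoryManagerImpl. The property name is the one behind
  // YarnConfiguration.APPLICATION_HISTORY_STORE (assumed here):
  //
  //   <property>
  //     <name>yarn.timeline-service.generic-application-history.store-class</name>
  //     <value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
  //   </property>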
private TimelineStore createTimelineStore(
Configuration conf) {
return ReflectionUtils.newInstance(conf.getClass(
YarnConfiguration.TIMELINE_SERVICE_STORE, LeveldbTimelineStore.class,
TimelineStore.class), conf);
}
private TimelineDelegationTokenSecretManagerService
createTimelineDelegationTokenSecretManagerService(Configuration conf) {
return new TimelineDelegationTokenSecretManagerService();
}
private TimelineDataManager createTimelineDataManager(Configuration conf) {
return new TimelineDataManager(
timelineStore, new TimelineACLsManager(conf));
}
private void startWebApp() {
Configuration conf = getConfig();
TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
secretManagerService.getTimelineDelegationTokenSecretManager());
    // Always load the pseudo authentication filter to parse "user.name" in a
    // URL to identify an HTTP request's user in insecure mode.
// When Kerberos authentication type is set (i.e., secure mode is turned on),
// the customized filter will be loaded by the timeline server to do Kerberos
// + DT authentication.
String initializers = conf.get("hadoop.http.filter.initializers");
boolean modifiedInitializers = false;
initializers =
initializers == null || initializers.length() == 0 ? "" : initializers;
if (!initializers.contains(CrossOriginFilterInitializer.class.getName())) {
if(conf.getBoolean(YarnConfiguration
.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED, YarnConfiguration
.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT)) {
initializers = CrossOriginFilterInitializer.class.getName() + ","
+ initializers;
modifiedInitializers = true;
}
}
if (!initializers.contains(TimelineAuthenticationFilterInitializer.class
.getName())) {
initializers =
TimelineAuthenticationFilterInitializer.class.getName() + ","
+ initializers;
modifiedInitializers = true;
}
String[] parts = initializers.split(",");
ArrayList<String> target = new ArrayList<String>();
for (String filterInitializer : parts) {
filterInitializer = filterInitializer.trim();
if (filterInitializer.equals(AuthenticationFilterInitializer.class
.getName())) {
modifiedInitializers = true;
continue;
}
target.add(filterInitializer);
}
String actualInitializers =
org.apache.commons.lang.StringUtils.join(target, ",");
if (modifiedInitializers) {
conf.set("hadoop.http.filter.initializers", actualInitializers);
}
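    // For example (illustrative values): an initial setting of
    //   "org.apache.hadoop.security.AuthenticationFilterInitializer,com.example.Foo"
    // would be rewritten to
    //   "org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer,com.example.Foo"
    // i.e. the generic authentication filter is dropped and the
    // timeline-specific one is prepended.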
String bindAddress = WebAppUtils.getWebAppBindURL(conf,
YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
LOG.info("Instantiating AHSWebApp at " + bindAddress);
try {
webApp =
WebApps
.$for("applicationhistory", ApplicationHistoryClientService.class,
ahsClientService, "ws")
.with(conf).at(bindAddress).start(
new AHSWebApp(timelineDataManager, ahsClientService));
} catch (Exception e) {
String msg = "AHSWebApp failed to start.";
LOG.error(msg, e);
throw new YarnRuntimeException(msg, e);
}
}
private void doSecureLogin(Configuration conf) throws IOException {
InetSocketAddress socAddr = getBindAddress(conf);
SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, socAddr.getHostName());
}
/**
* Retrieve the timeline server bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
private static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
}
}
| 11,575 | 37.458472 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
public class ApplicationHistoryManagerImpl extends AbstractService implements
ApplicationHistoryManager {
private static final Log LOG = LogFactory
.getLog(ApplicationHistoryManagerImpl.class);
private static final String UNAVAILABLE = "N/A";
private ApplicationHistoryStore historyStore;
private String serverHttpAddress;
public ApplicationHistoryManagerImpl() {
super(ApplicationHistoryManagerImpl.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
LOG.info("ApplicationHistory Init");
historyStore = createApplicationHistoryStore(conf);
historyStore.init(conf);
serverHttpAddress = WebAppUtils.getHttpSchemePrefix(conf) +
WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
LOG.info("Starting ApplicationHistory");
historyStore.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
LOG.info("Stopping ApplicationHistory");
historyStore.stop();
super.serviceStop();
}
protected ApplicationHistoryStore createApplicationHistoryStore(
Configuration conf) {
return ReflectionUtils.newInstance(conf.getClass(
YarnConfiguration.APPLICATION_HISTORY_STORE,
FileSystemApplicationHistoryStore.class,
ApplicationHistoryStore.class), conf);
}
@Override
public ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
throws IOException {
ApplicationReport app =
getApplication(appAttemptId.getApplicationId());
return convertToContainerReport(historyStore.getAMContainer(appAttemptId),
app == null ? null : app.getUser());
}
@Override
public Map<ApplicationId, ApplicationReport> getApplications(long appsNum,
long appStartedTimeBegin, long appStartedTimeEnd) throws IOException {
Map<ApplicationId, ApplicationHistoryData> histData =
historyStore.getAllApplications();
HashMap<ApplicationId, ApplicationReport> applicationsReport =
new HashMap<ApplicationId, ApplicationReport>();
for (Entry<ApplicationId, ApplicationHistoryData> entry : histData
.entrySet()) {
applicationsReport.put(entry.getKey(),
convertToApplicationReport(entry.getValue()));
}
return applicationsReport;
}
@Override
public ApplicationReport getApplication(ApplicationId appId)
throws IOException {
return convertToApplicationReport(historyStore.getApplication(appId));
}
private ApplicationReport convertToApplicationReport(
ApplicationHistoryData appHistory) throws IOException {
ApplicationAttemptId currentApplicationAttemptId = null;
String trackingUrl = UNAVAILABLE;
String host = UNAVAILABLE;
int rpcPort = -1;
ApplicationAttemptHistoryData lastAttempt =
getLastAttempt(appHistory.getApplicationId());
if (lastAttempt != null) {
currentApplicationAttemptId = lastAttempt.getApplicationAttemptId();
trackingUrl = lastAttempt.getTrackingURL();
host = lastAttempt.getHost();
rpcPort = lastAttempt.getRPCPort();
}
return ApplicationReport.newInstance(appHistory.getApplicationId(),
currentApplicationAttemptId, appHistory.getUser(), appHistory.getQueue(),
appHistory.getApplicationName(), host, rpcPort, null,
appHistory.getYarnApplicationState(), appHistory.getDiagnosticsInfo(),
trackingUrl, appHistory.getStartTime(), appHistory.getFinishTime(),
appHistory.getFinalApplicationStatus(), null, "", 100,
appHistory.getApplicationType(), null);
}
private ApplicationAttemptHistoryData getLastAttempt(ApplicationId appId)
throws IOException {
Map<ApplicationAttemptId, ApplicationAttemptHistoryData> attempts =
historyStore.getApplicationAttempts(appId);
ApplicationAttemptId prevMaxAttemptId = null;
for (ApplicationAttemptId attemptId : attempts.keySet()) {
if (prevMaxAttemptId == null) {
prevMaxAttemptId = attemptId;
} else {
if (prevMaxAttemptId.getAttemptId() < attemptId.getAttemptId()) {
prevMaxAttemptId = attemptId;
}
}
}
return attempts.get(prevMaxAttemptId);
}
private ApplicationAttemptReport convertToApplicationAttemptReport(
ApplicationAttemptHistoryData appAttemptHistory) {
return ApplicationAttemptReport.newInstance(
appAttemptHistory.getApplicationAttemptId(), appAttemptHistory.getHost(),
appAttemptHistory.getRPCPort(), appAttemptHistory.getTrackingURL(), null,
appAttemptHistory.getDiagnosticsInfo(),
appAttemptHistory.getYarnApplicationAttemptState(),
appAttemptHistory.getMasterContainerId());
}
@Override
public ApplicationAttemptReport getApplicationAttempt(
ApplicationAttemptId appAttemptId) throws IOException {
return convertToApplicationAttemptReport(historyStore
.getApplicationAttempt(appAttemptId));
}
@Override
public Map<ApplicationAttemptId, ApplicationAttemptReport>
getApplicationAttempts(ApplicationId appId) throws IOException {
Map<ApplicationAttemptId, ApplicationAttemptHistoryData> histData =
historyStore.getApplicationAttempts(appId);
HashMap<ApplicationAttemptId, ApplicationAttemptReport> applicationAttemptsReport =
new HashMap<ApplicationAttemptId, ApplicationAttemptReport>();
for (Entry<ApplicationAttemptId, ApplicationAttemptHistoryData> entry : histData
.entrySet()) {
applicationAttemptsReport.put(entry.getKey(),
convertToApplicationAttemptReport(entry.getValue()));
}
return applicationAttemptsReport;
}
@Override
public ContainerReport getContainer(ContainerId containerId)
throws IOException {
ApplicationReport app =
getApplication(containerId.getApplicationAttemptId().getApplicationId());
return convertToContainerReport(historyStore.getContainer(containerId),
        app == null ? null : app.getUser());
}
private ContainerReport convertToContainerReport(
ContainerHistoryData containerHistory, String user) {
    // Build the URL of the container's aggregated log, rooted at this
    // server's web address.
String logUrl = WebAppUtils.getAggregatedLogURL(
serverHttpAddress,
containerHistory.getAssignedNode().toString(),
containerHistory.getContainerId().toString(),
containerHistory.getContainerId().toString(),
user);
return ContainerReport.newInstance(containerHistory.getContainerId(),
containerHistory.getAllocatedResource(),
containerHistory.getAssignedNode(), containerHistory.getPriority(),
containerHistory.getStartTime(), containerHistory.getFinishTime(),
containerHistory.getDiagnosticsInfo(), logUrl,
containerHistory.getContainerExitStatus(),
containerHistory.getContainerState(), null);
}
@Override
public Map<ContainerId, ContainerReport> getContainers(
ApplicationAttemptId appAttemptId) throws IOException {
ApplicationReport app =
getApplication(appAttemptId.getApplicationId());
Map<ContainerId, ContainerHistoryData> histData =
historyStore.getContainers(appAttemptId);
HashMap<ContainerId, ContainerReport> containersReport =
new HashMap<ContainerId, ContainerReport>();
for (Entry<ContainerId, ContainerHistoryData> entry : histData.entrySet()) {
containersReport.put(entry.getKey(),
convertToContainerReport(entry.getValue(),
app == null ? null : app.getUser()));
}
return containersReport;
}
@Private
@VisibleForTesting
public ApplicationHistoryStore getHistoryStore() {
return this.historyStore;
}
}
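A minimal sketch of how a deployment points createApplicationHistoryStore() above at its own store implementation; MyHistoryStore is a hypothetical class, not part of Hadoop, and must implement ApplicationHistoryStore with a no-arg constructor so ReflectionUtils can instantiate it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;

public class HistoryStoreConfigSketch {
  public static Configuration withCustomStore() {
    Configuration conf = new YarnConfiguration();
    // Read back via conf.getClass(...) in createApplicationHistoryStore().
    conf.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
        MyHistoryStore.class, ApplicationHistoryStore.class);
    return conf;
  }
}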
| 9,821 | 39.254098 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import com.google.inject.Inject;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.View;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
public class AboutBlock extends HtmlBlock {
@Inject
AboutBlock(View.ViewContext ctx) {
super(ctx);
}
@Override
protected void render(Block html) {
TimelineAbout tsInfo = TimelineUtils.createTimelineAbout(
"Timeline Server - Generic History Service UI");
info("Timeline Server Overview").
_("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() +
" on " + tsInfo.getTimelineServiceVersionBuiltOn()).
_("Hadoop Version:", tsInfo.getHadoopBuildVersion() +
" on " + tsInfo.getHadoopVersionBuiltOn());
html._(InfoBlock.class);
}
}
| 1,901 | 38.625 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/JAXBContextResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
@Singleton
@Provider
@SuppressWarnings("rawtypes")
public class JAXBContextResolver implements ContextResolver<JAXBContext> {
private JAXBContext context;
private final Set<Class> types;
  // Every DAO class that this web app marshals to JSON/XML must be
  // registered here.
private final Class[] cTypes = { AppInfo.class, AppsInfo.class,
AppAttemptInfo.class, AppAttemptsInfo.class, ContainerInfo.class,
ContainersInfo.class };
public JAXBContextResolver() throws Exception {
this.types = new HashSet<Class>(Arrays.asList(cTypes));
this.context =
new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false)
.build(), cTypes);
}
@Override
public JAXBContext getContext(Class<?> objectType) {
return (types.contains(objectType)) ? context : null;
}
}
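For illustration, the resolver's contract in miniature: every registered DAO type resolves to the one shared JSONJAXBContext, while any other type yields null so Jersey falls back to its default context. A small, self-contained sketch:

import javax.xml.bind.JAXBContext;
import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;

public class ResolverContractSketch {
  public static void main(String[] args) throws Exception {
    JAXBContextResolver resolver = new JAXBContextResolver();
    JAXBContext forDao = resolver.getContext(AppInfo.class);    // shared context
    JAXBContext forOther = resolver.getContext(String.class);   // null
    System.out.println(forDao != null);    // true
    System.out.println(forOther == null);  // true
  }
}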
| 2,355 | 35.246154 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import org.apache.hadoop.yarn.webapp.Controller;
import com.google.inject.Inject;
public class AHSController extends Controller {
@Inject
AHSController(RequestContext ctx) {
super(ctx);
}
@Override
public void index() {
setTitle("Application History");
}
public void about() {
render(AboutPage.class);
}
public void app() {
render(AppPage.class);
}
public void appattempt() {
render(AppAttemptPage.class);
}
public void container() {
render(ContainerPage.class);
}
/**
* Render the logs page.
*/
public void logs() {
render(AHSLogsPage.class);
}
public void errorsAndWarnings() {
render(AHSErrorsAndWarningsPage.class);
}
}
| 1,591 | 23.875 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import static org.apache.hadoop.yarn.util.StringHelper.join;
public class AboutPage extends AHSView {
@Override protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
set(TITLE, "Timeline Server - Generic History Service");
}
@Override protected Class<? extends SubView> content() {
return AboutBlock.class;
}
}
| 1,325 | 34.837838 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import org.apache.hadoop.yarn.server.webapp.ErrorsAndWarningsBlock;
import org.apache.hadoop.yarn.webapp.SubView;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
/**
* Class to display the Errors and Warnings page for the AHS.
*/
public class AHSErrorsAndWarningsPage extends AHSView {
@Override
protected Class<? extends SubView> content() {
return ErrorsAndWarningsBlock.class;
}
@Override
protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
String title = "Errors and Warnings in the Application History Server";
setTitle(title);
String tableId = "messages";
set(DATATABLES_ID, tableId);
set(initID(DATATABLES, tableId), tablesInit());
setTableStyles(html, tableId, ".message {width:50em}",
".count {width:8em}", ".lasttime {width:16em}");
}
private String tablesInit() {
StringBuilder b = tableInit().append(", aoColumnDefs: [");
b.append("{'sType': 'string', 'aTargets': [ 0 ]}");
b.append(", {'sType': 'string', 'bSearchable': true, 'aTargets': [ 1 ]}");
b.append(", {'sType': 'numeric', 'bSearchable': false, 'aTargets': [ 2 ]}");
b.append(", {'sType': 'date', 'aTargets': [ 3 ] }]");
b.append(", aaSorting: [[3, 'desc']]}");
return b.toString();
}
}
| 2,156 | 36.189655 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import org.apache.hadoop.yarn.server.webapp.AppBlock;
import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
public class AppPage extends AHSView {
@Override
protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
String appId = $(YarnWebParams.APPLICATION_ID);
set(
TITLE,
appId.isEmpty() ? "Bad request: missing application ID" : join(
"Application ", $(YarnWebParams.APPLICATION_ID)));
set(DATATABLES_ID, "attempts ResourceRequests");
set(initID(DATATABLES, "attempts"), WebPageUtils.attemptsTableInit());
setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
setTableStyles(html, "ResourceRequests");
set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.APP_HISTORY_WEB_UI);
}
@Override
protected Class<? extends SubView> content() {
return AppBlock.class;
}
protected String getAttemptsTableColumnDefs() {
StringBuilder sb = new StringBuilder();
return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
.append(", 'mRender': parseHadoopID }")
.append("\n, {'sType':'numeric', 'aTargets': [1]")
.append(", 'mRender': renderHadoopDate }]").toString();
}
}
| 2,452 | 36.738462 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import org.apache.hadoop.yarn.server.webapp.ContainerBlock;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
public class ContainerPage extends AHSView {
@Override
protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
String containerId = $(YarnWebParams.CONTAINER_ID);
set(TITLE, containerId.isEmpty() ? "Bad request: missing container ID"
: join("Container ", $(YarnWebParams.CONTAINER_ID)));
}
@Override
protected Class<? extends SubView> content() {
return ContainerBlock.class;
}
}
| 1,528 | 36.292683 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
public class AppAttemptPage extends AHSView {
@Override
protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);
set(
TITLE,
appAttemptId.isEmpty() ? "Bad request: missing application attempt ID"
: join("Application Attempt ",
$(YarnWebParams.APPLICATION_ATTEMPT_ID)));
set(DATATABLES_ID, "containers");
set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.APP_HISTORY_WEB_UI);
}
@Override
protected Class<? extends SubView> content() {
return AppAttemptBlock.class;
}
protected String getContainersTableColumnDefs() {
StringBuilder sb = new StringBuilder();
return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
.append(", 'mRender': parseHadoopID }]").toString();
}
}
| 2,371 | 37.885246 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import org.apache.hadoop.yarn.server.webapp.AppsBlock;
import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
// Do NOT rename/refactor this class to a name that differs only in case:
// Mac OS HFS is case-insensitive, so such a rename wreaks havoc there.
public class AHSView extends TwoColumnLayout {
static final int MAX_DISPLAY_ROWS = 100; // direct table rendering
static final int MAX_FAST_ROWS = 1000; // inline js array
@Override
protected void preHead(Page.HTML<_> html) {
commonPreHead(html);
set(DATATABLES_ID, "apps");
set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit(false));
setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
// Set the correct title.
String reqState = $(APP_STATE);
reqState = (reqState == null || reqState.isEmpty() ? "All" : reqState);
setTitle(sjoin(reqState, "Applications"));
}
protected void commonPreHead(Page.HTML<_> html) {
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
}
@Override
protected Class<? extends SubView> nav() {
return NavBlock.class;
}
@Override
protected Class<? extends SubView> content() {
return AppsBlock.class;
}
}
| 2,635 | 38.343284 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.webapp.TimelineWebServices;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
public class AHSWebApp extends WebApp implements YarnWebParams {
private final ApplicationHistoryClientService historyClientService;
private TimelineDataManager timelineDataManager;
public AHSWebApp(TimelineDataManager timelineDataManager,
ApplicationHistoryClientService historyClientService) {
this.timelineDataManager = timelineDataManager;
this.historyClientService = historyClientService;
}
public ApplicationHistoryClientService getApplicationHistoryClientService() {
return historyClientService;
}
public TimelineDataManager getTimelineDataManager() {
return timelineDataManager;
}
@Override
public void setup() {
bind(YarnJacksonJaxbJsonProvider.class);
bind(AHSWebServices.class);
bind(TimelineWebServices.class);
bind(GenericExceptionHandler.class);
bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
bind(TimelineDataManager.class).toInstance(timelineDataManager);
route("/", AHSController.class);
route("/about", AHSController.class, "about");
route(pajoin("/apps", APP_STATE), AHSController.class);
route(pajoin("/app", APPLICATION_ID), AHSController.class, "app");
route(pajoin("/appattempt", APPLICATION_ATTEMPT_ID), AHSController.class,
"appattempt");
route(pajoin("/container", CONTAINER_ID), AHSController.class, "container");
route(
pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
CONTAINER_LOG_TYPE), AHSController.class, "logs");
route("/errors-and-warnings", AHSController.class, "errorsAndWarnings");
}
}
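Roughly how the history server wires this WebApp up at startup; a hedged sketch only (the real ApplicationHistoryServer adds security and address handling on top, and the bind address here is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;

public class AHSWebAppStartSketch {
  static WebApp start(Configuration conf, TimelineDataManager tdm,
      ApplicationHistoryClientService clientService) {
    // "applicationhistory" becomes the URL prefix; "ws" roots the REST
    // resources so AHSWebServices' @Path("/ws/v1/applicationhistory") matches.
    return WebApps
        .$for("applicationhistory", ApplicationBaseProtocol.class,
            clientService, "ws")
        .with(conf)
        .at("localhost:8188")  // deployment-specific bind address
        .start(new AHSWebApp(tdm, clientService));
  }
}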
| 3,058 | 42.7 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import java.util.Collections;
import java.util.Set;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
import org.apache.hadoop.yarn.server.webapp.WebServices;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
@Singleton
@Path("/ws/v1/applicationhistory")
public class AHSWebServices extends WebServices {
@Inject
public AHSWebServices(ApplicationBaseProtocol appBaseProt) {
super(appBaseProt);
}
@GET
@Path("/about")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TimelineAbout about(
@Context HttpServletRequest req,
@Context HttpServletResponse res) {
init(res);
return TimelineUtils.createTimelineAbout("Generic History Service API");
}
@GET
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public AppsInfo get(@Context HttpServletRequest req,
@Context HttpServletResponse res) {
return getApps(req, res, null, Collections.<String> emptySet(), null, null,
null, null, null, null, null, null, Collections.<String> emptySet());
}
@GET
@Path("/apps")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Override
public AppsInfo getApps(@Context HttpServletRequest req,
@Context HttpServletResponse res, @QueryParam("state") String stateQuery,
@QueryParam("states") Set<String> statesQuery,
@QueryParam("finalStatus") String finalStatusQuery,
@QueryParam("user") String userQuery,
@QueryParam("queue") String queueQuery,
@QueryParam("limit") String count,
@QueryParam("startedTimeBegin") String startedBegin,
@QueryParam("startedTimeEnd") String startedEnd,
@QueryParam("finishedTimeBegin") String finishBegin,
@QueryParam("finishedTimeEnd") String finishEnd,
@QueryParam("applicationTypes") Set<String> applicationTypes) {
init(res);
validateStates(stateQuery, statesQuery);
return super.getApps(req, res, stateQuery, statesQuery, finalStatusQuery,
userQuery, queueQuery, count, startedBegin, startedEnd, finishBegin,
finishEnd, applicationTypes);
}
@GET
@Path("/apps/{appid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Override
public AppInfo getApp(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId) {
init(res);
return super.getApp(req, res, appId);
}
@GET
@Path("/apps/{appid}/appattempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Override
public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId) {
init(res);
return super.getAppAttempts(req, res, appId);
}
@GET
@Path("/apps/{appid}/appattempts/{appattemptid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Override
public AppAttemptInfo getAppAttempt(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("appattemptid") String appAttemptId) {
init(res);
return super.getAppAttempt(req, res, appId, appAttemptId);
}
@GET
@Path("/apps/{appid}/appattempts/{appattemptid}/containers")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Override
public ContainersInfo getContainers(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("appattemptid") String appAttemptId) {
init(res);
return super.getContainers(req, res, appId, appAttemptId);
}
@GET
@Path("/apps/{appid}/appattempts/{appattemptid}/containers/{containerid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Override
public ContainerInfo getContainer(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("appattemptid") String appAttemptId,
@PathParam("containerid") String containerId) {
init(res);
return super.getContainer(req, res, appId, appAttemptId, containerId);
}
private static void
validateStates(String stateQuery, Set<String> statesQuery) {
// stateQuery is deprecated.
if (stateQuery != null && !stateQuery.isEmpty()) {
statesQuery.add(stateQuery);
}
Set<String> appStates = parseQueries(statesQuery, true);
for (String appState : appStates) {
switch (YarnApplicationState.valueOf(
StringUtils.toUpperCase(appState))) {
case FINISHED:
case FAILED:
case KILLED:
continue;
default:
throw new BadRequestException("Invalid application-state " + appState
+ " specified. It should be a final state");
}
}
}
}
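A self-contained sketch of calling these endpoints over plain HTTP. The host and port are assumptions for illustration (8188 is the conventional timeline-service web port; check yarn.timeline-service.webapp.address for your cluster). Note that only final states pass validateStates() above, so states=FINISHED, FAILED or KILLED are the valid filters:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class AHSRestClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL(
        "http://ahs.example.com:8188/ws/v1/applicationhistory/apps?states=FINISHED");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // JSON body listing finished applications
      }
    } finally {
      conn.disconnect();
    }
  }
}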
| 6,666 | 36.880682 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
public class NavBlock extends HtmlBlock {
@Override
public void render(Block html) {
boolean addErrorsAndWarningsLink = false;
Log log = LogFactory.getLog(NavBlock.class);
if (log instanceof Log4JLogger) {
Log4jWarningErrorMetricsAppender appender =
Log4jWarningErrorMetricsAppender.findAppender();
if (appender != null) {
addErrorsAndWarningsLink = true;
}
}
Hamlet.DIV<Hamlet> nav = html.
div("#nav").
h3("Application History").
ul().
li().a(url("about"), "About").
_().
li().a(url("apps"), "Applications").
ul().
li().a(url("apps",
YarnApplicationState.FINISHED.toString()),
YarnApplicationState.FINISHED.toString()).
_().
li().a(url("apps",
YarnApplicationState.FAILED.toString()),
YarnApplicationState.FAILED.toString()).
_().
li().a(url("apps",
YarnApplicationState.KILLED.toString()),
YarnApplicationState.KILLED.toString()).
_().
_().
_().
_();
Hamlet.UL<Hamlet.DIV<Hamlet>> tools = nav.h3("Tools").ul();
tools.li().a("/conf", "Configuration")._()
.li().a("/logs", "Local logs")._()
.li().a("/stacks", "Server stacks")._()
.li().a("/jmx?qry=Hadoop:*", "Server metrics")._();
if (addErrorsAndWarningsLink) {
tools.li().a(url("errors-and-warnings"), "Errors/Warnings")._();
}
tools._()._();
}
}
| 3,097 | 38.717949 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.log.AggregatedLogsBlock;
public class AHSLogsPage extends AHSView {
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSView#
 * preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<_> html) {
String logEntity = $(ENTITY_STRING);
if (logEntity == null || logEntity.isEmpty()) {
logEntity = $(CONTAINER_ID);
}
if (logEntity == null || logEntity.isEmpty()) {
logEntity = "UNKNOWN";
}
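    // NOTE: logEntity is resolved above but not used further in this
    // override; the AggregatedLogsBlock content reads the params itself.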
commonPreHead(html);
}
/**
* The content of this page is the AggregatedLogsBlock
*
* @return AggregatedLogsBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
}
}
| 1,893 | 32.821429 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerHistoryData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* The class contains all the fields that are stored persistently for
* <code>RMContainer</code>.
*/
@Public
@Unstable
public class ContainerHistoryData {
private ContainerId containerId;
private Resource allocatedResource;
private NodeId assignedNode;
private Priority priority;
private long startTime;
private long finishTime;
private String diagnosticsInfo;
private int containerExitStatus;
private ContainerState containerState;
@Public
@Unstable
public static ContainerHistoryData newInstance(ContainerId containerId,
Resource allocatedResource, NodeId assignedNode, Priority priority,
long startTime, long finishTime, String diagnosticsInfo,
int containerExitCode, ContainerState containerState) {
ContainerHistoryData containerHD = new ContainerHistoryData();
containerHD.setContainerId(containerId);
containerHD.setAllocatedResource(allocatedResource);
containerHD.setAssignedNode(assignedNode);
containerHD.setPriority(priority);
containerHD.setStartTime(startTime);
containerHD.setFinishTime(finishTime);
containerHD.setDiagnosticsInfo(diagnosticsInfo);
containerHD.setContainerExitStatus(containerExitCode);
containerHD.setContainerState(containerState);
return containerHD;
}
@Public
@Unstable
public ContainerId getContainerId() {
return containerId;
}
@Public
@Unstable
public void setContainerId(ContainerId containerId) {
this.containerId = containerId;
}
@Public
@Unstable
public Resource getAllocatedResource() {
return allocatedResource;
}
@Public
@Unstable
public void setAllocatedResource(Resource resource) {
this.allocatedResource = resource;
}
@Public
@Unstable
public NodeId getAssignedNode() {
return assignedNode;
}
@Public
@Unstable
public void setAssignedNode(NodeId nodeId) {
this.assignedNode = nodeId;
}
@Public
@Unstable
public Priority getPriority() {
return priority;
}
@Public
@Unstable
public void setPriority(Priority priority) {
this.priority = priority;
}
@Public
@Unstable
public long getStartTime() {
return startTime;
}
@Public
@Unstable
public void setStartTime(long startTime) {
this.startTime = startTime;
}
@Public
@Unstable
public long getFinishTime() {
return finishTime;
}
@Public
@Unstable
public void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
@Public
@Unstable
public String getDiagnosticsInfo() {
return diagnosticsInfo;
}
@Public
@Unstable
public void setDiagnosticsInfo(String diagnosticsInfo) {
this.diagnosticsInfo = diagnosticsInfo;
}
@Public
@Unstable
public int getContainerExitStatus() {
return containerExitStatus;
}
@Public
@Unstable
public void setContainerExitStatus(int containerExitStatus) {
this.containerExitStatus = containerExitStatus;
}
@Public
@Unstable
public ContainerState getContainerState() {
return containerState;
}
@Public
@Unstable
public void setContainerState(ContainerState containerState) {
this.containerState = containerState;
}
}
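A minimal sketch of populating this record for a completed container; the container id string and node address are placeholders for illustration:

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class ContainerHistoryDataSketch {
  public static ContainerHistoryData completedContainer() {
    ContainerId containerId =
        ConverterUtils.toContainerId("container_1408889535987_0001_01_000001");
    long finish = System.currentTimeMillis();
    long start = finish - 60000L;
    return ContainerHistoryData.newInstance(
        containerId,
        Resource.newInstance(1024, 1),                     // 1024 MB, 1 vcore
        NodeId.newInstance("worker-1.example.com", 45454),
        Priority.newInstance(0),
        start, finish,
        "Container exited normally",                       // diagnostics
        0,                                                 // exit status
        ContainerState.COMPLETE);
  }
}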
| 4,466 | 23.409836 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationHistoryData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
/**
* The class contains all the fields that are stored persistently for
* <code>RMApp</code>.
*/
@Public
@Unstable
public class ApplicationHistoryData {
private ApplicationId applicationId;
private String applicationName;
private String applicationType;
private String user;
private String queue;
private long submitTime;
private long startTime;
private long finishTime;
private String diagnosticsInfo;
private FinalApplicationStatus finalApplicationStatus;
private YarnApplicationState yarnApplicationState;
@Public
@Unstable
public static ApplicationHistoryData newInstance(ApplicationId applicationId,
String applicationName, String applicationType, String queue,
String user, long submitTime, long startTime, long finishTime,
String diagnosticsInfo, FinalApplicationStatus finalApplicationStatus,
YarnApplicationState yarnApplicationState) {
ApplicationHistoryData appHD = new ApplicationHistoryData();
appHD.setApplicationId(applicationId);
appHD.setApplicationName(applicationName);
appHD.setApplicationType(applicationType);
appHD.setQueue(queue);
appHD.setUser(user);
appHD.setSubmitTime(submitTime);
appHD.setStartTime(startTime);
appHD.setFinishTime(finishTime);
appHD.setDiagnosticsInfo(diagnosticsInfo);
appHD.setFinalApplicationStatus(finalApplicationStatus);
appHD.setYarnApplicationState(yarnApplicationState);
return appHD;
}
@Public
@Unstable
public ApplicationId getApplicationId() {
return applicationId;
}
@Public
@Unstable
public void setApplicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
}
@Public
@Unstable
public String getApplicationName() {
return applicationName;
}
@Public
@Unstable
public void setApplicationName(String applicationName) {
this.applicationName = applicationName;
}
@Public
@Unstable
public String getApplicationType() {
return applicationType;
}
@Public
@Unstable
public void setApplicationType(String applicationType) {
this.applicationType = applicationType;
}
@Public
@Unstable
public String getUser() {
return user;
}
@Public
@Unstable
public void setUser(String user) {
this.user = user;
}
@Public
@Unstable
public String getQueue() {
return queue;
}
@Public
@Unstable
public void setQueue(String queue) {
this.queue = queue;
}
@Public
@Unstable
public long getSubmitTime() {
return submitTime;
}
@Public
@Unstable
public void setSubmitTime(long submitTime) {
this.submitTime = submitTime;
}
@Public
@Unstable
public long getStartTime() {
return startTime;
}
@Public
@Unstable
public void setStartTime(long startTime) {
this.startTime = startTime;
}
@Public
@Unstable
public long getFinishTime() {
return finishTime;
}
@Public
@Unstable
public void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
@Public
@Unstable
public String getDiagnosticsInfo() {
return diagnosticsInfo;
}
@Public
@Unstable
public void setDiagnosticsInfo(String diagnosticsInfo) {
this.diagnosticsInfo = diagnosticsInfo;
}
@Public
@Unstable
public FinalApplicationStatus getFinalApplicationStatus() {
return finalApplicationStatus;
}
@Public
@Unstable
public void setFinalApplicationStatus(
FinalApplicationStatus finalApplicationStatus) {
this.finalApplicationStatus = finalApplicationStatus;
}
@Public
@Unstable
public YarnApplicationState getYarnApplicationState() {
return this.yarnApplicationState;
}
@Public
@Unstable
public void
setYarnApplicationState(YarnApplicationState yarnApplicationState) {
this.yarnApplicationState = yarnApplicationState;
}
}
| 5,054 | 22.621495 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationStartData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.Records;
/**
* The class contains the fields that can be determined when <code>RMApp</code>
* starts, and that need to be stored persistently.
*/
@Public
@Unstable
public abstract class ApplicationStartData {
@Public
@Unstable
public static ApplicationStartData newInstance(ApplicationId applicationId,
String applicationName, String applicationType, String queue,
String user, long submitTime, long startTime) {
ApplicationStartData appSD = Records.newRecord(ApplicationStartData.class);
appSD.setApplicationId(applicationId);
appSD.setApplicationName(applicationName);
appSD.setApplicationType(applicationType);
appSD.setQueue(queue);
appSD.setUser(user);
appSD.setSubmitTime(submitTime);
appSD.setStartTime(startTime);
return appSD;
}
@Public
@Unstable
public abstract ApplicationId getApplicationId();
@Public
@Unstable
public abstract void setApplicationId(ApplicationId applicationId);
@Public
@Unstable
public abstract String getApplicationName();
@Public
@Unstable
public abstract void setApplicationName(String applicationName);
@Public
@Unstable
public abstract String getApplicationType();
@Public
@Unstable
public abstract void setApplicationType(String applicationType);
@Public
@Unstable
public abstract String getUser();
@Public
@Unstable
public abstract void setUser(String user);
@Public
@Unstable
public abstract String getQueue();
@Public
@Unstable
public abstract void setQueue(String queue);
@Public
@Unstable
public abstract long getSubmitTime();
@Public
@Unstable
public abstract void setSubmitTime(long submitTime);
@Public
@Unstable
public abstract long getStartTime();
@Public
@Unstable
public abstract void setStartTime(long startTime);
}
| 2,935 | 26.439252 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerFinishData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.util.Records;
/**
* The class contains the fields that can be determined when
* <code>RMContainer</code> finishes, and that need to be stored persistently.
*/
@Public
@Unstable
public abstract class ContainerFinishData {
@Public
@Unstable
public static ContainerFinishData newInstance(ContainerId containerId,
long finishTime, String diagnosticsInfo, int containerExitCode,
ContainerState containerState) {
ContainerFinishData containerFD =
Records.newRecord(ContainerFinishData.class);
containerFD.setContainerId(containerId);
containerFD.setFinishTime(finishTime);
containerFD.setDiagnosticsInfo(diagnosticsInfo);
containerFD.setContainerExitStatus(containerExitCode);
containerFD.setContainerState(containerState);
return containerFD;
}
@Public
@Unstable
public abstract ContainerId getContainerId();
@Public
@Unstable
public abstract void setContainerId(ContainerId containerId);
@Public
@Unstable
public abstract long getFinishTime();
@Public
@Unstable
public abstract void setFinishTime(long finishTime);
@Public
@Unstable
public abstract String getDiagnosticsInfo();
@Public
@Unstable
public abstract void setDiagnosticsInfo(String diagnosticsInfo);
@Public
@Unstable
public abstract int getContainerExitStatus();
@Public
@Unstable
public abstract void setContainerExitStatus(int containerExitStatus);
@Public
@Unstable
public abstract ContainerState getContainerState();
@Public
@Unstable
public abstract void setContainerState(ContainerState containerState);
}
| 2,762 | 29.362637 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptStartData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.Records;
/**
* The class contains the fields that can be determined when
* <code>RMAppAttempt</code> starts, and that need to be stored persistently.
*/
@Public
@Unstable
public abstract class ApplicationAttemptStartData {
@Public
@Unstable
public static ApplicationAttemptStartData newInstance(
ApplicationAttemptId appAttemptId, String host, int rpcPort,
ContainerId masterContainerId) {
ApplicationAttemptStartData appAttemptSD =
Records.newRecord(ApplicationAttemptStartData.class);
appAttemptSD.setApplicationAttemptId(appAttemptId);
appAttemptSD.setHost(host);
appAttemptSD.setRPCPort(rpcPort);
appAttemptSD.setMasterContainerId(masterContainerId);
return appAttemptSD;
}
@Public
@Unstable
public abstract ApplicationAttemptId getApplicationAttemptId();
@Public
@Unstable
public abstract void setApplicationAttemptId(
ApplicationAttemptId applicationAttemptId);
@Public
@Unstable
public abstract String getHost();
@Public
@Unstable
public abstract void setHost(String host);
@Public
@Unstable
public abstract int getRPCPort();
@Public
@Unstable
public abstract void setRPCPort(int rpcPort);
@Public
@Unstable
public abstract ContainerId getMasterContainerId();
@Public
@Unstable
public abstract void setMasterContainerId(ContainerId masterContainerId);
}
| 2,552 | 29.759036 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptFinishData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.util.Records;
/**
* The class contains the fields that can be determined when
* <code>RMAppAttempt</code> finishes, and that need to be stored persistently.
*/
@Public
@Unstable
public abstract class ApplicationAttemptFinishData {
@Public
@Unstable
public static ApplicationAttemptFinishData newInstance(
ApplicationAttemptId appAttemptId, String diagnosticsInfo,
String trackingURL, FinalApplicationStatus finalApplicationStatus,
YarnApplicationAttemptState yarnApplicationAttemptState) {
ApplicationAttemptFinishData appAttemptFD =
Records.newRecord(ApplicationAttemptFinishData.class);
appAttemptFD.setApplicationAttemptId(appAttemptId);
appAttemptFD.setDiagnosticsInfo(diagnosticsInfo);
appAttemptFD.setTrackingURL(trackingURL);
appAttemptFD.setFinalApplicationStatus(finalApplicationStatus);
appAttemptFD.setYarnApplicationAttemptState(yarnApplicationAttemptState);
return appAttemptFD;
}
@Public
@Unstable
public abstract ApplicationAttemptId getApplicationAttemptId();
@Public
@Unstable
public abstract void setApplicationAttemptId(
ApplicationAttemptId applicationAttemptId);
@Public
@Unstable
public abstract String getTrackingURL();
@Public
@Unstable
public abstract void setTrackingURL(String trackingURL);
@Public
@Unstable
public abstract String getDiagnosticsInfo();
@Public
@Unstable
public abstract void setDiagnosticsInfo(String diagnosticsInfo);
@Public
@Unstable
public abstract FinalApplicationStatus getFinalApplicationStatus();
@Public
@Unstable
public abstract void setFinalApplicationStatus(
FinalApplicationStatus finalApplicationStatus);
@Public
@Unstable
public abstract YarnApplicationAttemptState getYarnApplicationAttemptState();
@Public
@Unstable
public abstract void setYarnApplicationAttemptState(
YarnApplicationAttemptState yarnApplicationAttemptState);
}
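// Illustrative usage sketch (hypothetical diagnostics and tracking URL; not
// part of the original file): recording the terminal state of an attempt
// that finished successfully.
class ApplicationAttemptFinishDataExample {
  static ApplicationAttemptFinishData sample(ApplicationAttemptId attemptId) {
    return ApplicationAttemptFinishData.newInstance(
        attemptId, "AM exited normally", "http://rm.example.com:8088/proxy",
        FinalApplicationStatus.SUCCEEDED,
        YarnApplicationAttemptState.FINISHED);
  }
}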
| 3,195 | 32.291667 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationFinishData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Records;
/**
* The class contains the fields that can be determined when <code>RMApp</code>
* finishes, and that need to be stored persistently.
*/
@Public
@Unstable
public abstract class ApplicationFinishData {
@Public
@Unstable
public static ApplicationFinishData newInstance(ApplicationId applicationId,
long finishTime, String diagnosticsInfo,
FinalApplicationStatus finalApplicationStatus,
YarnApplicationState yarnApplicationState) {
ApplicationFinishData appFD =
Records.newRecord(ApplicationFinishData.class);
appFD.setApplicationId(applicationId);
appFD.setFinishTime(finishTime);
appFD.setDiagnosticsInfo(diagnosticsInfo);
appFD.setFinalApplicationStatus(finalApplicationStatus);
appFD.setYarnApplicationState(yarnApplicationState);
return appFD;
}
@Public
@Unstable
public abstract ApplicationId getApplicationId();
@Public
@Unstable
public abstract void setApplicationId(ApplicationId applicationId);
@Public
@Unstable
public abstract long getFinishTime();
@Public
@Unstable
public abstract void setFinishTime(long finishTime);
@Public
@Unstable
public abstract String getDiagnosticsInfo();
@Public
@Unstable
public abstract void setDiagnosticsInfo(String diagnosticsInfo);
@Public
@Unstable
public abstract FinalApplicationStatus getFinalApplicationStatus();
@Public
@Unstable
public abstract void setFinalApplicationStatus(
FinalApplicationStatus finalApplicationStatus);
@Public
@Unstable
public abstract YarnApplicationState getYarnApplicationState();
@Public
@Unstable
public abstract void setYarnApplicationState(
YarnApplicationState yarnApplicationState);
}
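// Illustrative usage sketch (hypothetical diagnostics; not part of the
// original file): recording an application's terminal state when the RMApp
// completes.
class ApplicationFinishDataExample {
  static ApplicationFinishData sample(ApplicationId appId) {
    return ApplicationFinishData.newInstance(
        appId, System.currentTimeMillis(), "Application completed normally",
        FinalApplicationStatus.SUCCEEDED, YarnApplicationState.FINISHED);
  }
}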
| 2,967 | 30.242105 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ApplicationAttemptHistoryData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
/**
* The class contains all the fields that are stored persistently for
* <code>RMAppAttempt</code>.
*/
@Public
@Unstable
public class ApplicationAttemptHistoryData {
private ApplicationAttemptId applicationAttemptId;
private String host;
private int rpcPort;
private String trackingURL;
private String diagnosticsInfo;
private FinalApplicationStatus finalApplicationStatus;
private ContainerId masterContainerId;
private YarnApplicationAttemptState yarnApplicationAttemptState;
@Public
@Unstable
public static ApplicationAttemptHistoryData newInstance(
ApplicationAttemptId appAttemptId, String host, int rpcPort,
ContainerId masterContainerId, String diagnosticsInfo,
String trackingURL, FinalApplicationStatus finalApplicationStatus,
YarnApplicationAttemptState yarnApplicationAttemptState) {
ApplicationAttemptHistoryData appAttemptHD =
new ApplicationAttemptHistoryData();
appAttemptHD.setApplicationAttemptId(appAttemptId);
appAttemptHD.setHost(host);
appAttemptHD.setRPCPort(rpcPort);
appAttemptHD.setMasterContainerId(masterContainerId);
appAttemptHD.setDiagnosticsInfo(diagnosticsInfo);
appAttemptHD.setTrackingURL(trackingURL);
appAttemptHD.setFinalApplicationStatus(finalApplicationStatus);
appAttemptHD.setYarnApplicationAttemptState(yarnApplicationAttemptState);
return appAttemptHD;
}
@Public
@Unstable
public ApplicationAttemptId getApplicationAttemptId() {
return applicationAttemptId;
}
@Public
@Unstable
public void
setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
this.applicationAttemptId = applicationAttemptId;
}
@Public
@Unstable
public String getHost() {
return host;
}
@Public
@Unstable
public void setHost(String host) {
this.host = host;
}
@Public
@Unstable
public int getRPCPort() {
return rpcPort;
}
@Public
@Unstable
public void setRPCPort(int rpcPort) {
this.rpcPort = rpcPort;
}
@Public
@Unstable
public String getTrackingURL() {
return trackingURL;
}
@Public
@Unstable
public void setTrackingURL(String trackingURL) {
this.trackingURL = trackingURL;
}
@Public
@Unstable
public String getDiagnosticsInfo() {
return diagnosticsInfo;
}
@Public
@Unstable
public void setDiagnosticsInfo(String diagnosticsInfo) {
this.diagnosticsInfo = diagnosticsInfo;
}
@Public
@Unstable
public FinalApplicationStatus getFinalApplicationStatus() {
return finalApplicationStatus;
}
@Public
@Unstable
public void setFinalApplicationStatus(
FinalApplicationStatus finalApplicationStatus) {
this.finalApplicationStatus = finalApplicationStatus;
}
@Public
@Unstable
public ContainerId getMasterContainerId() {
return masterContainerId;
}
@Public
@Unstable
public void setMasterContainerId(ContainerId masterContainerId) {
this.masterContainerId = masterContainerId;
}
@Public
@Unstable
public YarnApplicationAttemptState getYarnApplicationAttemptState() {
return yarnApplicationAttemptState;
}
@Public
@Unstable
public void setYarnApplicationAttemptState(
YarnApplicationAttemptState yarnApplicationAttemptState) {
this.yarnApplicationAttemptState = yarnApplicationAttemptState;
}
}
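// Illustrative sketch (not part of the original file): a history record is
// effectively the union of the start and finish records of the same attempt;
// the merge helper below is an assumption added for clarity, not an API of
// the history service.
class ApplicationAttemptHistoryDataExample {
  static ApplicationAttemptHistoryData merge(ApplicationAttemptStartData start,
      ApplicationAttemptFinishData finish) {
    return ApplicationAttemptHistoryData.newInstance(
        start.getApplicationAttemptId(), start.getHost(), start.getRPCPort(),
        start.getMasterContainerId(), finish.getDiagnosticsInfo(),
        finish.getTrackingURL(), finish.getFinalApplicationStatus(),
        finish.getYarnApplicationAttemptState());
  }
}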
| 4,621 | 25.872093 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/ContainerStartData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;
/**
* The class contains the fields that can be determined when
* <code>RMContainer</code> starts, and that need to be stored persistently.
*/
@Public
@Unstable
public abstract class ContainerStartData {
@Public
@Unstable
public static ContainerStartData newInstance(ContainerId containerId,
Resource allocatedResource, NodeId assignedNode, Priority priority,
long startTime) {
ContainerStartData containerSD =
Records.newRecord(ContainerStartData.class);
containerSD.setContainerId(containerId);
containerSD.setAllocatedResource(allocatedResource);
containerSD.setAssignedNode(assignedNode);
containerSD.setPriority(priority);
containerSD.setStartTime(startTime);
return containerSD;
}
@Public
@Unstable
public abstract ContainerId getContainerId();
@Public
@Unstable
public abstract void setContainerId(ContainerId containerId);
@Public
@Unstable
public abstract Resource getAllocatedResource();
@Public
@Unstable
public abstract void setAllocatedResource(Resource resource);
@Public
@Unstable
public abstract NodeId getAssignedNode();
@Public
@Unstable
public abstract void setAssignedNode(NodeId nodeId);
@Public
@Unstable
public abstract Priority getPriority();
@Public
@Unstable
public abstract void setPriority(Priority priority);
@Public
@Unstable
public abstract long getStartTime();
@Public
@Unstable
public abstract void setStartTime(long startTime);
}
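// Illustrative usage sketch (hypothetical node, resource, and priority
// values; not part of the original file). The ContainerId is assumed to come
// from the allocating RMContainer.
class ContainerStartDataExample {
  static ContainerStartData sample(ContainerId containerId) {
    return ContainerStartData.newInstance(
        containerId,
        Resource.newInstance(1024, 1), // 1 GB of memory, 1 vcore
        NodeId.newInstance("nm-host.example.com", 45454),
        Priority.newInstance(0),
        System.currentTimeMillis());
  }
}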
| 2,766 | 28.752688 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationFinishDataPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import com.google.protobuf.TextFormat;
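// Note on the pattern shared by the *PBImpl records below: while viaProto is
// true, reads are served from the immutable "proto"; the first write forks a
// mutable "builder" from it (maybeInitBuilder), after which reads come from
// the builder until getProto() merges cached record objects back in and
// rebuilds the proto.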
public class ApplicationFinishDataPBImpl extends ApplicationFinishData {
ApplicationFinishDataProto proto = ApplicationFinishDataProto
.getDefaultInstance();
ApplicationFinishDataProto.Builder builder = null;
boolean viaProto = false;
private ApplicationId applicationId;
public ApplicationFinishDataPBImpl() {
builder = ApplicationFinishDataProto.newBuilder();
}
public ApplicationFinishDataPBImpl(ApplicationFinishDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ApplicationId getApplicationId() {
if (this.applicationId != null) {
return this.applicationId;
}
ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationId()) {
return null;
}
this.applicationId = convertFromProtoFormat(p.getApplicationId());
return this.applicationId;
}
@Override
public void setApplicationId(ApplicationId applicationId) {
maybeInitBuilder();
if (applicationId == null) {
builder.clearApplicationId();
}
this.applicationId = applicationId;
}
@Override
public long getFinishTime() {
ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getFinishTime();
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime(finishTime);
}
@Override
public String getDiagnosticsInfo() {
ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnosticsInfo()) {
return null;
}
return p.getDiagnosticsInfo();
}
@Override
public void setDiagnosticsInfo(String diagnosticsInfo) {
maybeInitBuilder();
if (diagnosticsInfo == null) {
builder.clearDiagnosticsInfo();
return;
}
builder.setDiagnosticsInfo(diagnosticsInfo);
}
@Override
public FinalApplicationStatus getFinalApplicationStatus() {
ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasFinalApplicationStatus()) {
return null;
}
return convertFromProtoFormat(p.getFinalApplicationStatus());
}
@Override
public void setFinalApplicationStatus(
FinalApplicationStatus finalApplicationStatus) {
maybeInitBuilder();
if (finalApplicationStatus == null) {
builder.clearFinalApplicationStatus();
return;
}
builder
.setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus));
}
@Override
public YarnApplicationState getYarnApplicationState() {
ApplicationFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasYarnApplicationState()) {
return null;
}
return convertFromProtoFormat(p.getYarnApplicationState());
}
@Override
public void setYarnApplicationState(YarnApplicationState state) {
maybeInitBuilder();
if (state == null) {
builder.clearYarnApplicationState();
return;
}
builder.setYarnApplicationState(convertToProtoFormat(state));
}
public ApplicationFinishDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationId != null
&& !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
builder.getApplicationId())) {
builder.setApplicationId(convertToProtoFormat(this.applicationId));
}
}
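  // Merges any locally cached record objects into the builder, rebuilds the
  // immutable proto from it, and switches reads back to the proto.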
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
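  // Ensures a mutable builder exists before a write: when reads are currently
  // served via the proto, the builder is forked from that proto first.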
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationFinishDataProto.newBuilder(proto);
}
viaProto = false;
}
private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) {
return ((ApplicationIdPBImpl) applicationId).getProto();
}
private ApplicationIdPBImpl convertFromProtoFormat(
ApplicationIdProto applicationId) {
return new ApplicationIdPBImpl(applicationId);
}
private FinalApplicationStatus convertFromProtoFormat(
FinalApplicationStatusProto finalApplicationStatus) {
return ProtoUtils.convertFromProtoFormat(finalApplicationStatus);
}
private FinalApplicationStatusProto convertToProtoFormat(
FinalApplicationStatus finalApplicationStatus) {
return ProtoUtils.convertToProtoFormat(finalApplicationStatus);
}
private YarnApplicationStateProto convertToProtoFormat(
YarnApplicationState state) {
return ProtoUtils.convertToProtoFormat(state);
}
private YarnApplicationState convertFromProtoFormat(
YarnApplicationStateProto yarnApplicationState) {
return ProtoUtils.convertFromProtoFormat(yarnApplicationState);
}
}
| 6,996 | 29.823789 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import com.google.protobuf.TextFormat;
public class ApplicationAttemptStartDataPBImpl extends
ApplicationAttemptStartData {
ApplicationAttemptStartDataProto proto = ApplicationAttemptStartDataProto
.getDefaultInstance();
ApplicationAttemptStartDataProto.Builder builder = null;
boolean viaProto = false;
public ApplicationAttemptStartDataPBImpl() {
builder = ApplicationAttemptStartDataProto.newBuilder();
}
public ApplicationAttemptStartDataPBImpl(
ApplicationAttemptStartDataProto proto) {
this.proto = proto;
viaProto = true;
}
private ApplicationAttemptId applicationAttemptId;
private ContainerId masterContainerId;
@Override
public ApplicationAttemptId getApplicationAttemptId() {
if (this.applicationAttemptId != null) {
return this.applicationAttemptId;
}
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationAttemptId()) {
return null;
}
this.applicationAttemptId =
convertFromProtoFormat(p.getApplicationAttemptId());
return this.applicationAttemptId;
}
@Override
public void
setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
maybeInitBuilder();
if (applicationAttemptId == null) {
builder.clearApplicationAttemptId();
}
this.applicationAttemptId = applicationAttemptId;
}
@Override
public String getHost() {
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasHost()) {
return null;
}
return p.getHost();
}
@Override
public void setHost(String host) {
maybeInitBuilder();
if (host == null) {
builder.clearHost();
return;
}
builder.setHost(host);
}
@Override
public int getRPCPort() {
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getRpcPort();
}
@Override
public void setRPCPort(int rpcPort) {
maybeInitBuilder();
builder.setRpcPort(rpcPort);
}
@Override
public ContainerId getMasterContainerId() {
if (this.masterContainerId != null) {
return this.masterContainerId;
}
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
    if (!p.hasMasterContainerId()) {
return null;
}
this.masterContainerId = convertFromProtoFormat(p.getMasterContainerId());
return this.masterContainerId;
}
@Override
public void setMasterContainerId(ContainerId masterContainerId) {
maybeInitBuilder();
if (masterContainerId == null) {
builder.clearMasterContainerId();
}
this.masterContainerId = masterContainerId;
}
public ApplicationAttemptStartDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationAttemptId != null
&& !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
.equals(builder.getApplicationAttemptId())) {
builder
.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
}
if (this.masterContainerId != null
&& !((ContainerIdPBImpl) this.masterContainerId).getProto().equals(
builder.getMasterContainerId())) {
builder
.setMasterContainerId(convertToProtoFormat(this.masterContainerId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationAttemptStartDataProto.newBuilder(proto);
}
viaProto = false;
}
private ApplicationAttemptIdPBImpl convertFromProtoFormat(
ApplicationAttemptIdProto applicationAttemptId) {
return new ApplicationAttemptIdPBImpl(applicationAttemptId);
}
private ApplicationAttemptIdProto convertToProtoFormat(
ApplicationAttemptId applicationAttemptId) {
return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
}
private ContainerIdPBImpl
convertFromProtoFormat(ContainerIdProto containerId) {
return new ContainerIdPBImpl(containerId);
}
private ContainerIdProto convertToProtoFormat(ContainerId masterContainerId) {
return ((ContainerIdPBImpl) masterContainerId).getProto();
}
}
| 6,479 | 30.004785 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerFinishDataPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import com.google.protobuf.TextFormat;
public class ContainerFinishDataPBImpl extends ContainerFinishData {
ContainerFinishDataProto proto = ContainerFinishDataProto
.getDefaultInstance();
ContainerFinishDataProto.Builder builder = null;
boolean viaProto = false;
private ContainerId containerId;
public ContainerFinishDataPBImpl() {
builder = ContainerFinishDataProto.newBuilder();
}
public ContainerFinishDataPBImpl(ContainerFinishDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ContainerId getContainerId() {
if (this.containerId != null) {
return this.containerId;
}
ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasContainerId()) {
return null;
}
this.containerId = convertFromProtoFormat(p.getContainerId());
return this.containerId;
}
@Override
public void setContainerId(ContainerId containerId) {
maybeInitBuilder();
if (containerId == null) {
builder.clearContainerId();
}
this.containerId = containerId;
}
@Override
public long getFinishTime() {
ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getFinishTime();
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime(finishTime);
}
@Override
public String getDiagnosticsInfo() {
ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnosticsInfo()) {
return null;
}
return p.getDiagnosticsInfo();
}
@Override
public void setDiagnosticsInfo(String diagnosticsInfo) {
maybeInitBuilder();
if (diagnosticsInfo == null) {
builder.clearDiagnosticsInfo();
return;
}
builder.setDiagnosticsInfo(diagnosticsInfo);
}
@Override
public int getContainerExitStatus() {
ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getContainerExitStatus();
}
@Override
public ContainerState getContainerState() {
ContainerFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasContainerState()) {
return null;
}
return convertFromProtoFormat(p.getContainerState());
}
@Override
public void setContainerState(ContainerState state) {
maybeInitBuilder();
if (state == null) {
builder.clearContainerState();
return;
}
builder.setContainerState(convertToProtoFormat(state));
}
@Override
public void setContainerExitStatus(int containerExitStatus) {
maybeInitBuilder();
builder.setContainerExitStatus(containerExitStatus);
}
public ContainerFinishDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.containerId != null
&& !((ContainerIdPBImpl) this.containerId).getProto().equals(
builder.getContainerId())) {
builder.setContainerId(convertToProtoFormat(this.containerId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ContainerFinishDataProto.newBuilder(proto);
}
viaProto = false;
}
private ContainerIdProto convertToProtoFormat(ContainerId containerId) {
return ((ContainerIdPBImpl) containerId).getProto();
}
private ContainerIdPBImpl
convertFromProtoFormat(ContainerIdProto containerId) {
return new ContainerIdPBImpl(containerId);
}
private ContainerStateProto convertToProtoFormat(ContainerState state) {
return ProtoUtils.convertToProtoFormat(state);
}
private ContainerState convertFromProtoFormat(
ContainerStateProto containerState) {
return ProtoUtils.convertFromProtoFormat(containerState);
}
}
| 5,970 | 28.126829 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationStartDataPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import com.google.protobuf.TextFormat;
public class ApplicationStartDataPBImpl extends ApplicationStartData {
ApplicationStartDataProto proto = ApplicationStartDataProto
.getDefaultInstance();
ApplicationStartDataProto.Builder builder = null;
boolean viaProto = false;
private ApplicationId applicationId;
public ApplicationStartDataPBImpl() {
builder = ApplicationStartDataProto.newBuilder();
}
public ApplicationStartDataPBImpl(ApplicationStartDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ApplicationId getApplicationId() {
if (this.applicationId != null) {
return this.applicationId;
}
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationId()) {
return null;
}
this.applicationId = convertFromProtoFormat(p.getApplicationId());
return this.applicationId;
}
@Override
public void setApplicationId(ApplicationId applicationId) {
maybeInitBuilder();
if (applicationId == null) {
builder.clearApplicationId();
}
this.applicationId = applicationId;
}
@Override
public String getApplicationName() {
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationName()) {
return null;
}
return p.getApplicationName();
}
@Override
public void setApplicationName(String applicationName) {
maybeInitBuilder();
if (applicationName == null) {
builder.clearApplicationName();
return;
}
builder.setApplicationName(applicationName);
}
@Override
public String getApplicationType() {
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationType()) {
return null;
}
return p.getApplicationType();
}
@Override
public void setApplicationType(String applicationType) {
maybeInitBuilder();
if (applicationType == null) {
builder.clearApplicationType();
return;
}
builder.setApplicationType(applicationType);
}
@Override
public String getUser() {
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasUser()) {
return null;
}
return p.getUser();
}
@Override
public void setUser(String user) {
maybeInitBuilder();
if (user == null) {
builder.clearUser();
return;
}
builder.setUser(user);
}
@Override
public String getQueue() {
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasQueue()) {
return null;
}
return p.getQueue();
}
@Override
public void setQueue(String queue) {
maybeInitBuilder();
if (queue == null) {
builder.clearQueue();
return;
}
builder.setQueue(queue);
}
@Override
public long getSubmitTime() {
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getSubmitTime();
}
@Override
public void setSubmitTime(long submitTime) {
maybeInitBuilder();
builder.setSubmitTime(submitTime);
}
@Override
public long getStartTime() {
ApplicationStartDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
public ApplicationStartDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationId != null
&& !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
builder.getApplicationId())) {
builder.setApplicationId(convertToProtoFormat(this.applicationId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationStartDataProto.newBuilder(proto);
}
viaProto = false;
}
private ApplicationIdProto convertToProtoFormat(ApplicationId applicationId) {
return ((ApplicationIdPBImpl) applicationId).getProto();
}
private ApplicationIdPBImpl convertFromProtoFormat(
ApplicationIdProto applicationId) {
return new ApplicationIdPBImpl(applicationId);
}
}
| 6,237 | 26.121739 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ContainerStartDataPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
import com.google.protobuf.TextFormat;
public class ContainerStartDataPBImpl extends ContainerStartData {
ContainerStartDataProto proto = ContainerStartDataProto.getDefaultInstance();
ContainerStartDataProto.Builder builder = null;
boolean viaProto = false;
private ContainerId containerId;
private Resource resource;
private NodeId nodeId;
private Priority priority;
public ContainerStartDataPBImpl() {
builder = ContainerStartDataProto.newBuilder();
}
public ContainerStartDataPBImpl(ContainerStartDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ContainerId getContainerId() {
if (this.containerId != null) {
return this.containerId;
}
ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasContainerId()) {
return null;
}
this.containerId = convertFromProtoFormat(p.getContainerId());
return this.containerId;
}
@Override
public void setContainerId(ContainerId containerId) {
maybeInitBuilder();
if (containerId == null) {
builder.clearContainerId();
}
this.containerId = containerId;
}
@Override
public Resource getAllocatedResource() {
if (this.resource != null) {
return this.resource;
}
ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAllocatedResource()) {
return null;
}
this.resource = convertFromProtoFormat(p.getAllocatedResource());
return this.resource;
}
@Override
public void setAllocatedResource(Resource resource) {
maybeInitBuilder();
if (resource == null) {
builder.clearAllocatedResource();
}
this.resource = resource;
}
@Override
public NodeId getAssignedNode() {
if (this.nodeId != null) {
return this.nodeId;
}
ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAssignedNodeId()) {
return null;
}
this.nodeId = convertFromProtoFormat(p.getAssignedNodeId());
return this.nodeId;
}
@Override
public void setAssignedNode(NodeId nodeId) {
maybeInitBuilder();
if (nodeId == null) {
builder.clearAssignedNodeId();
}
this.nodeId = nodeId;
}
@Override
public Priority getPriority() {
if (this.priority != null) {
return this.priority;
}
ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasPriority()) {
return null;
}
this.priority = convertFromProtoFormat(p.getPriority());
return this.priority;
}
@Override
public void setPriority(Priority priority) {
maybeInitBuilder();
if (priority == null) {
builder.clearPriority();
}
this.priority = priority;
}
@Override
public long getStartTime() {
ContainerStartDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
public ContainerStartDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.containerId != null
&& !((ContainerIdPBImpl) this.containerId).getProto().equals(
builder.getContainerId())) {
builder.setContainerId(convertToProtoFormat(this.containerId));
}
if (this.resource != null
&& !((ResourcePBImpl) this.resource).getProto().equals(
builder.getAllocatedResource())) {
builder.setAllocatedResource(convertToProtoFormat(this.resource));
}
if (this.nodeId != null
&& !((NodeIdPBImpl) this.nodeId).getProto().equals(
builder.getAssignedNodeId())) {
builder.setAssignedNodeId(convertToProtoFormat(this.nodeId));
}
if (this.priority != null
&& !((PriorityPBImpl) this.priority).getProto().equals(
builder.getPriority())) {
builder.setPriority(convertToProtoFormat(this.priority));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ContainerStartDataProto.newBuilder(proto);
}
viaProto = false;
}
private ContainerIdProto convertToProtoFormat(ContainerId containerId) {
return ((ContainerIdPBImpl) containerId).getProto();
}
private ContainerIdPBImpl
convertFromProtoFormat(ContainerIdProto containerId) {
return new ContainerIdPBImpl(containerId);
}
private ResourceProto convertToProtoFormat(Resource resource) {
return ((ResourcePBImpl) resource).getProto();
}
private ResourcePBImpl convertFromProtoFormat(ResourceProto resource) {
return new ResourcePBImpl(resource);
}
private NodeIdProto convertToProtoFormat(NodeId nodeId) {
return ((NodeIdPBImpl) nodeId).getProto();
}
private NodeIdPBImpl convertFromProtoFormat(NodeIdProto nodeId) {
return new NodeIdPBImpl(nodeId);
}
private PriorityProto convertToProtoFormat(Priority priority) {
return ((PriorityPBImpl) priority).getProto();
}
private PriorityPBImpl convertFromProtoFormat(PriorityProto priority) {
return new PriorityPBImpl(priority);
}
}
| 7,710 | 28.772201 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptFinishDataPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
import org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import com.google.protobuf.TextFormat;
public class ApplicationAttemptFinishDataPBImpl extends
ApplicationAttemptFinishData {
ApplicationAttemptFinishDataProto proto = ApplicationAttemptFinishDataProto
.getDefaultInstance();
ApplicationAttemptFinishDataProto.Builder builder = null;
boolean viaProto = false;
public ApplicationAttemptFinishDataPBImpl() {
builder = ApplicationAttemptFinishDataProto.newBuilder();
}
public ApplicationAttemptFinishDataPBImpl(
ApplicationAttemptFinishDataProto proto) {
this.proto = proto;
viaProto = true;
}
private ApplicationAttemptId applicationAttemptId;
@Override
public ApplicationAttemptId getApplicationAttemptId() {
if (this.applicationAttemptId != null) {
return this.applicationAttemptId;
}
ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationAttemptId()) {
return null;
}
this.applicationAttemptId =
convertFromProtoFormat(p.getApplicationAttemptId());
return this.applicationAttemptId;
}
@Override
public void
setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
maybeInitBuilder();
if (applicationAttemptId == null) {
builder.clearApplicationAttemptId();
}
this.applicationAttemptId = applicationAttemptId;
}
@Override
public String getTrackingURL() {
ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasTrackingUrl()) {
return null;
}
return p.getTrackingUrl();
}
@Override
public void setTrackingURL(String trackingURL) {
maybeInitBuilder();
if (trackingURL == null) {
builder.clearTrackingUrl();
return;
}
builder.setTrackingUrl(trackingURL);
}
@Override
public String getDiagnosticsInfo() {
ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnosticsInfo()) {
return null;
}
return p.getDiagnosticsInfo();
}
@Override
public void setDiagnosticsInfo(String diagnosticsInfo) {
maybeInitBuilder();
if (diagnosticsInfo == null) {
builder.clearDiagnosticsInfo();
return;
}
builder.setDiagnosticsInfo(diagnosticsInfo);
}
@Override
public FinalApplicationStatus getFinalApplicationStatus() {
ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasFinalApplicationStatus()) {
return null;
}
return convertFromProtoFormat(p.getFinalApplicationStatus());
}
@Override
public void setFinalApplicationStatus(
FinalApplicationStatus finalApplicationStatus) {
maybeInitBuilder();
if (finalApplicationStatus == null) {
builder.clearFinalApplicationStatus();
return;
}
builder
.setFinalApplicationStatus(convertToProtoFormat(finalApplicationStatus));
}
@Override
public YarnApplicationAttemptState getYarnApplicationAttemptState() {
ApplicationAttemptFinishDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasYarnApplicationAttemptState()) {
return null;
}
return convertFromProtoFormat(p.getYarnApplicationAttemptState());
}
@Override
public void setYarnApplicationAttemptState(YarnApplicationAttemptState state) {
maybeInitBuilder();
if (state == null) {
builder.clearYarnApplicationAttemptState();
return;
}
builder.setYarnApplicationAttemptState(convertToProtoFormat(state));
}
public ApplicationAttemptFinishDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationAttemptId != null
&& !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
.equals(builder.getApplicationAttemptId())) {
builder
.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationAttemptFinishDataProto.newBuilder(proto);
}
viaProto = false;
}
private ApplicationAttemptIdPBImpl convertFromProtoFormat(
ApplicationAttemptIdProto applicationAttemptId) {
return new ApplicationAttemptIdPBImpl(applicationAttemptId);
}
private ApplicationAttemptIdProto convertToProtoFormat(
ApplicationAttemptId applicationAttemptId) {
return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
}
private FinalApplicationStatus convertFromProtoFormat(
FinalApplicationStatusProto finalApplicationStatus) {
return ProtoUtils.convertFromProtoFormat(finalApplicationStatus);
}
private FinalApplicationStatusProto convertToProtoFormat(
FinalApplicationStatus finalApplicationStatus) {
return ProtoUtils.convertToProtoFormat(finalApplicationStatus);
}
private YarnApplicationAttemptStateProto convertToProtoFormat(
YarnApplicationAttemptState state) {
return ProtoUtils.convertToProtoFormat(state);
}
private YarnApplicationAttemptState convertFromProtoFormat(
YarnApplicationAttemptStateProto yarnApplicationAttemptState) {
return ProtoUtils.convertFromProtoFormat(yarnApplicationAttemptState);
}
}
| 7,685 | 31.025 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/GenericObjectMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import org.codehaus.jackson.map.ObjectWriter;
/**
* A utility class providing methods for serializing and deserializing
* objects. The {@link #write(Object)} and {@link #read(byte[])} methods are
* used by the {@link LeveldbTimelineStore} to store and retrieve arbitrary
* JSON, while the {@link #writeReverseOrderedLong} and {@link
* #readReverseOrderedLong} methods are used to sort entities in descending
* start time order.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class GenericObjectMapper {
private static final byte[] EMPTY_BYTES = new byte[0];
public static final ObjectReader OBJECT_READER;
public static final ObjectWriter OBJECT_WRITER;
static {
ObjectMapper mapper = new ObjectMapper();
OBJECT_READER = mapper.reader(Object.class);
OBJECT_WRITER = mapper.writer();
}
/**
   * Serializes an Object into a byte array. Along with {@link #read(byte[])},
   * this method can be used to serialize an Object and deserialize it into an
   * Object of the same type without needing to specify the Object's type, as
   * long as it is one of the JSON-compatible types understood by ObjectMapper.
*
* @param o An Object
* @return A byte array representation of the Object
* @throws IOException if there is a write error
*/
public static byte[] write(Object o) throws IOException {
if (o == null) {
return EMPTY_BYTES;
}
return OBJECT_WRITER.writeValueAsBytes(o);
}
/**
* Deserializes an Object from a byte array created with
* {@link #write(Object)}.
*
* @param b A byte array
* @return An Object
* @throws IOException if there is a read error
*/
public static Object read(byte[] b) throws IOException {
return read(b, 0);
}
/**
* Deserializes an Object from a byte array at a specified offset, assuming
* the bytes were created with {@link #write(Object)}.
*
* @param b A byte array
* @param offset Offset into the array
* @return An Object
* @throws IOException if there is a read error
*/
public static Object read(byte[] b, int offset) throws IOException {
if (b == null || b.length == 0) {
return null;
}
return OBJECT_READER.readValue(b, offset, b.length - offset);
}
/**
   * Converts a long to an 8-byte array so that lexicographic ordering of the
   * produced byte arrays sorts the longs in descending order.
*
* @param l A long
* @return A byte array
*/
public static byte[] writeReverseOrderedLong(long l) {
byte[] b = new byte[8];
return writeReverseOrderedLong(l, b, 0);
}
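  /**
   * Writes the descending-order encoding of a long into an existing byte
   * array at the given offset, using the same encoding as
   * {@link #writeReverseOrderedLong(long)}.
   *
   * @param l A long
   * @param b A byte array with at least 8 writable bytes at the offset
   * @param offset Offset into the array
   * @return The same byte array, for call chaining
   */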
public static byte[] writeReverseOrderedLong(long l, byte[] b, int offset) {
b[offset] = (byte)(0x7f ^ ((l >> 56) & 0xff));
for (int i = offset+1; i < offset+7; i++) {
      b[i] = (byte)(0xff ^ ((l >> 8*(7-(i-offset))) & 0xff));
}
b[offset+7] = (byte)(0xff ^ (l & 0xff));
return b;
}
/**
* Reads 8 bytes from an array starting at the specified offset and
* converts them to a long. The bytes are assumed to have been created
* with {@link #writeReverseOrderedLong}.
*
* @param b A byte array
* @param offset An offset into the byte array
* @return A long
*/
public static long readReverseOrderedLong(byte[] b, int offset) {
long l = b[offset] & 0xff;
for (int i = 1; i < 8; i++) {
l = l << 8;
l = l | (b[offset+i]&0xff);
}
    return l ^ 0x7fffffffffffffffL;
}
}
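// Illustrative, non-normative sketch (not part of the original file) of the
// two properties documented above: the JSON round-trip of write()/read(), and
// the descending byte ordering produced by writeReverseOrderedLong(). All
// values are hypothetical.
class GenericObjectMapperExample {
  public static void main(String[] args) throws IOException {
    // Round-trip: a JSON-compatible object survives write() then read().
    Object restored = GenericObjectMapper.read(GenericObjectMapper.write("hi"));
    // Ordering: the larger long encodes to lexicographically smaller bytes.
    byte[] newer = GenericObjectMapper.writeReverseOrderedLong(20L);
    byte[] older = GenericObjectMapper.writeReverseOrderedLong(10L);
    int cmp = 0;
    for (int i = 0; i < 8 && cmp == 0; i++) {
      cmp = (newer[i] & 0xff) - (older[i] & 0xff); // unsigned byte compare
    }
    // Prints: restored=hi, newerSortsFirst=true
    System.out.println("restored=" + restored
        + ", newerSortsFirst=" + (cmp < 0));
  }
}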
| 4,520 | 32.242647 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/NameValuePair.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A class holding a name and value pair, used for specifying filters in
* {@link TimelineReader}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NameValuePair {
String name;
Object value;
public NameValuePair(String name, Object value) {
this.name = name;
this.value = value;
}
/**
* Get the name.
* @return The name.
*/
public String getName() {
return name;
}
/**
* Get the value.
* @return The value.
*/
public Object getValue() {
return value;
}
@Override
public String toString() {
return "{ name: " + name + ", value: " + value + " }";
}
}
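// Illustrative sketch (hypothetical names and values; not part of the
// original file): a filter pair of the kind handed to the timeline reader
// when querying entities.
class NameValuePairExample {
  static final NameValuePair USER_FILTER = new NameValuePair("user", "alice");
}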
| 1,623 | 26.066667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.classification.InterfaceAudience;
| 941 | 43.857143 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import com.google.common.annotations.VisibleForTesting;
/**
 * This class wraps the timeline store and the ACLs manager. It does some
 * non-trivial manipulation of the timeline data before putting it into or
 * after getting it from the timeline store, and checks the user's access
 * to it.
*
*/
public class TimelineDataManager extends AbstractService {
private static final Log LOG = LogFactory.getLog(TimelineDataManager.class);
@VisibleForTesting
public static final String DEFAULT_DOMAIN_ID = "DEFAULT";
private TimelineDataManagerMetrics metrics;
private TimelineStore store;
private TimelineACLsManager timelineACLsManager;
public TimelineDataManager(TimelineStore store,
TimelineACLsManager timelineACLsManager) {
super(TimelineDataManager.class.getName());
this.store = store;
this.timelineACLsManager = timelineACLsManager;
timelineACLsManager.setTimelineStore(store);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
metrics = TimelineDataManagerMetrics.create();
TimelineDomain domain = store.getDomain("DEFAULT");
// it is okay to reuse an existing domain even if it was created by another
// user of the timeline server before, because it allows everybody to access.
if (domain == null) {
// create a default domain, which allows everybody to access and modify
// the entities in it.
domain = new TimelineDomain();
domain.setId(DEFAULT_DOMAIN_ID);
domain.setDescription("System Default Domain");
domain.setOwner(
UserGroupInformation.getCurrentUser().getShortUserName());
domain.setReaders("*");
domain.setWriters("*");
store.put(domain);
}
super.serviceInit(conf);
}
public interface CheckAcl {
boolean check(TimelineEntity entity) throws IOException;
}
class CheckAclImpl implements CheckAcl {
final UserGroupInformation ugi;
public CheckAclImpl(UserGroupInformation callerUGI) {
ugi = callerUGI;
}
public boolean check(TimelineEntity entity) throws IOException {
try{
return timelineACLsManager.checkAccess(
ugi, ApplicationAccessType.VIEW_APP, entity);
} catch (YarnException e) {
LOG.info("Error when verifying access for user " + ugi
+ " on the events of the timeline entity "
+ new EntityIdentifier(entity.getEntityId(),
entity.getEntityType()), e);
return false;
}
}
}
/**
   * Get the timeline entities that the given user has access to. The meaning
* of each argument has been documented with
* {@link TimelineReader#getEntities}.
*
* @see TimelineReader#getEntities
*/
public TimelineEntities getEntities(
String entityType,
NameValuePair primaryFilter,
Collection<NameValuePair> secondaryFilter,
Long windowStart,
Long windowEnd,
String fromId,
Long fromTs,
Long limit,
EnumSet<Field> fields,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrGetEntitiesOps();
try {
TimelineEntities entities = doGetEntities(
entityType,
primaryFilter,
secondaryFilter,
windowStart,
windowEnd,
fromId,
fromTs,
limit,
fields,
callerUGI);
metrics.incrGetEntitiesTotal(entities.getEntities().size());
return entities;
} finally {
metrics.addGetEntitiesTime(Time.monotonicNow() - startTime);
}
}
private TimelineEntities doGetEntities(
String entityType,
NameValuePair primaryFilter,
Collection<NameValuePair> secondaryFilter,
Long windowStart,
Long windowEnd,
String fromId,
Long fromTs,
Long limit,
EnumSet<Field> fields,
UserGroupInformation callerUGI) throws YarnException, IOException {
    TimelineEntities entities = store.getEntities(
entityType,
limit,
windowStart,
windowEnd,
fromId,
fromTs,
primaryFilter,
secondaryFilter,
fields,
new CheckAclImpl(callerUGI));
if (entities == null) {
return new TimelineEntities();
}
return entities;
}
/**
* Get the single timeline entity that the given user has access to. The
* meaning of each argument has been documented with
* {@link TimelineReader#getEntity}.
*
* @see TimelineReader#getEntity
*/
public TimelineEntity getEntity(
String entityType,
String entityId,
EnumSet<Field> fields,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrGetEntityOps();
try {
return doGetEntity(entityType, entityId, fields, callerUGI);
} finally {
metrics.addGetEntityTime(Time.monotonicNow() - startTime);
}
}
private TimelineEntity doGetEntity(
String entityType,
String entityId,
EnumSet<Field> fields,
UserGroupInformation callerUGI) throws YarnException, IOException {
    TimelineEntity entity = store.getEntity(entityId, entityType, fields);
if (entity != null) {
addDefaultDomainIdIfAbsent(entity);
// check ACLs
if (!timelineACLsManager.checkAccess(
callerUGI, ApplicationAccessType.VIEW_APP, entity)) {
entity = null;
}
}
return entity;
}
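  // --- Illustrative usage sketch, not part of the original class ---
  // Fetches a single entity while masking everything but its primary
  // filters; a null result means the entity is missing or the caller lacks
  // VIEW_APP access to its domain. The identifiers below are hypothetical.
  private void demoGetEntity(UserGroupInformation caller)
      throws YarnException, IOException {
    TimelineEntity entity = getEntity("YARN_APPLICATION", "app_0001",
        EnumSet.of(Field.PRIMARY_FILTERS), caller);
    if (entity != null) {
      LOG.info("Primary filters: " + entity.getPrimaryFilters());
    }
  }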
/**
* Get the events whose entities the given user has access to. The meaning of
* each argument has been documented with
* {@link TimelineReader#getEntityTimelines}.
*
* @see TimelineReader#getEntityTimelines
*/
public TimelineEvents getEvents(
String entityType,
SortedSet<String> entityIds,
SortedSet<String> eventTypes,
Long windowStart,
Long windowEnd,
Long limit,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrGetEventsOps();
try {
TimelineEvents events = doGetEvents(
entityType,
entityIds,
eventTypes,
windowStart,
windowEnd,
limit,
callerUGI);
metrics.incrGetEventsTotal(events.getAllEvents().size());
return events;
} finally {
metrics.addGetEventsTime(Time.monotonicNow() - startTime);
}
}
private TimelineEvents doGetEvents(
String entityType,
SortedSet<String> entityIds,
SortedSet<String> eventTypes,
Long windowStart,
Long windowEnd,
Long limit,
UserGroupInformation callerUGI) throws YarnException, IOException {
    TimelineEvents events = store.getEntityTimelines(
entityType,
entityIds,
limit,
windowStart,
windowEnd,
eventTypes);
if (events != null) {
Iterator<TimelineEvents.EventsOfOneEntity> eventsItr =
events.getAllEvents().iterator();
while (eventsItr.hasNext()) {
TimelineEvents.EventsOfOneEntity eventsOfOneEntity = eventsItr.next();
try {
TimelineEntity entity = store.getEntity(
eventsOfOneEntity.getEntityId(),
eventsOfOneEntity.getEntityType(),
EnumSet.of(Field.PRIMARY_FILTERS));
addDefaultDomainIdIfAbsent(entity);
// check ACLs
if (!timelineACLsManager.checkAccess(
callerUGI, ApplicationAccessType.VIEW_APP, entity)) {
eventsItr.remove();
}
} catch (Exception e) {
LOG.warn("Error when verifying access for user " + callerUGI
+ " on the events of the timeline entity "
+ new EntityIdentifier(eventsOfOneEntity.getEntityId(),
eventsOfOneEntity.getEntityType()), e);
eventsItr.remove();
}
}
}
if (events == null) {
return new TimelineEvents();
}
return events;
}
/**
   * Store the timeline entities into the store, setting their owner to the
   * given user.
*/
public TimelinePutResponse postEntities(
TimelineEntities entities,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrPostEntitiesOps();
try {
return doPostEntities(entities, callerUGI);
} finally {
metrics.addPostEntitiesTime(Time.monotonicNow() - startTime);
}
}
private TimelinePutResponse doPostEntities(
TimelineEntities entities,
UserGroupInformation callerUGI) throws YarnException, IOException {
if (entities == null) {
return new TimelinePutResponse();
}
metrics.incrPostEntitiesTotal(entities.getEntities().size());
TimelineEntities entitiesToPut = new TimelineEntities();
List<TimelinePutResponse.TimelinePutError> errors =
new ArrayList<TimelinePutResponse.TimelinePutError>();
for (TimelineEntity entity : entities.getEntities()) {
// if the domain id is not specified, the entity will be put into
// the default domain
if (entity.getDomainId() == null ||
entity.getDomainId().length() == 0) {
entity.setDomainId(DEFAULT_DOMAIN_ID);
}
// check if there is existing entity
TimelineEntity existingEntity = null;
try {
existingEntity =
store.getEntity(entity.getEntityId(), entity.getEntityType(),
EnumSet.of(Field.PRIMARY_FILTERS));
if (existingEntity != null) {
addDefaultDomainIdIfAbsent(existingEntity);
if (!existingEntity.getDomainId().equals(entity.getDomainId())) {
throw new YarnException("The domain of the timeline entity "
+ "{ id: " + entity.getEntityId() + ", type: "
+ entity.getEntityType() + " } is not allowed to be changed from "
+ existingEntity.getDomainId() + " to " + entity.getDomainId());
}
}
if (!timelineACLsManager.checkAccess(
callerUGI, ApplicationAccessType.MODIFY_APP, entity)) {
throw new YarnException(callerUGI
+ " is not allowed to put the timeline entity "
+ "{ id: " + entity.getEntityId() + ", type: "
+ entity.getEntityType() + " } into the domain "
+ entity.getDomainId() + ".");
}
} catch (Exception e) {
// Skip the entity which already exists and was put by others
LOG.warn("Skip the timeline entity: { id: " + entity.getEntityId()
+ ", type: "+ entity.getEntityType() + " }", e);
TimelinePutResponse.TimelinePutError error =
new TimelinePutResponse.TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(
TimelinePutResponse.TimelinePutError.ACCESS_DENIED);
errors.add(error);
continue;
}
entitiesToPut.addEntity(entity);
}
TimelinePutResponse response = store.put(entitiesToPut);
// add the errors of timeline system filter key conflict
response.addErrors(errors);
return response;
}
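  // --- Illustrative usage sketch, not part of the original class ---
  // Shows the defaulting behavior implemented above: an entity posted
  // without a domain id is assigned DEFAULT_DOMAIN_ID before it reaches the
  // underlying store. The identifiers below are hypothetical.
  private void demoPostEntities(UserGroupInformation caller)
      throws YarnException, IOException {
    TimelineEntity entity = new TimelineEntity();
    entity.setEntityId("app_0001");
    entity.setEntityType("YARN_APPLICATION");
    entity.setStartTime(Time.now());
    TimelineEntities batch = new TimelineEntities();
    batch.addEntity(entity);
    // No domain id set, so the entity lands in the default domain.
    TimelinePutResponse response = postEntities(batch, caller);
    LOG.info("Errors: " + response.getErrors().size());
  }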
/**
   * Add or update a domain. If the domain already exists, only the owner
   * or an admin can update it.
*/
public void putDomain(TimelineDomain domain,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrPutDomainOps();
try {
doPutDomain(domain, callerUGI);
} finally {
metrics.addPutDomainTime(Time.monotonicNow() - startTime);
}
}
private void doPutDomain(TimelineDomain domain,
UserGroupInformation callerUGI) throws YarnException, IOException {
TimelineDomain existingDomain =
store.getDomain(domain.getId());
if (existingDomain != null) {
if (!timelineACLsManager.checkAccess(callerUGI, existingDomain)) {
throw new YarnException(callerUGI.getShortUserName() +
" is not allowed to override an existing domain " +
existingDomain.getId());
}
      // Set it again in case ACLs are not enabled: the domain can be
      // modified by everybody, but the owner is not changed.
domain.setOwner(existingDomain.getOwner());
}
store.put(domain);
// If the domain exists already, it is likely to be in the cache.
// We need to invalidate it.
if (existingDomain != null) {
timelineACLsManager.replaceIfExist(domain);
}
}
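  // --- Illustrative usage sketch, not part of the original class ---
  // Creates a restricted domain so that only the listed users can read or
  // write the entities put into it. If the id already exists, the checks
  // above ensure only the owner or an admin may replace it, and the stored
  // owner is preserved. The identifiers below are hypothetical.
  private void demoPutDomain(UserGroupInformation caller)
      throws YarnException, IOException {
    TimelineDomain domain = new TimelineDomain();
    domain.setId("sales_pipeline");
    domain.setDescription("Entities of the sales pipeline application");
    domain.setReaders("alice,bob");
    domain.setWriters("alice");
    putDomain(domain, caller);
  }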
/**
   * Get a single domain by its ID. If callerUGI is neither the owner nor
   * an admin of the domain, null will be returned.
*/
public TimelineDomain getDomain(String domainId,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrGetDomainOps();
try {
return doGetDomain(domainId, callerUGI);
} finally {
metrics.addGetDomainTime(Time.monotonicNow() - startTime);
}
}
private TimelineDomain doGetDomain(String domainId,
UserGroupInformation callerUGI) throws YarnException, IOException {
TimelineDomain domain = store.getDomain(domainId);
if (domain != null) {
if (timelineACLsManager.checkAccess(callerUGI, domain)) {
return domain;
}
}
return null;
}
/**
   * Get all the domains that belong to the given owner. If callerUGI is
   * neither the owner nor an admin of the domains, an empty list will be
   * returned.
*/
public TimelineDomains getDomains(String owner,
UserGroupInformation callerUGI) throws YarnException, IOException {
long startTime = Time.monotonicNow();
metrics.incrGetDomainsOps();
try {
TimelineDomains domains = doGetDomains(owner, callerUGI);
metrics.incrGetDomainsTotal(domains.getDomains().size());
return domains;
} finally {
metrics.addGetDomainsTime(Time.monotonicNow() - startTime);
}
}
private TimelineDomains doGetDomains(String owner,
UserGroupInformation callerUGI) throws YarnException, IOException {
TimelineDomains domains = store.getDomains(owner);
boolean hasAccess = true;
if (domains.getDomains().size() > 0) {
// The owner for each domain is the same, just need to check one
hasAccess = timelineACLsManager.checkAccess(
callerUGI, domains.getDomains().get(0));
}
if (hasAccess) {
return domains;
} else {
return new TimelineDomains();
}
}
private static void addDefaultDomainIdIfAbsent(TimelineEntity entity) {
// be compatible with the timeline data created before 2.6
if (entity.getDomainId() == null) {
entity.setDomainId(DEFAULT_DOMAIN_ID);
}
}
}
| 16,839 | 33.227642 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
import static org.apache.hadoop.yarn.server.timeline.TimelineDataManager.DEFAULT_DOMAIN_ID;
/**
 * In-memory implementation of {@link TimelineStore}. This
 * implementation is for test purposes only. If users instantiate it
 * improperly, they may end up reading and writing history data from
 * different memory stores.
 *
 * The methods are synchronized to avoid concurrent modification of the
 * in-memory state.
*
*/
@Private
@Unstable
public class MemoryTimelineStore
extends AbstractService implements TimelineStore {
private Map<EntityIdentifier, TimelineEntity> entities =
new HashMap<EntityIdentifier, TimelineEntity>();
private Map<EntityIdentifier, Long> entityInsertTimes =
new HashMap<EntityIdentifier, Long>();
private Map<String, TimelineDomain> domainsById =
new HashMap<String, TimelineDomain>();
private Map<String, Set<TimelineDomain>> domainsByOwner =
new HashMap<String, Set<TimelineDomain>>();
public MemoryTimelineStore() {
super(MemoryTimelineStore.class.getName());
}
@Override
public synchronized TimelineEntities getEntities(String entityType, Long limit,
Long windowStart, Long windowEnd, String fromId, Long fromTs,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
if (limit == null) {
limit = DEFAULT_LIMIT;
}
if (windowStart == null) {
windowStart = Long.MIN_VALUE;
}
if (windowEnd == null) {
windowEnd = Long.MAX_VALUE;
}
if (fields == null) {
fields = EnumSet.allOf(Field.class);
}
Iterator<TimelineEntity> entityIterator = null;
if (fromId != null) {
TimelineEntity firstEntity = entities.get(new EntityIdentifier(fromId,
entityType));
if (firstEntity == null) {
return new TimelineEntities();
} else {
entityIterator = new TreeSet<TimelineEntity>(entities.values())
.tailSet(firstEntity, true).iterator();
}
}
if (entityIterator == null) {
entityIterator = new PriorityQueue<TimelineEntity>(entities.values())
.iterator();
}
List<TimelineEntity> entitiesSelected = new ArrayList<TimelineEntity>();
while (entityIterator.hasNext()) {
TimelineEntity entity = entityIterator.next();
if (entitiesSelected.size() >= limit) {
break;
}
if (!entity.getEntityType().equals(entityType)) {
continue;
}
if (entity.getStartTime() <= windowStart) {
continue;
}
if (entity.getStartTime() > windowEnd) {
continue;
}
if (fromTs != null && entityInsertTimes.get(new EntityIdentifier(
entity.getEntityId(), entity.getEntityType())) > fromTs) {
continue;
}
if (primaryFilter != null &&
!matchPrimaryFilter(entity.getPrimaryFilters(), primaryFilter)) {
continue;
}
if (secondaryFilters != null) { // AND logic
boolean flag = true;
for (NameValuePair secondaryFilter : secondaryFilters) {
if (secondaryFilter != null && !matchPrimaryFilter(
entity.getPrimaryFilters(), secondaryFilter) &&
!matchFilter(entity.getOtherInfo(), secondaryFilter)) {
flag = false;
break;
}
}
if (!flag) {
continue;
}
}
if (entity.getDomainId() == null) {
entity.setDomainId(DEFAULT_DOMAIN_ID);
}
if (checkAcl == null || checkAcl.check(entity)) {
entitiesSelected.add(entity);
}
}
List<TimelineEntity> entitiesToReturn = new ArrayList<TimelineEntity>();
for (TimelineEntity entitySelected : entitiesSelected) {
entitiesToReturn.add(maskFields(entitySelected, fields));
}
Collections.sort(entitiesToReturn);
TimelineEntities entitiesWrapper = new TimelineEntities();
entitiesWrapper.setEntities(entitiesToReturn);
return entitiesWrapper;
}
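  // --- Illustrative sketch, not part of the original class ---
  // Secondary filters use AND logic in the loop above: an entity is kept
  // only if every pair matches either a primary filter or an other-info
  // entry. The filter values below are hypothetical.
  private synchronized TimelineEntities demoSecondaryFilters()
      throws IOException {
    List<NameValuePair> filters = new ArrayList<NameValuePair>();
    filters.add(new NameValuePair("user", "alice"));
    filters.add(new NameValuePair("queue", "default"));
    // Both pairs must match for an entity to be returned.
    return getEntities("YARN_APPLICATION", null, null, null, null, null,
        null, filters, null, null);
  }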
@Override
public synchronized TimelineEntity getEntity(String entityId, String entityType,
EnumSet<Field> fieldsToRetrieve) {
if (fieldsToRetrieve == null) {
fieldsToRetrieve = EnumSet.allOf(Field.class);
}
TimelineEntity entity = entities.get(new EntityIdentifier(entityId, entityType));
if (entity == null) {
return null;
} else {
return maskFields(entity, fieldsToRetrieve);
}
}
@Override
public synchronized TimelineEvents getEntityTimelines(String entityType,
SortedSet<String> entityIds, Long limit, Long windowStart,
Long windowEnd,
Set<String> eventTypes) {
TimelineEvents allEvents = new TimelineEvents();
if (entityIds == null) {
return allEvents;
}
if (limit == null) {
limit = DEFAULT_LIMIT;
}
if (windowStart == null) {
windowStart = Long.MIN_VALUE;
}
if (windowEnd == null) {
windowEnd = Long.MAX_VALUE;
}
for (String entityId : entityIds) {
EntityIdentifier entityID = new EntityIdentifier(entityId, entityType);
TimelineEntity entity = entities.get(entityID);
if (entity == null) {
continue;
}
EventsOfOneEntity events = new EventsOfOneEntity();
events.setEntityId(entityId);
events.setEntityType(entityType);
for (TimelineEvent event : entity.getEvents()) {
if (events.getEvents().size() >= limit) {
break;
}
if (event.getTimestamp() <= windowStart) {
continue;
}
if (event.getTimestamp() > windowEnd) {
continue;
}
if (eventTypes != null && !eventTypes.contains(event.getEventType())) {
continue;
}
events.addEvent(event);
}
allEvents.addEvent(events);
}
return allEvents;
}
@Override
  public synchronized TimelineDomain getDomain(String domainId)
      throws IOException {
TimelineDomain domain = domainsById.get(domainId);
if (domain == null) {
return null;
} else {
return createTimelineDomain(
domain.getId(),
domain.getDescription(),
domain.getOwner(),
domain.getReaders(),
domain.getWriters(),
domain.getCreatedTime(),
domain.getModifiedTime());
}
}
@Override
  public synchronized TimelineDomains getDomains(String owner)
      throws IOException {
List<TimelineDomain> domains = new ArrayList<TimelineDomain>();
Set<TimelineDomain> domainsOfOneOwner = domainsByOwner.get(owner);
if (domainsOfOneOwner == null) {
return new TimelineDomains();
}
for (TimelineDomain domain : domainsByOwner.get(owner)) {
TimelineDomain domainToReturn = createTimelineDomain(
domain.getId(),
domain.getDescription(),
domain.getOwner(),
domain.getReaders(),
domain.getWriters(),
domain.getCreatedTime(),
domain.getModifiedTime());
domains.add(domainToReturn);
}
Collections.sort(domains, new Comparator<TimelineDomain>() {
@Override
public int compare(
TimelineDomain domain1, TimelineDomain domain2) {
int result = domain2.getCreatedTime().compareTo(
domain1.getCreatedTime());
if (result == 0) {
return domain2.getModifiedTime().compareTo(
domain1.getModifiedTime());
} else {
return result;
}
}
});
TimelineDomains domainsToReturn = new TimelineDomains();
domainsToReturn.addDomains(domains);
return domainsToReturn;
}
@Override
public synchronized TimelinePutResponse put(TimelineEntities data) {
TimelinePutResponse response = new TimelinePutResponse();
for (TimelineEntity entity : data.getEntities()) {
EntityIdentifier entityId =
new EntityIdentifier(entity.getEntityId(), entity.getEntityType());
// store entity info in memory
TimelineEntity existingEntity = entities.get(entityId);
if (existingEntity == null) {
existingEntity = new TimelineEntity();
existingEntity.setEntityId(entity.getEntityId());
existingEntity.setEntityType(entity.getEntityType());
existingEntity.setStartTime(entity.getStartTime());
if (entity.getDomainId() == null ||
entity.getDomainId().length() == 0) {
TimelinePutError error = new TimelinePutError();
error.setEntityId(entityId.getId());
error.setEntityType(entityId.getType());
error.setErrorCode(TimelinePutError.NO_DOMAIN);
response.addError(error);
continue;
}
existingEntity.setDomainId(entity.getDomainId());
entities.put(entityId, existingEntity);
entityInsertTimes.put(entityId, System.currentTimeMillis());
}
if (entity.getEvents() != null) {
if (existingEntity.getEvents() == null) {
existingEntity.setEvents(entity.getEvents());
} else {
existingEntity.addEvents(entity.getEvents());
}
Collections.sort(existingEntity.getEvents());
}
// check startTime
if (existingEntity.getStartTime() == null) {
if (existingEntity.getEvents() == null
|| existingEntity.getEvents().isEmpty()) {
TimelinePutError error = new TimelinePutError();
error.setEntityId(entityId.getId());
error.setEntityType(entityId.getType());
error.setErrorCode(TimelinePutError.NO_START_TIME);
response.addError(error);
entities.remove(entityId);
entityInsertTimes.remove(entityId);
continue;
} else {
Long min = Long.MAX_VALUE;
for (TimelineEvent e : entity.getEvents()) {
if (min > e.getTimestamp()) {
min = e.getTimestamp();
}
}
existingEntity.setStartTime(min);
}
}
if (entity.getPrimaryFilters() != null) {
if (existingEntity.getPrimaryFilters() == null) {
existingEntity.setPrimaryFilters(new HashMap<String, Set<Object>>());
}
for (Entry<String, Set<Object>> pf :
entity.getPrimaryFilters().entrySet()) {
for (Object pfo : pf.getValue()) {
existingEntity.addPrimaryFilter(pf.getKey(), maybeConvert(pfo));
}
}
}
if (entity.getOtherInfo() != null) {
if (existingEntity.getOtherInfo() == null) {
existingEntity.setOtherInfo(new HashMap<String, Object>());
}
for (Entry<String, Object> info : entity.getOtherInfo().entrySet()) {
existingEntity.addOtherInfo(info.getKey(),
maybeConvert(info.getValue()));
}
}
// relate it to other entities
if (entity.getRelatedEntities() == null) {
continue;
}
for (Map.Entry<String, Set<String>> partRelatedEntities : entity
.getRelatedEntities().entrySet()) {
if (partRelatedEntities == null) {
continue;
}
for (String idStr : partRelatedEntities.getValue()) {
EntityIdentifier relatedEntityId =
new EntityIdentifier(idStr, partRelatedEntities.getKey());
TimelineEntity relatedEntity = entities.get(relatedEntityId);
if (relatedEntity != null) {
if (relatedEntity.getDomainId().equals(
existingEntity.getDomainId())) {
relatedEntity.addRelatedEntity(
existingEntity.getEntityType(), existingEntity.getEntityId());
} else {
// in this case the entity will be put, but the relation will be
// ignored
TimelinePutError error = new TimelinePutError();
error.setEntityType(existingEntity.getEntityType());
error.setEntityId(existingEntity.getEntityId());
error.setErrorCode(TimelinePutError.FORBIDDEN_RELATION);
response.addError(error);
}
} else {
relatedEntity = new TimelineEntity();
relatedEntity.setEntityId(relatedEntityId.getId());
relatedEntity.setEntityType(relatedEntityId.getType());
relatedEntity.setStartTime(existingEntity.getStartTime());
relatedEntity.addRelatedEntity(existingEntity.getEntityType(),
existingEntity.getEntityId());
relatedEntity.setDomainId(existingEntity.getDomainId());
entities.put(relatedEntityId, relatedEntity);
entityInsertTimes.put(relatedEntityId, System.currentTimeMillis());
}
}
}
}
return response;
}
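  // --- Illustrative sketch, not part of the original class ---
  // Demonstrates the start-time rule enforced above: an entity with no
  // explicit start time and no events is rejected with NO_START_TIME.
  // The method name is hypothetical.
  private synchronized void demoStartTimeRule() {
    TimelineEntity noTime = new TimelineEntity();
    noTime.setEntityId("e1");
    noTime.setEntityType("T");
    noTime.setDomainId(DEFAULT_DOMAIN_ID);
    TimelineEntities batch = new TimelineEntities();
    batch.addEntity(noTime);
    TimelinePutResponse response = put(batch);
    // No start time and no events, so the entity is dropped with an error.
    assert response.getErrors().get(0).getErrorCode()
        == TimelinePutError.NO_START_TIME;
  }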
  @Override
  public synchronized void put(TimelineDomain domain) throws IOException {
TimelineDomain domainToReplace =
domainsById.get(domain.getId());
Long currentTimestamp = System.currentTimeMillis();
TimelineDomain domainToStore = createTimelineDomain(
domain.getId(), domain.getDescription(), domain.getOwner(),
domain.getReaders(), domain.getWriters(),
(domainToReplace == null ?
currentTimestamp : domainToReplace.getCreatedTime()),
currentTimestamp);
domainsById.put(domainToStore.getId(), domainToStore);
Set<TimelineDomain> domainsByOneOwner =
domainsByOwner.get(domainToStore.getOwner());
if (domainsByOneOwner == null) {
domainsByOneOwner = new HashSet<TimelineDomain>();
domainsByOwner.put(domainToStore.getOwner(), domainsByOneOwner);
}
if (domainToReplace != null) {
domainsByOneOwner.remove(domainToReplace);
}
domainsByOneOwner.add(domainToStore);
}
private static TimelineDomain createTimelineDomain(
String id, String description, String owner,
String readers, String writers,
Long createdTime, Long modifiedTime) {
TimelineDomain domainToStore = new TimelineDomain();
domainToStore.setId(id);
domainToStore.setDescription(description);
domainToStore.setOwner(owner);
domainToStore.setReaders(readers);
domainToStore.setWriters(writers);
domainToStore.setCreatedTime(createdTime);
domainToStore.setModifiedTime(modifiedTime);
return domainToStore;
}
private static TimelineEntity maskFields(
TimelineEntity entity, EnumSet<Field> fields) {
// Conceal the fields that are not going to be exposed
TimelineEntity entityToReturn = new TimelineEntity();
entityToReturn.setEntityId(entity.getEntityId());
entityToReturn.setEntityType(entity.getEntityType());
entityToReturn.setStartTime(entity.getStartTime());
entityToReturn.setDomainId(entity.getDomainId());
// Deep copy
if (fields.contains(Field.EVENTS)) {
entityToReturn.addEvents(entity.getEvents());
} else if (fields.contains(Field.LAST_EVENT_ONLY)) {
entityToReturn.addEvent(entity.getEvents().get(0));
} else {
entityToReturn.setEvents(null);
}
if (fields.contains(Field.RELATED_ENTITIES)) {
entityToReturn.addRelatedEntities(entity.getRelatedEntities());
} else {
entityToReturn.setRelatedEntities(null);
}
if (fields.contains(Field.PRIMARY_FILTERS)) {
entityToReturn.addPrimaryFilters(entity.getPrimaryFilters());
} else {
entityToReturn.setPrimaryFilters(null);
}
if (fields.contains(Field.OTHER_INFO)) {
entityToReturn.addOtherInfo(entity.getOtherInfo());
} else {
entityToReturn.setOtherInfo(null);
}
return entityToReturn;
}
private static boolean matchFilter(Map<String, Object> tags,
NameValuePair filter) {
Object value = tags.get(filter.getName());
if (value == null) { // doesn't have the filter
return false;
} else if (!value.equals(filter.getValue())) { // doesn't match the filter
return false;
}
return true;
}
private static boolean matchPrimaryFilter(Map<String, Set<Object>> tags,
NameValuePair filter) {
Set<Object> value = tags.get(filter.getName());
if (value == null) { // doesn't have the filter
return false;
} else {
return value.contains(filter.getValue());
}
}
private static Object maybeConvert(Object o) {
if (o instanceof Long) {
Long l = (Long)o;
if (l >= Integer.MIN_VALUE && l <= Integer.MAX_VALUE) {
return l.intValue();
}
}
return o;
}
}
| 18,549 | 35.372549 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
@Private
@Unstable
public interface TimelineStore extends
Service, TimelineReader, TimelineWriter {
/**
* The system filter which will be automatically added to a
* {@link TimelineEntity}'s primary filter section when storing the entity.
   * The filter key is case sensitive. Users are not supposed to use the
   * keys reserved by the timeline system.
*/
@Private
enum SystemFilter {
ENTITY_OWNER
}
}
| 1,532 | 34.651163 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
 * The unique identifier for an entity.
*/
@Private
@Unstable
public class EntityIdentifier implements Comparable<EntityIdentifier> {
private String id;
private String type;
public EntityIdentifier(String id, String type) {
this.id = id;
this.type = type;
}
/**
* Get the entity Id.
* @return The entity Id.
*/
public String getId() {
return id;
}
/**
* Get the entity type.
* @return The entity type.
*/
public String getType() {
return type;
}
@Override
public int compareTo(EntityIdentifier other) {
int c = type.compareTo(other.type);
if (c != 0) return c;
return id.compareTo(other.id);
}
@Override
public int hashCode() {
// generated by eclipse
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
// generated by eclipse
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EntityIdentifier other = (EntityIdentifier) obj;
if (id == null) {
if (other.id != null)
return false;
} else if (!id.equals(other.id))
return false;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
@Override
public String toString() {
return "{ id: " + id + ", type: "+ type + " }";
}
}
| 2,600 | 24.752475 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.File;
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.TimeZone;
import java.util.TreeMap;
import java.util.Map.Entry;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
/**
 * Contains the logic to look up a leveldb instance by timestamp so that
 * multiple smaller databases can roll according to the configured period and
 * be evicted efficiently via operating-system directory removal.
*/
class RollingLevelDB {
/** Logger for this class. */
private static final Log LOG = LogFactory.getLog(RollingLevelDB.class);
/** Factory to open and create new leveldb instances. */
private static JniDBFactory factory = new JniDBFactory();
/** Thread safe date formatter. */
private FastDateFormat fdf;
/** Date parser. */
private SimpleDateFormat sdf;
/** Calendar to calculate the current and next rolling period. */
private GregorianCalendar cal = new GregorianCalendar(
TimeZone.getTimeZone("GMT"));
/** Collection of all active rolling leveldb instances. */
private final TreeMap<Long, DB> rollingdbs;
/** Collection of all rolling leveldb instances to evict. */
private final TreeMap<Long, DB> rollingdbsToEvict;
/** Name of this rolling level db. */
private final String name;
/** Calculated timestamp of when to roll a new leveldb instance. */
private volatile long nextRollingCheckMillis = 0;
/** File system instance to find and create new leveldb instances. */
private FileSystem lfs = null;
/** Directory to store rolling leveldb instances. */
private Path rollingDBPath;
/** Configuration for this object. */
private Configuration conf;
/** Rolling period. */
private RollingPeriod rollingPeriod;
/**
   * Rolling leveldb instances are evicted when their end time is earlier
   * than the current time minus the time-to-live value.
*/
private long ttl;
/** Whether time to live is enabled. */
private boolean ttlEnabled;
/** Encapsulates the rolling period to date format lookup. */
enum RollingPeriod {
DAILY {
@Override
public String dateFormat() {
return "yyyy-MM-dd";
}
},
HALF_DAILY {
@Override
public String dateFormat() {
return "yyyy-MM-dd-HH";
}
},
QUARTER_DAILY {
@Override
public String dateFormat() {
return "yyyy-MM-dd-HH";
}
},
HOURLY {
@Override
public String dateFormat() {
return "yyyy-MM-dd-HH";
}
},
MINUTELY {
@Override
public String dateFormat() {
return "yyyy-MM-dd-HH-mm";
}
};
public abstract String dateFormat();
}
/**
* Convenience class for associating a write batch with its rolling leveldb
* instance.
*/
public static class RollingWriteBatch {
/** Leveldb object. */
private final DB db;
/** Write batch for the db object. */
private final WriteBatch writeBatch;
public RollingWriteBatch(final DB db, final WriteBatch writeBatch) {
this.db = db;
this.writeBatch = writeBatch;
}
public DB getDB() {
return db;
}
public WriteBatch getWriteBatch() {
return writeBatch;
}
public void write() {
db.write(writeBatch);
}
public void close() {
IOUtils.cleanup(LOG, writeBatch);
}
}
RollingLevelDB(String name) {
this.name = name;
this.rollingdbs = new TreeMap<Long, DB>();
this.rollingdbsToEvict = new TreeMap<Long, DB>();
}
protected String getName() {
return name;
}
protected long currentTimeMillis() {
return System.currentTimeMillis();
}
public long getNextRollingTimeMillis() {
return nextRollingCheckMillis;
}
public long getTimeToLive() {
return ttl;
}
public boolean getTimeToLiveEnabled() {
return ttlEnabled;
}
protected void setNextRollingTimeMillis(final long timestamp) {
this.nextRollingCheckMillis = timestamp;
LOG.info("Next rolling time for " + getName() + " is "
+ fdf.format(nextRollingCheckMillis));
}
public void init(final Configuration config) throws Exception {
LOG.info("Initializing RollingLevelDB for " + getName());
this.conf = config;
this.ttl = conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS);
this.ttlEnabled = conf.getBoolean(
YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true);
this.rollingDBPath = new Path(
conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH),
RollingLevelDBTimelineStore.FILENAME);
initFileSystem();
initRollingPeriod();
initHistoricalDBs();
}
protected void initFileSystem() throws IOException {
lfs = FileSystem.getLocal(conf);
boolean success = lfs.mkdirs(rollingDBPath,
RollingLevelDBTimelineStore.LEVELDB_DIR_UMASK);
if (!success) {
throw new IOException("Failed to create leveldb root directory "
+ rollingDBPath);
}
}
protected synchronized void initRollingPeriod() {
final String lcRollingPeriod = conf.get(
YarnConfiguration.TIMELINE_SERVICE_ROLLING_PERIOD,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ROLLING_PERIOD);
this.rollingPeriod = RollingPeriod.valueOf(lcRollingPeriod
.toUpperCase(Locale.ENGLISH));
fdf = FastDateFormat.getInstance(rollingPeriod.dateFormat(),
TimeZone.getTimeZone("GMT"));
sdf = new SimpleDateFormat(rollingPeriod.dateFormat());
sdf.setTimeZone(fdf.getTimeZone());
}
protected synchronized void initHistoricalDBs() throws IOException {
Path rollingDBGlobPath = new Path(rollingDBPath, getName() + ".*");
FileStatus[] statuses = lfs.globStatus(rollingDBGlobPath);
for (FileStatus status : statuses) {
String dbName = FilenameUtils.getExtension(status.getPath().toString());
try {
Long dbStartTime = sdf.parse(dbName).getTime();
initRollingLevelDB(dbStartTime, status.getPath());
} catch (ParseException pe) {
LOG.warn("Failed to initialize rolling leveldb " + dbName + " for "
+ getName());
}
}
}
private void initRollingLevelDB(Long dbStartTime,
Path rollingInstanceDBPath) {
if (rollingdbs.containsKey(dbStartTime)) {
return;
}
Options options = new Options();
options.createIfMissing(true);
options.cacheSize(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
options.maxOpenFiles(conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES));
options.writeBufferSize(conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE));
LOG.info("Initializing rolling leveldb instance :" + rollingInstanceDBPath
+ " for start time: " + dbStartTime);
DB db = null;
try {
db = factory.open(
new File(rollingInstanceDBPath.toUri().getPath()), options);
rollingdbs.put(dbStartTime, db);
String dbName = fdf.format(dbStartTime);
LOG.info("Added rolling leveldb instance " + dbName + " to " + getName());
} catch (IOException ioe) {
LOG.warn("Failed to open rolling leveldb instance :"
+ new File(rollingInstanceDBPath.toUri().getPath()), ioe);
}
}
synchronized DB getPreviousDB(DB db) {
Iterator<DB> iterator = rollingdbs.values().iterator();
DB prev = null;
while (iterator.hasNext()) {
DB cur = iterator.next();
if (cur == db) {
break;
}
prev = cur;
}
return prev;
}
synchronized long getStartTimeFor(DB db) {
long startTime = -1;
for (Map.Entry<Long, DB> entry : rollingdbs.entrySet()) {
if (entry.getValue() == db) {
startTime = entry.getKey();
}
}
return startTime;
}
public synchronized DB getDBForStartTime(long startTime) {
// make sure we sanitize this input
startTime = Math.min(startTime, currentTimeMillis());
if (startTime >= getNextRollingTimeMillis()) {
roll(startTime);
}
Entry<Long, DB> entry = rollingdbs.floorEntry(startTime);
if (entry == null) {
return null;
}
return entry.getValue();
}
private void roll(long startTime) {
LOG.info("Rolling new DB instance for " + getName());
long currentStartTime = computeCurrentCheckMillis(startTime);
setNextRollingTimeMillis(computeNextCheckMillis(currentStartTime));
String currentRollingDBInstance = fdf.format(currentStartTime);
String currentRollingDBName = getName() + "." + currentRollingDBInstance;
Path currentRollingDBPath = new Path(rollingDBPath, currentRollingDBName);
if (getTimeToLiveEnabled()) {
scheduleOldDBsForEviction();
}
initRollingLevelDB(currentStartTime, currentRollingDBPath);
}
private synchronized void scheduleOldDBsForEviction() {
    // keep at least the time-to-live amount of data
long evictionThreshold = computeCurrentCheckMillis(currentTimeMillis()
- getTimeToLive());
LOG.info("Scheduling " + getName() + " DBs older than "
+ fdf.format(evictionThreshold) + " for eviction");
Iterator<Entry<Long, DB>> iterator = rollingdbs.entrySet().iterator();
while (iterator.hasNext()) {
Entry<Long, DB> entry = iterator.next();
// parse this in gmt time
if (entry.getKey() < evictionThreshold) {
LOG.info("Scheduling " + getName() + " eviction for "
+ fdf.format(entry.getKey()));
iterator.remove();
rollingdbsToEvict.put(entry.getKey(), entry.getValue());
}
}
}
public synchronized void evictOldDBs() {
LOG.info("Evicting " + getName() + " DBs scheduled for eviction");
Iterator<Entry<Long, DB>> iterator = rollingdbsToEvict.entrySet()
.iterator();
while (iterator.hasNext()) {
Entry<Long, DB> entry = iterator.next();
IOUtils.cleanup(LOG, entry.getValue());
String dbName = fdf.format(entry.getKey());
Path path = new Path(rollingDBPath, getName() + "." + dbName);
try {
LOG.info("Removing old db directory contents in " + path);
lfs.delete(path, true);
} catch (IOException ioe) {
LOG.warn("Failed to evict old db " + path, ioe);
}
iterator.remove();
}
}
public void stop() throws Exception {
for (DB db : rollingdbs.values()) {
IOUtils.cleanup(LOG, db);
}
IOUtils.cleanup(LOG, lfs);
}
private long computeNextCheckMillis(long now) {
return computeCheckMillis(now, true);
}
public long computeCurrentCheckMillis(long now) {
return computeCheckMillis(now, false);
}
private synchronized long computeCheckMillis(long now, boolean next) {
// needs to be called synchronously due to shared Calendar
cal.setTimeInMillis(now);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
if (rollingPeriod == RollingPeriod.DAILY) {
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
if (next) {
cal.add(Calendar.DATE, 1);
}
} else if (rollingPeriod == RollingPeriod.HALF_DAILY) {
// round down to 12 hour interval
int hour = (cal.get(Calendar.HOUR) / 12) * 12;
cal.set(Calendar.HOUR, hour);
cal.set(Calendar.MINUTE, 0);
if (next) {
cal.add(Calendar.HOUR_OF_DAY, 12);
}
} else if (rollingPeriod == RollingPeriod.QUARTER_DAILY) {
// round down to 6 hour interval
int hour = (cal.get(Calendar.HOUR) / 6) * 6;
cal.set(Calendar.HOUR, hour);
cal.set(Calendar.MINUTE, 0);
if (next) {
cal.add(Calendar.HOUR_OF_DAY, 6);
}
} else if (rollingPeriod == RollingPeriod.HOURLY) {
cal.set(Calendar.MINUTE, 0);
if (next) {
cal.add(Calendar.HOUR_OF_DAY, 1);
}
} else if (rollingPeriod == RollingPeriod.MINUTELY) {
// round down to 5 minute interval
int minute = (cal.get(Calendar.MINUTE) / 5) * 5;
cal.set(Calendar.MINUTE, minute);
if (next) {
cal.add(Calendar.MINUTE, 5);
}
}
return cal.getTimeInMillis();
}
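  // --- Illustrative sketch, not part of the original class ---
  // Shows how the rounding above behaves for an instance configured with a
  // QUARTER_DAILY rolling period: 14:35 GMT rounds down to 12:00 and the
  // next check lands at 18:00. The method name is hypothetical.
  private void demoQuarterDailyRounding() throws ParseException {
    SimpleDateFormat gmt = new SimpleDateFormat("yyyy-MM-dd-HH-mm");
    gmt.setTimeZone(TimeZone.getTimeZone("GMT"));
    long t = gmt.parse("2015-03-02-14-35").getTime();
    assert computeCurrentCheckMillis(t)
        == gmt.parse("2015-03-02-12-00").getTime();
    assert computeNextCheckMillis(t)
        == gmt.parse("2015-03-02-18-00").getTime();
  }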
}
| 13,845 | 31.888361 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManagerMetrics.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
/** This class tracks metrics for the TimelineDataManager. */
@Metrics(about="Metrics for TimelineDataManager", context="yarn")
public class TimelineDataManagerMetrics {
@Metric("getEntities calls")
MutableCounterLong getEntitiesOps;
@Metric("Entities returned via getEntities")
MutableCounterLong getEntitiesTotal;
@Metric("getEntities processing time")
MutableRate getEntitiesTime;
@Metric("getEntity calls")
MutableCounterLong getEntityOps;
@Metric("getEntity processing time")
MutableRate getEntityTime;
@Metric("getEvents calls")
MutableCounterLong getEventsOps;
@Metric("Events returned via getEvents")
MutableCounterLong getEventsTotal;
@Metric("getEvents processing time")
MutableRate getEventsTime;
@Metric("postEntities calls")
MutableCounterLong postEntitiesOps;
@Metric("Entities posted via postEntities")
MutableCounterLong postEntitiesTotal;
@Metric("postEntities processing time")
MutableRate postEntitiesTime;
@Metric("putDomain calls")
MutableCounterLong putDomainOps;
@Metric("putDomain processing time")
MutableRate putDomainTime;
@Metric("getDomain calls")
MutableCounterLong getDomainOps;
@Metric("getDomain processing time")
MutableRate getDomainTime;
@Metric("getDomains calls")
MutableCounterLong getDomainsOps;
@Metric("Domains returned via getDomains")
MutableCounterLong getDomainsTotal;
@Metric("getDomains processing time")
MutableRate getDomainsTime;
@Metric("Total calls")
public long totalOps() {
return getEntitiesOps.value() +
getEntityOps.value() +
getEventsOps.value() +
postEntitiesOps.value() +
putDomainOps.value() +
getDomainOps.value() +
getDomainsOps.value();
}
TimelineDataManagerMetrics() {
}
public static TimelineDataManagerMetrics create() {
MetricsSystem ms = DefaultMetricsSystem.instance();
return ms.register(new TimelineDataManagerMetrics());
}
public void incrGetEntitiesOps() {
getEntitiesOps.incr();
}
public void incrGetEntitiesTotal(long delta) {
getEntitiesTotal.incr(delta);
}
public void addGetEntitiesTime(long msec) {
getEntitiesTime.add(msec);
}
public void incrGetEntityOps() {
getEntityOps.incr();
}
public void addGetEntityTime(long msec) {
getEntityTime.add(msec);
}
public void incrGetEventsOps() {
getEventsOps.incr();
}
public void incrGetEventsTotal(long delta) {
getEventsTotal.incr(delta);
}
public void addGetEventsTime(long msec) {
getEventsTime.add(msec);
}
public void incrPostEntitiesOps() {
postEntitiesOps.incr();
}
public void incrPostEntitiesTotal(long delta) {
postEntitiesTotal.incr(delta);
}
public void addPostEntitiesTime(long msec) {
postEntitiesTime.add(msec);
}
public void incrPutDomainOps() {
putDomainOps.incr();
}
public void addPutDomainTime(long msec) {
putDomainTime.add(msec);
}
public void incrGetDomainOps() {
getDomainOps.incr();
}
public void addGetDomainTime(long msec) {
getDomainTime.add(msec);
}
public void incrGetDomainsOps() {
getDomainsOps.incr();
}
public void incrGetDomainsTotal(long delta) {
getDomainsTotal.incr(delta);
}
public void addGetDomainsTime(long msec) {
getDomainsTime.add(msec);
}
}
| 4,519 | 24.828571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
/**
* This interface is for storing timeline information.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface TimelineWriter {
/**
* Stores entity information to the timeline store. Any errors occurring for
* individual put request objects will be reported in the response.
*
* @param data
* a {@link TimelineEntities} object.
* @return a {@link TimelinePutResponse} object.
* @throws IOException
*/
TimelinePutResponse put(TimelineEntities data) throws IOException;
/**
   * Store domain information to the timeline store. If a domain with the
   * same ID already exists in the timeline store, it will be completely
   * replaced by the given domain.
*
* @param domain
* a {@link TimelineDomain} object
* @throws IOException
*/
void put(TimelineDomain domain) throws IOException;
}
| 2,094 | 34.508475 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.timeline.*;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder;
import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.*;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong;
import static org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong;
import static org.apache.hadoop.yarn.server.timeline.TimelineDataManager.DEFAULT_DOMAIN_ID;
import static org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.prefixMatches;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
/**
* <p>An implementation of an application timeline store backed by leveldb.</p>
*
* <p>There are three sections of the db, the start time section,
* the entity section, and the indexed entity section.</p>
*
* <p>The start time section is used to retrieve the unique start time for
* a given entity. Its values each contain a start time while its keys are of
* the form:</p>
* <pre>
* START_TIME_LOOKUP_PREFIX + entity type + entity id</pre>
*
* <p>The entity section is ordered by entity type, then entity start time
* descending, then entity ID. There are four sub-sections of the entity
* section: events, primary filters, related entities,
* and other info. The event entries have event info serialized into their
* values. The other info entries have values corresponding to the values of
* the other info name/value map for the entry (note the names are contained
* in the key). All other entries have empty values. The key structure is as
* follows:</p>
* <pre>
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* EVENTS_COLUMN + reveventtimestamp + eventtype
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* PRIMARY_FILTERS_COLUMN + name + value
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* OTHER_INFO_COLUMN + name
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* RELATED_ENTITIES_COLUMN + relatedentity type + relatedentity id
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* DOMAIN_ID_COLUMN
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN + relatedentity type +
* relatedentity id</pre>
*
* <p>The indexed entity section contains a primary filter name and primary
* filter value as the prefix. Within a given name/value, entire entity
* entries are stored in the same format as described in the entity section
* above (below, "key" represents any one of the possible entity entry keys
* described above).</p>
* <pre>
* INDEXED_ENTRY_PREFIX + primaryfilter name + primaryfilter value +
* key</pre>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LeveldbTimelineStore extends AbstractService
implements TimelineStore {
private static final Log LOG = LogFactory
.getLog(LeveldbTimelineStore.class);
@Private
@VisibleForTesting
static final String FILENAME = "leveldb-timeline-store.ldb";
private static final byte[] START_TIME_LOOKUP_PREFIX = "k".getBytes(Charset.forName("UTF-8"));
private static final byte[] ENTITY_ENTRY_PREFIX = "e".getBytes(Charset.forName("UTF-8"));
private static final byte[] INDEXED_ENTRY_PREFIX = "i".getBytes(Charset.forName("UTF-8"));
private static final byte[] EVENTS_COLUMN = "e".getBytes(Charset.forName("UTF-8"));
private static final byte[] PRIMARY_FILTERS_COLUMN = "f".getBytes(Charset.forName("UTF-8"));
private static final byte[] OTHER_INFO_COLUMN = "i".getBytes(Charset.forName("UTF-8"));
private static final byte[] RELATED_ENTITIES_COLUMN = "r".getBytes(Charset.forName("UTF-8"));
private static final byte[] INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN =
"z".getBytes(Charset.forName("UTF-8"));
private static final byte[] DOMAIN_ID_COLUMN = "d".getBytes(Charset.forName("UTF-8"));
private static final byte[] DOMAIN_ENTRY_PREFIX = "d".getBytes(Charset.forName("UTF-8"));
private static final byte[] OWNER_LOOKUP_PREFIX = "o".getBytes(Charset.forName("UTF-8"));
private static final byte[] DESCRIPTION_COLUMN = "d".getBytes(Charset.forName("UTF-8"));
private static final byte[] OWNER_COLUMN = "o".getBytes(Charset.forName("UTF-8"));
private static final byte[] READER_COLUMN = "r".getBytes(Charset.forName("UTF-8"));
private static final byte[] WRITER_COLUMN = "w".getBytes(Charset.forName("UTF-8"));
private static final byte[] TIMESTAMP_COLUMN = "t".getBytes(Charset.forName("UTF-8"));
private static final byte[] EMPTY_BYTES = new byte[0];
private static final String TIMELINE_STORE_VERSION_KEY = "timeline-store-version";
private static final Version CURRENT_VERSION_INFO = Version
.newInstance(1, 0);
@Private
@VisibleForTesting
static final FsPermission LEVELDB_DIR_UMASK = FsPermission
.createImmutable((short) 0700);
private Map<EntityIdentifier, StartAndInsertTime> startTimeWriteCache;
private Map<EntityIdentifier, Long> startTimeReadCache;
/**
* Per-entity locks are obtained when writing.
*/
private final LockMap<EntityIdentifier> writeLocks =
new LockMap<EntityIdentifier>();
private final ReentrantReadWriteLock deleteLock =
new ReentrantReadWriteLock();
private DB db;
private Thread deletionThread;
public LeveldbTimelineStore() {
super(LeveldbTimelineStore.class.getName());
}
@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
Preconditions.checkArgument(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_TTL_MS);
Preconditions.checkArgument(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
Preconditions.checkArgument(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0,
"%s property value should be greater than or equal to zero",
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
Preconditions.checkArgument(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0,
" %s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
Preconditions.checkArgument(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
Options options = new Options();
options.createIfMissing(true);
options.cacheSize(conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
JniDBFactory factory = new JniDBFactory();
Path dbPath = new Path(
conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
FileSystem localFS = null;
try {
localFS = FileSystem.getLocal(conf);
if (!localFS.exists(dbPath)) {
if (!localFS.mkdirs(dbPath)) {
throw new IOException("Couldn't create directory for leveldb " +
"timeline store " + dbPath);
}
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
IOUtils.cleanup(LOG, localFS);
}
LOG.info("Using leveldb path " + dbPath);
db = factory.open(new File(dbPath.toString()), options);
checkVersion();
startTimeWriteCache =
Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(
conf)));
startTimeReadCache =
Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(
conf)));
if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
deletionThread = new EntityDeletionThread(conf);
deletionThread.start();
}
super.serviceInit(conf);
}
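  // Configuration sketch (illustrative only; the path value is
  // hypothetical): the store is wired up through YarnConfiguration before
  // init, using the same keys that serviceInit validates above.
  //
  //   Configuration conf = new YarnConfiguration();
  //   conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
  //       "/var/lib/yarn/timeline");
  //   conf.setLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
  //       7 * 24 * 60 * 60 * 1000L);
  //   LeveldbTimelineStore store = new LeveldbTimelineStore();
  //   store.init(conf);  // opens the db and starts the TTL deletion thread
  //   store.start();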
@Override
protected void serviceStop() throws Exception {
if (deletionThread != null) {
deletionThread.interrupt();
LOG.info("Waiting for deletion thread to complete its current action");
try {
deletionThread.join();
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting for deletion thread to complete," +
" closing db now", e);
}
}
IOUtils.cleanup(LOG, db);
super.serviceStop();
}
private static class StartAndInsertTime {
final long startTime;
final long insertTime;
public StartAndInsertTime(long startTime, long insertTime) {
this.startTime = startTime;
this.insertTime = insertTime;
}
}
private class EntityDeletionThread extends Thread {
private final long ttl;
private final long ttlInterval;
public EntityDeletionThread(Configuration conf) {
ttl = conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS);
ttlInterval = conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
LOG.info("Starting deletion thread with ttl " + ttl + " and cycle " +
"interval " + ttlInterval);
}
@Override
public void run() {
while (true) {
long timestamp = System.currentTimeMillis() - ttl;
try {
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
LOG.error(e);
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
}
}
}
}
private static class LockMap<K> {
private static class CountingReentrantLock<K> extends ReentrantLock {
private static final long serialVersionUID = 1L;
private int count;
private K key;
CountingReentrantLock(K key) {
super();
this.count = 0;
this.key = key;
}
}
private Map<K, CountingReentrantLock<K>> locks =
new HashMap<K, CountingReentrantLock<K>>();
synchronized CountingReentrantLock<K> getLock(K key) {
CountingReentrantLock<K> lock = locks.get(key);
if (lock == null) {
lock = new CountingReentrantLock<K>(key);
locks.put(key, lock);
}
lock.count++;
return lock;
}
synchronized void returnLock(CountingReentrantLock<K> lock) {
if (lock.count == 0) {
throw new IllegalStateException("Returned lock more times than it " +
"was retrieved");
}
lock.count--;
if (lock.count == 0) {
locks.remove(lock.key);
}
}
}
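  // LockMap usage pattern, mirroring put() below: getLock/lock to write,
  // then unlock/returnLock so the map entry is dropped once its count
  // reaches zero (sketch only).
  //
  //   LockMap.CountingReentrantLock<EntityIdentifier> lock =
  //       writeLocks.getLock(id);
  //   lock.lock();
  //   try {
  //     // ... write under the per-entity lock ...
  //   } finally {
  //     lock.unlock();
  //     writeLocks.returnLock(lock);
  //   }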
@Override
public TimelineEntity getEntity(String entityId, String entityType,
EnumSet<Field> fields) throws IOException {
Long revStartTime = getStartTimeLong(entityId, entityType);
if (revStartTime == null) {
return null;
}
byte[] prefix = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
.add(entityType).add(writeReverseOrderedLong(revStartTime))
.add(entityId).getBytesForLookup();
LeveldbIterator iterator = null;
try {
iterator = new LeveldbIterator(db);
iterator.seek(prefix);
if (fields == null) {
fields = EnumSet.allOf(Field.class);
}
return getEntity(entityId, entityType, revStartTime, fields, iterator,
prefix, prefix.length);
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
/**
* Read entity from a db iterator. If no information is found in the
* specified fields for this entity, return null.
*/
private static TimelineEntity getEntity(String entityId, String entityType,
Long startTime, EnumSet<Field> fields, LeveldbIterator iterator,
byte[] prefix, int prefixlen) throws IOException {
TimelineEntity entity = new TimelineEntity();
boolean events = false;
boolean lastEvent = false;
if (fields.contains(Field.EVENTS)) {
events = true;
} else if (fields.contains(Field.LAST_EVENT_ONLY)) {
lastEvent = true;
} else {
entity.setEvents(null);
}
boolean relatedEntities = false;
if (fields.contains(Field.RELATED_ENTITIES)) {
relatedEntities = true;
} else {
entity.setRelatedEntities(null);
}
boolean primaryFilters = false;
if (fields.contains(Field.PRIMARY_FILTERS)) {
primaryFilters = true;
} else {
entity.setPrimaryFilters(null);
}
boolean otherInfo = false;
if (fields.contains(Field.OTHER_INFO)) {
otherInfo = true;
} else {
entity.setOtherInfo(null);
}
// iterate through the entity's entry, parsing information if it is part
// of a requested field
for (; iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefixlen, key)) {
break;
}
if (key.length == prefixlen) {
continue;
}
if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
if (primaryFilters) {
addPrimaryFilter(entity, key,
prefixlen + PRIMARY_FILTERS_COLUMN.length);
}
} else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
if (otherInfo) {
entity.addOtherInfo(parseRemainingKey(key,
prefixlen + OTHER_INFO_COLUMN.length),
GenericObjectMapper.read(iterator.peekNext().getValue()));
}
} else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
if (relatedEntities) {
addRelatedEntity(entity, key,
prefixlen + RELATED_ENTITIES_COLUMN.length);
}
} else if (key[prefixlen] == EVENTS_COLUMN[0]) {
if (events || (lastEvent &&
entity.getEvents().size() == 0)) {
TimelineEvent event = getEntityEvent(null, key, prefixlen +
EVENTS_COLUMN.length, iterator.peekNext().getValue());
if (event != null) {
entity.addEvent(event);
}
}
} else if (key[prefixlen] == DOMAIN_ID_COLUMN[0]) {
byte[] v = iterator.peekNext().getValue();
String domainId = new String(v, Charset.forName("UTF-8"));
entity.setDomainId(domainId);
} else {
if (key[prefixlen] !=
INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
LOG.warn(String.format("Found unexpected column for entity %s of " +
"type %s (0x%02x)", entityId, entityType, key[prefixlen]));
}
}
}
entity.setEntityId(entityId);
entity.setEntityType(entityType);
entity.setStartTime(startTime);
return entity;
}
@Override
public TimelineEvents getEntityTimelines(String entityType,
SortedSet<String> entityIds, Long limit, Long windowStart,
Long windowEnd, Set<String> eventType) throws IOException {
TimelineEvents events = new TimelineEvents();
if (entityIds == null || entityIds.isEmpty()) {
return events;
}
// create a lexicographically-ordered map from start time to entities
Map<byte[], List<EntityIdentifier>> startTimeMap = new TreeMap<byte[],
List<EntityIdentifier>>(new Comparator<byte[]>() {
@Override
public int compare(byte[] o1, byte[] o2) {
return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0,
o2.length);
}
});
LeveldbIterator iterator = null;
try {
// look up start times for the specified entities
// skip entities with no start time
for (String entityId : entityIds) {
byte[] startTime = getStartTime(entityId, entityType);
if (startTime != null) {
List<EntityIdentifier> entities = startTimeMap.get(startTime);
if (entities == null) {
entities = new ArrayList<EntityIdentifier>();
startTimeMap.put(startTime, entities);
}
entities.add(new EntityIdentifier(entityId, entityType));
}
}
for (Entry<byte[], List<EntityIdentifier>> entry :
startTimeMap.entrySet()) {
// look up the events matching the given parameters (limit,
// start time, end time, event types) for entities whose start times
// were found and add the entities to the return list
byte[] revStartTime = entry.getKey();
for (EntityIdentifier entityIdentifier : entry.getValue()) {
EventsOfOneEntity entity = new EventsOfOneEntity();
entity.setEntityId(entityIdentifier.getId());
entity.setEntityType(entityType);
events.addEvent(entity);
KeyBuilder kb = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
.add(entityType).add(revStartTime).add(entityIdentifier.getId())
.add(EVENTS_COLUMN);
byte[] prefix = kb.getBytesForLookup();
if (windowEnd == null) {
windowEnd = Long.MAX_VALUE;
}
byte[] revts = writeReverseOrderedLong(windowEnd);
kb.add(revts);
byte[] first = kb.getBytesForLookup();
byte[] last = null;
if (windowStart != null) {
last = KeyBuilder.newInstance().add(prefix)
.add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
}
if (limit == null) {
limit = DEFAULT_LIMIT;
}
iterator = new LeveldbIterator(db);
for (iterator.seek(first); entity.getEvents().size() < limit &&
iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key) || (last != null &&
WritableComparator.compareBytes(key, 0, key.length, last, 0,
last.length) > 0)) {
break;
}
TimelineEvent event = getEntityEvent(eventType, key, prefix.length,
iterator.peekNext().getValue());
if (event != null) {
entity.addEvent(event);
}
}
}
}
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
return events;
}
@Override
public TimelineEntities getEntities(String entityType,
Long limit, Long windowStart, Long windowEnd, String fromId, Long fromTs,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
if (primaryFilter == null) {
// if no primary filter is specified, prefix the lookup with
// ENTITY_ENTRY_PREFIX
return getEntityByTime(ENTITY_ENTRY_PREFIX, entityType, limit,
windowStart, windowEnd, fromId, fromTs, secondaryFilters,
fields, checkAcl);
} else {
// if a primary filter is specified, prefix the lookup with
// INDEXED_ENTRY_PREFIX + primaryFilterName + primaryFilterValue +
// ENTITY_ENTRY_PREFIX
byte[] base = KeyBuilder.newInstance().add(INDEXED_ENTRY_PREFIX)
.add(primaryFilter.getName())
.add(GenericObjectMapper.write(primaryFilter.getValue()), true)
.add(ENTITY_ENTRY_PREFIX).getBytesForLookup();
return getEntityByTime(base, entityType, limit, windowStart, windowEnd,
fromId, fromTs, secondaryFilters, fields, checkAcl);
}
}
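  // Query sketch (hypothetical type and filter values): fetch up to 10
  // entities of type "YARN_APPLICATION" carrying primary filter user=alice,
  // loading only their events.
  //
  //   TimelineEntities result = store.getEntities("YARN_APPLICATION", 10L,
  //       null, null, null, null, new NameValuePair("user", "alice"),
  //       null, EnumSet.of(Field.EVENTS), null);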
/**
* Retrieves a list of entities satisfying given parameters.
*
* @param base A byte array prefix for the lookup
* @param entityType The type of the entity
* @param limit A limit on the number of entities to return
* @param starttime The earliest entity start time to retrieve (exclusive)
* @param endtime The latest entity start time to retrieve (inclusive)
* @param fromId Retrieve entities starting with this entity
* @param fromTs Ignore entities with insert timestamp later than this ts
* @param secondaryFilters Filter pairs that the entities should match
* @param fields The set of fields to retrieve
* @return A list of entities
* @throws IOException
*/
private TimelineEntities getEntityByTime(byte[] base,
String entityType, Long limit, Long starttime, Long endtime,
String fromId, Long fromTs, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
// Even if other info and primary filter fields are not included, we
// still need to load them to match secondary filters when they are
// non-empty
if (fields == null) {
fields = EnumSet.allOf(Field.class);
}
boolean addPrimaryFilters = false;
boolean addOtherInfo = false;
if (secondaryFilters != null && secondaryFilters.size() > 0) {
if (!fields.contains(Field.PRIMARY_FILTERS)) {
fields.add(Field.PRIMARY_FILTERS);
addPrimaryFilters = true;
}
if (!fields.contains(Field.OTHER_INFO)) {
fields.add(Field.OTHER_INFO);
addOtherInfo = true;
}
}
LeveldbIterator iterator = null;
try {
KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
// only db keys matching the prefix (base + entity type) will be parsed
byte[] prefix = kb.getBytesForLookup();
if (endtime == null) {
// if end time is null, place no restriction on end time
endtime = Long.MAX_VALUE;
}
// construct a first key that will be seeked to using end time or fromId
byte[] first = null;
if (fromId != null) {
Long fromIdStartTime = getStartTimeLong(fromId, entityType);
if (fromIdStartTime == null) {
// no start time for provided id, so return empty entities
return new TimelineEntities();
}
if (fromIdStartTime <= endtime) {
// if provided id's start time falls before the end of the window,
// use it to construct the seek key
first = kb.add(writeReverseOrderedLong(fromIdStartTime))
.add(fromId).getBytesForLookup();
}
}
// if seek key wasn't constructed using fromId, construct it using end ts
if (first == null) {
first = kb.add(writeReverseOrderedLong(endtime)).getBytesForLookup();
}
byte[] last = null;
if (starttime != null) {
// if start time is not null, set a last key that will not be
// iterated past
last = KeyBuilder.newInstance().add(base).add(entityType)
.add(writeReverseOrderedLong(starttime)).getBytesForLookup();
}
if (limit == null) {
// if limit is not specified, use the default
limit = DEFAULT_LIMIT;
}
TimelineEntities entities = new TimelineEntities();
iterator = new LeveldbIterator(db);
iterator.seek(first);
// iterate until one of the following conditions is met: limit is
// reached, there are no more keys, the key prefix no longer matches,
// or a start time has been specified and reached/exceeded
while (entities.getEntities().size() < limit && iterator.hasNext()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key) || (last != null &&
WritableComparator.compareBytes(key, 0, key.length, last, 0,
last.length) > 0)) {
break;
}
// read the start time and entity id from the current key
KeyParser kp = new KeyParser(key, prefix.length);
Long startTime = kp.getNextLong();
String entityId = kp.getNextString();
if (fromTs != null) {
long insertTime = readReverseOrderedLong(iterator.peekNext()
.getValue(), 0);
if (insertTime > fromTs) {
byte[] firstKey = key;
while (iterator.hasNext() && prefixMatches(firstKey,
kp.getOffset(), key)) {
iterator.next();
key = iterator.peekNext().getKey();
}
continue;
}
}
// parse the entity that owns this key, iterating over all keys for
// the entity
TimelineEntity entity = getEntity(entityId, entityType, startTime,
fields, iterator, key, kp.getOffset());
// determine if the retrieved entity matches the provided secondary
// filters, and if so add it to the list of entities to return
boolean filterPassed = true;
if (secondaryFilters != null) {
for (NameValuePair filter : secondaryFilters) {
Object v = entity.getOtherInfo().get(filter.getName());
if (v == null) {
Set<Object> vs = entity.getPrimaryFilters()
.get(filter.getName());
if (vs == null || !vs.contains(filter.getValue())) {
filterPassed = false;
break;
}
} else if (!v.equals(filter.getValue())) {
filterPassed = false;
break;
}
}
}
if (filterPassed) {
if (entity.getDomainId() == null) {
entity.setDomainId(DEFAULT_DOMAIN_ID);
}
if (checkAcl == null || checkAcl.check(entity)) {
// Remove primary filter and other info if they are added for
// matching secondary filters
if (addPrimaryFilters) {
entity.setPrimaryFilters(null);
}
if (addOtherInfo) {
entity.setOtherInfo(null);
}
entities.addEntity(entity);
}
}
}
return entities;
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
/**
* Handle error and set it in response.
*/
  private static void handleError(TimelineEntity entity,
      TimelinePutResponse response, final int errorCode) {
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(errorCode);
response.addError(error);
}
/**
* Put a single entity. If there is an error, add a TimelinePutError to the
* given response.
*/
private void put(TimelineEntity entity, TimelinePutResponse response,
boolean allowEmptyDomainId) {
LockMap.CountingReentrantLock<EntityIdentifier> lock =
writeLocks.getLock(new EntityIdentifier(entity.getEntityId(),
entity.getEntityType()));
lock.lock();
WriteBatch writeBatch = null;
List<EntityIdentifier> relatedEntitiesWithoutStartTimes =
new ArrayList<EntityIdentifier>();
byte[] revStartTime = null;
Map<String, Set<Object>> primaryFilters = null;
try {
writeBatch = db.createWriteBatch();
List<TimelineEvent> events = entity.getEvents();
// look up the start time for the entity
StartAndInsertTime startAndInsertTime = getAndSetStartTime(
entity.getEntityId(), entity.getEntityType(),
entity.getStartTime(), events);
if (startAndInsertTime == null) {
// if no start time is found, add an error and return
handleError(entity, response, TimelinePutError.NO_START_TIME);
return;
}
revStartTime = writeReverseOrderedLong(startAndInsertTime
.startTime);
primaryFilters = entity.getPrimaryFilters();
// write entity marker
byte[] markerKey = createEntityMarkerKey(entity.getEntityId(),
entity.getEntityType(), revStartTime);
byte[] markerValue = writeReverseOrderedLong(startAndInsertTime
.insertTime);
writeBatch.put(markerKey, markerValue);
writePrimaryFilterEntries(writeBatch, primaryFilters, markerKey,
markerValue);
// write event entries
if (events != null && !events.isEmpty()) {
for (TimelineEvent event : events) {
byte[] revts = writeReverseOrderedLong(event.getTimestamp());
byte[] key = createEntityEventKey(entity.getEntityId(),
entity.getEntityType(), revStartTime, revts,
event.getEventType());
byte[] value = GenericObjectMapper.write(event.getEventInfo());
writeBatch.put(key, value);
writePrimaryFilterEntries(writeBatch, primaryFilters, key, value);
}
}
// write related entity entries
Map<String, Set<String>> relatedEntities =
entity.getRelatedEntities();
if (relatedEntities != null && !relatedEntities.isEmpty()) {
for (Entry<String, Set<String>> relatedEntityList :
relatedEntities.entrySet()) {
String relatedEntityType = relatedEntityList.getKey();
for (String relatedEntityId : relatedEntityList.getValue()) {
// invisible "reverse" entries (entity -> related entity)
byte[] key = createReverseRelatedEntityKey(entity.getEntityId(),
entity.getEntityType(), revStartTime, relatedEntityId,
relatedEntityType);
writeBatch.put(key, EMPTY_BYTES);
// look up start time of related entity
byte[] relatedEntityStartTime = getStartTime(relatedEntityId,
relatedEntityType);
// delay writing the related entity if no start time is found
if (relatedEntityStartTime == null) {
relatedEntitiesWithoutStartTimes.add(
new EntityIdentifier(relatedEntityId, relatedEntityType));
continue;
} else {
// This is the existing entity
byte[] domainIdBytes = db.get(createDomainIdKey(
relatedEntityId, relatedEntityType, relatedEntityStartTime));
// The timeline data created by the server before 2.6 won't have
// the domain field. We assume this timeline data is in the
// default timeline domain.
String domainId = null;
if (domainIdBytes == null) {
domainId = TimelineDataManager.DEFAULT_DOMAIN_ID;
} else {
domainId = new String(domainIdBytes, Charset.forName("UTF-8"));
}
if (!domainId.equals(entity.getDomainId())) {
// in this case the entity will be put, but the relation will be
// ignored
handleError(entity, response, TimelinePutError.FORBIDDEN_RELATION);
continue;
}
}
// write "forward" entry (related entity -> entity)
key = createRelatedEntityKey(relatedEntityId,
relatedEntityType, relatedEntityStartTime,
entity.getEntityId(), entity.getEntityType());
writeBatch.put(key, EMPTY_BYTES);
}
}
}
// write primary filter entries
if (primaryFilters != null && !primaryFilters.isEmpty()) {
for (Entry<String, Set<Object>> primaryFilter :
primaryFilters.entrySet()) {
for (Object primaryFilterValue : primaryFilter.getValue()) {
byte[] key = createPrimaryFilterKey(entity.getEntityId(),
entity.getEntityType(), revStartTime,
primaryFilter.getKey(), primaryFilterValue);
writeBatch.put(key, EMPTY_BYTES);
writePrimaryFilterEntries(writeBatch, primaryFilters, key,
EMPTY_BYTES);
}
}
}
// write other info entries
Map<String, Object> otherInfo = entity.getOtherInfo();
if (otherInfo != null && !otherInfo.isEmpty()) {
for (Entry<String, Object> i : otherInfo.entrySet()) {
byte[] key = createOtherInfoKey(entity.getEntityId(),
entity.getEntityType(), revStartTime, i.getKey());
byte[] value = GenericObjectMapper.write(i.getValue());
writeBatch.put(key, value);
writePrimaryFilterEntries(writeBatch, primaryFilters, key, value);
}
}
// write domain id entry
byte[] key = createDomainIdKey(entity.getEntityId(),
entity.getEntityType(), revStartTime);
if (entity.getDomainId() == null ||
entity.getDomainId().length() == 0) {
if (!allowEmptyDomainId) {
handleError(entity, response, TimelinePutError.NO_DOMAIN);
return;
}
} else {
writeBatch.put(key, entity.getDomainId().getBytes(Charset.forName("UTF-8")));
writePrimaryFilterEntries(writeBatch, primaryFilters, key,
entity.getDomainId().getBytes(Charset.forName("UTF-8")));
}
db.write(writeBatch);
} catch (DBException de) {
LOG.error("Error putting entity " + entity.getEntityId() +
" of type " + entity.getEntityType(), de);
handleError(entity, response, TimelinePutError.IO_EXCEPTION);
} catch (IOException e) {
LOG.error("Error putting entity " + entity.getEntityId() +
" of type " + entity.getEntityType(), e);
handleError(entity, response, TimelinePutError.IO_EXCEPTION);
} finally {
lock.unlock();
writeLocks.returnLock(lock);
IOUtils.cleanup(LOG, writeBatch);
}
for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
lock = writeLocks.getLock(relatedEntity);
lock.lock();
try {
StartAndInsertTime relatedEntityStartAndInsertTime =
getAndSetStartTime(relatedEntity.getId(), relatedEntity.getType(),
readReverseOrderedLong(revStartTime, 0), null);
if (relatedEntityStartAndInsertTime == null) {
throw new IOException("Error setting start time for related entity");
}
byte[] relatedEntityStartTime = writeReverseOrderedLong(
relatedEntityStartAndInsertTime.startTime);
        // This is the new entity; the domain should be the same
byte[] key = createDomainIdKey(relatedEntity.getId(),
relatedEntity.getType(), relatedEntityStartTime);
db.put(key, entity.getDomainId().getBytes(Charset.forName("UTF-8")));
db.put(createRelatedEntityKey(relatedEntity.getId(),
relatedEntity.getType(), relatedEntityStartTime,
entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
db.put(createEntityMarkerKey(relatedEntity.getId(),
relatedEntity.getType(), relatedEntityStartTime),
writeReverseOrderedLong(relatedEntityStartAndInsertTime
.insertTime));
} catch (DBException de) {
LOG.error("Error putting related entity " + relatedEntity.getId() +
" of type " + relatedEntity.getType() + " for entity " +
entity.getEntityId() + " of type " + entity.getEntityType(), de);
handleError(entity, response, TimelinePutError.IO_EXCEPTION);
} catch (IOException e) {
LOG.error("Error putting related entity " + relatedEntity.getId() +
" of type " + relatedEntity.getType() + " for entity " +
entity.getEntityId() + " of type " + entity.getEntityType(), e);
handleError(entity, response, TimelinePutError.IO_EXCEPTION);
} finally {
lock.unlock();
writeLocks.returnLock(lock);
}
}
}
/**
* For a given key / value pair that has been written to the db,
* write additional entries to the db for each primary filter.
*/
private static void writePrimaryFilterEntries(WriteBatch writeBatch,
Map<String, Set<Object>> primaryFilters, byte[] key, byte[] value)
throws IOException {
if (primaryFilters != null && !primaryFilters.isEmpty()) {
for (Entry<String, Set<Object>> pf : primaryFilters.entrySet()) {
for (Object pfval : pf.getValue()) {
writeBatch.put(addPrimaryFilterToKey(pf.getKey(), pfval,
key), value);
}
}
}
}
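  // Fan-out illustration (hypothetical filter values): if an entity carries
  // primary filters {user=[alice], queue=[q1]}, every (key, value) written
  // to the entity section is duplicated under
  //   INDEXED_ENTRY_PREFIX + "user" + write(alice) + key
  //   INDEXED_ENTRY_PREFIX + "queue" + write(q1) + key
  // so that getEntities() with a primary filter can scan a single index
  // prefix instead of the whole entity section.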
@Override
public TimelinePutResponse put(TimelineEntities entities) {
try {
deleteLock.readLock().lock();
TimelinePutResponse response = new TimelinePutResponse();
for (TimelineEntity entity : entities.getEntities()) {
put(entity, response, false);
}
return response;
} finally {
deleteLock.readLock().unlock();
}
}
@Private
@VisibleForTesting
public TimelinePutResponse putWithNoDomainId(TimelineEntities entities) {
try {
deleteLock.readLock().lock();
TimelinePutResponse response = new TimelinePutResponse();
for (TimelineEntity entity : entities.getEntities()) {
put(entity, response, true);
}
return response;
} finally {
deleteLock.readLock().unlock();
}
}
/**
* Get the unique start time for a given entity as a byte array that sorts
* the timestamps in reverse order (see {@link
* GenericObjectMapper#writeReverseOrderedLong(long)}).
*
* @param entityId The id of the entity
* @param entityType The type of the entity
* @return A byte array, null if not found
* @throws IOException
*/
private byte[] getStartTime(String entityId, String entityType)
throws IOException {
Long l = getStartTimeLong(entityId, entityType);
return l == null ? null : writeReverseOrderedLong(l);
}
/**
* Get the unique start time for a given entity as a Long.
*
* @param entityId The id of the entity
* @param entityType The type of the entity
* @return A Long, null if not found
* @throws IOException
*/
private Long getStartTimeLong(String entityId, String entityType)
throws IOException {
EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
try {
// start time is not provided, so try to look it up
if (startTimeReadCache.containsKey(entity)) {
// found the start time in the cache
return startTimeReadCache.get(entity);
} else {
// try to look up the start time in the db
byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
byte[] v = db.get(b);
if (v == null) {
// did not find the start time in the db
return null;
} else {
// found the start time in the db
Long l = readReverseOrderedLong(v, 0);
startTimeReadCache.put(entity, l);
return l;
}
}
} catch(DBException e) {
throw new IOException(e);
}
}
/**
* Get the unique start time for a given entity as a byte array that sorts
* the timestamps in reverse order (see {@link
* GenericObjectMapper#writeReverseOrderedLong(long)}). If the start time
* doesn't exist, set it based on the information provided. Should only be
* called when a lock has been obtained on the entity.
*
* @param entityId The id of the entity
* @param entityType The type of the entity
* @param startTime The start time of the entity, or null
* @param events A list of events for the entity, or null
* @return A StartAndInsertTime
* @throws IOException
*/
private StartAndInsertTime getAndSetStartTime(String entityId,
String entityType, Long startTime, List<TimelineEvent> events)
throws IOException {
EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
if (startTime == null) {
// start time is not provided, so try to look it up
if (startTimeWriteCache.containsKey(entity)) {
// found the start time in the cache
return startTimeWriteCache.get(entity);
} else {
if (events != null) {
// prepare a start time from events in case it is needed
Long min = Long.MAX_VALUE;
for (TimelineEvent e : events) {
if (min > e.getTimestamp()) {
min = e.getTimestamp();
}
}
startTime = min;
}
return checkStartTimeInDb(entity, startTime);
}
} else {
// start time is provided
if (startTimeWriteCache.containsKey(entity)) {
// always use start time from cache if it exists
return startTimeWriteCache.get(entity);
} else {
// check the provided start time matches the db
return checkStartTimeInDb(entity, startTime);
}
}
}
/**
* Checks db for start time and returns it if it exists. If it doesn't
* exist, writes the suggested start time (if it is not null). This is
* only called when the start time is not found in the cache,
* so it adds it back into the cache if it is found. Should only be called
* when a lock has been obtained on the entity.
*/
private StartAndInsertTime checkStartTimeInDb(EntityIdentifier entity,
Long suggestedStartTime) throws IOException {
StartAndInsertTime startAndInsertTime = null;
// create lookup key for start time
byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
try {
// retrieve value for key
byte[] v = db.get(b);
if (v == null) {
// start time doesn't exist in db
if (suggestedStartTime == null) {
return null;
}
startAndInsertTime = new StartAndInsertTime(suggestedStartTime,
System.currentTimeMillis());
// write suggested start time
v = new byte[16];
writeReverseOrderedLong(suggestedStartTime, v, 0);
writeReverseOrderedLong(startAndInsertTime.insertTime, v, 8);
WriteOptions writeOptions = new WriteOptions();
writeOptions.sync(true);
db.put(b, v, writeOptions);
} else {
// found start time in db, so ignore suggested start time
startAndInsertTime = new StartAndInsertTime(readReverseOrderedLong(v, 0),
readReverseOrderedLong(v, 8));
}
} catch(DBException e) {
throw new IOException(e);
}
startTimeWriteCache.put(entity, startAndInsertTime);
startTimeReadCache.put(entity, startAndInsertTime.startTime);
return startAndInsertTime;
}
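  // Value layout written above: bytes 0..7 hold the reverse-ordered start
  // time and bytes 8..15 the reverse-ordered insert time, so one db.get()
  // recovers both halves via readReverseOrderedLong(v, 0) and
  // readReverseOrderedLong(v, 8).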
/**
* Creates a key for looking up the start time of a given entity,
* of the form START_TIME_LOOKUP_PREFIX + entity type + entity id.
*/
private static byte[] createStartTimeLookupKey(String entityId,
String entityType) throws IOException {
return KeyBuilder.newInstance().add(START_TIME_LOOKUP_PREFIX)
.add(entityType).add(entityId).getBytes();
}
/**
* Creates an entity marker, serializing ENTITY_ENTRY_PREFIX + entity type +
* revstarttime + entity id.
*/
private static byte[] createEntityMarkerKey(String entityId,
String entityType, byte[] revStartTime) throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
.add(entityType).add(revStartTime).add(entityId).getBytesForLookup();
}
/**
* Creates an index entry for the given key of the form
* INDEXED_ENTRY_PREFIX + primaryfiltername + primaryfiltervalue + key.
*/
private static byte[] addPrimaryFilterToKey(String primaryFilterName,
Object primaryFilterValue, byte[] key) throws IOException {
return KeyBuilder.newInstance().add(INDEXED_ENTRY_PREFIX)
.add(primaryFilterName)
.add(GenericObjectMapper.write(primaryFilterValue), true).add(key)
.getBytes();
}
/**
* Creates an event key, serializing ENTITY_ENTRY_PREFIX + entity type +
* revstarttime + entity id + EVENTS_COLUMN + reveventtimestamp + event type.
*/
private static byte[] createEntityEventKey(String entityId,
String entityType, byte[] revStartTime, byte[] revEventTimestamp,
String eventType) throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
.add(entityType).add(revStartTime).add(entityId).add(EVENTS_COLUMN)
.add(revEventTimestamp).add(eventType).getBytes();
}
/**
* Creates an event object from the given key, offset, and value. If the
* event type is not contained in the specified set of event types,
* returns null.
*/
private static TimelineEvent getEntityEvent(Set<String> eventTypes,
byte[] key, int offset, byte[] value) throws IOException {
KeyParser kp = new KeyParser(key, offset);
long ts = kp.getNextLong();
String tstype = kp.getNextString();
if (eventTypes == null || eventTypes.contains(tstype)) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(ts);
event.setEventType(tstype);
Object o = GenericObjectMapper.read(value);
if (o == null) {
event.setEventInfo(null);
} else if (o instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> m = (Map<String, Object>) o;
event.setEventInfo(m);
} else {
throw new IOException("Couldn't deserialize event info map");
}
return event;
}
return null;
}
/**
* Creates a primary filter key, serializing ENTITY_ENTRY_PREFIX +
* entity type + revstarttime + entity id + PRIMARY_FILTERS_COLUMN + name +
* value.
*/
private static byte[] createPrimaryFilterKey(String entityId,
String entityType, byte[] revStartTime, String name, Object value)
throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
.add(revStartTime).add(entityId).add(PRIMARY_FILTERS_COLUMN).add(name)
.add(GenericObjectMapper.write(value)).getBytes();
}
/**
* Parses the primary filter from the given key at the given offset and
* adds it to the given entity.
*/
private static void addPrimaryFilter(TimelineEntity entity, byte[] key,
int offset) throws IOException {
KeyParser kp = new KeyParser(key, offset);
String name = kp.getNextString();
Object value = GenericObjectMapper.read(key, kp.getOffset());
entity.addPrimaryFilter(name, value);
}
/**
* Creates an other info key, serializing ENTITY_ENTRY_PREFIX + entity type +
* revstarttime + entity id + OTHER_INFO_COLUMN + name.
*/
private static byte[] createOtherInfoKey(String entityId, String entityType,
byte[] revStartTime, String name) throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
.add(revStartTime).add(entityId).add(OTHER_INFO_COLUMN).add(name)
.getBytes();
}
/**
* Creates a string representation of the byte array from the given offset
* to the end of the array (for parsing other info keys).
*/
private static String parseRemainingKey(byte[] b, int offset) {
return new String(b, offset, b.length - offset, Charset.forName("UTF-8"));
}
/**
* Creates a related entity key, serializing ENTITY_ENTRY_PREFIX +
* entity type + revstarttime + entity id + RELATED_ENTITIES_COLUMN +
* relatedentity type + relatedentity id.
*/
private static byte[] createRelatedEntityKey(String entityId,
String entityType, byte[] revStartTime, String relatedEntityId,
String relatedEntityType) throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
.add(revStartTime).add(entityId).add(RELATED_ENTITIES_COLUMN)
.add(relatedEntityType).add(relatedEntityId).getBytes();
}
/**
* Parses the related entity from the given key at the given offset and
* adds it to the given entity.
*/
private static void addRelatedEntity(TimelineEntity entity, byte[] key,
int offset) throws IOException {
KeyParser kp = new KeyParser(key, offset);
String type = kp.getNextString();
String id = kp.getNextString();
entity.addRelatedEntity(type, id);
}
/**
* Creates a reverse related entity key, serializing ENTITY_ENTRY_PREFIX +
* entity type + revstarttime + entity id +
* INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN +
* relatedentity type + relatedentity id.
*/
private static byte[] createReverseRelatedEntityKey(String entityId,
String entityType, byte[] revStartTime, String relatedEntityId,
String relatedEntityType) throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
.add(revStartTime).add(entityId)
.add(INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN)
.add(relatedEntityType).add(relatedEntityId).getBytes();
}
/**
* Creates a domain id key, serializing ENTITY_ENTRY_PREFIX +
* entity type + revstarttime + entity id + DOMAIN_ID_COLUMN.
*/
private static byte[] createDomainIdKey(String entityId,
String entityType, byte[] revStartTime) throws IOException {
return KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType)
.add(revStartTime).add(entityId).add(DOMAIN_ID_COLUMN).getBytes();
}
/**
* Clears the cache to test reloading start times from leveldb (only for
* testing).
*/
@VisibleForTesting
void clearStartTimeCache() {
startTimeWriteCache.clear();
startTimeReadCache.clear();
}
@VisibleForTesting
static int getStartTimeReadCacheSize(Configuration conf) {
return conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
YarnConfiguration.
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
}
@VisibleForTesting
static int getStartTimeWriteCacheSize(Configuration conf) {
return conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
YarnConfiguration.
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
}
@VisibleForTesting
List<String> getEntityTypes() throws IOException {
LeveldbIterator iterator = null;
try {
iterator = getDbIterator(false);
List<String> entityTypes = new ArrayList<String>();
iterator.seek(ENTITY_ENTRY_PREFIX);
while (iterator.hasNext()) {
byte[] key = iterator.peekNext().getKey();
if (key[0] != ENTITY_ENTRY_PREFIX[0]) {
break;
}
KeyParser kp = new KeyParser(key,
ENTITY_ENTRY_PREFIX.length);
String entityType = kp.getNextString();
entityTypes.add(entityType);
byte[] lookupKey = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
.add(entityType).getBytesForLookup();
if (lookupKey[lookupKey.length - 1] != 0x0) {
throw new IOException("Found unexpected end byte in lookup key");
}
lookupKey[lookupKey.length - 1] = 0x1;
iterator.seek(lookupKey);
}
return entityTypes;
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
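  // Note on the seek trick above: getBytesForLookup() terminates the type
  // component with a 0x0 separator byte; bumping that byte to 0x1 yields the
  // smallest key greater than every key of the current type, so the next
  // seek jumps straight to the following entity type instead of scanning
  // every entity of the current one.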
/**
* Finds all keys in the db that have a given prefix and deletes them on
* the given write batch.
*/
private void deleteKeysWithPrefix(WriteBatch writeBatch, byte[] prefix,
LeveldbIterator iterator) {
for (iterator.seek(prefix); iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)) {
break;
}
writeBatch.delete(key);
}
}
@VisibleForTesting
boolean deleteNextEntity(String entityType, byte[] reverseTimestamp,
LeveldbIterator iterator, LeveldbIterator pfIterator, boolean seeked)
throws IOException {
WriteBatch writeBatch = null;
try {
KeyBuilder kb = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX)
.add(entityType);
byte[] typePrefix = kb.getBytesForLookup();
kb.add(reverseTimestamp);
if (!seeked) {
iterator.seek(kb.getBytesForLookup());
}
if (!iterator.hasNext()) {
return false;
}
byte[] entityKey = iterator.peekNext().getKey();
if (!prefixMatches(typePrefix, typePrefix.length, entityKey)) {
return false;
}
// read the start time and entity id from the current key
KeyParser kp = new KeyParser(entityKey, typePrefix.length + 8);
String entityId = kp.getNextString();
int prefixlen = kp.getOffset();
byte[] deletePrefix = new byte[prefixlen];
System.arraycopy(entityKey, 0, deletePrefix, 0, prefixlen);
writeBatch = db.createWriteBatch();
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting entity type:" + entityType + " id:" + entityId);
}
// remove start time from cache and db
writeBatch.delete(createStartTimeLookupKey(entityId, entityType));
EntityIdentifier entityIdentifier =
new EntityIdentifier(entityId, entityType);
startTimeReadCache.remove(entityIdentifier);
startTimeWriteCache.remove(entityIdentifier);
// delete current entity
for (; iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(entityKey, prefixlen, key)) {
break;
}
writeBatch.delete(key);
if (key.length == prefixlen) {
continue;
}
if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
kp = new KeyParser(key,
prefixlen + PRIMARY_FILTERS_COLUMN.length);
String name = kp.getNextString();
Object value = GenericObjectMapper.read(key, kp.getOffset());
deleteKeysWithPrefix(writeBatch, addPrimaryFilterToKey(name, value,
deletePrefix), pfIterator);
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting entity type:" + entityType + " id:" +
entityId + " primary filter entry " + name + " " +
value);
}
} else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
kp = new KeyParser(key,
prefixlen + RELATED_ENTITIES_COLUMN.length);
String type = kp.getNextString();
String id = kp.getNextString();
byte[] relatedEntityStartTime = getStartTime(id, type);
if (relatedEntityStartTime == null) {
LOG.warn("Found no start time for " +
"related entity " + id + " of type " + type + " while " +
"deleting " + entityId + " of type " + entityType);
continue;
}
writeBatch.delete(createReverseRelatedEntityKey(id, type,
relatedEntityStartTime, entityId, entityType));
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting entity type:" + entityType + " id:" +
entityId + " from invisible reverse related entity " +
"entry of type:" + type + " id:" + id);
}
} else if (key[prefixlen] ==
INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
kp = new KeyParser(key, prefixlen +
INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN.length);
String type = kp.getNextString();
String id = kp.getNextString();
byte[] relatedEntityStartTime = getStartTime(id, type);
if (relatedEntityStartTime == null) {
LOG.warn("Found no start time for reverse " +
"related entity " + id + " of type " + type + " while " +
"deleting " + entityId + " of type " + entityType);
continue;
}
writeBatch.delete(createRelatedEntityKey(id, type,
relatedEntityStartTime, entityId, entityType));
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting entity type:" + entityType + " id:" +
entityId + " from related entity entry of type:" +
type + " id:" + id);
}
}
}
WriteOptions writeOptions = new WriteOptions();
writeOptions.sync(true);
db.write(writeBatch, writeOptions);
return true;
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, writeBatch);
}
}
/**
* Discards entities with start timestamp less than or equal to the given
* timestamp.
*/
@VisibleForTesting
void discardOldEntities(long timestamp)
throws IOException, InterruptedException {
byte[] reverseTimestamp = writeReverseOrderedLong(timestamp);
long totalCount = 0;
long t1 = System.currentTimeMillis();
try {
List<String> entityTypes = getEntityTypes();
for (String entityType : entityTypes) {
LeveldbIterator iterator = null;
LeveldbIterator pfIterator = null;
long typeCount = 0;
try {
deleteLock.writeLock().lock();
iterator = getDbIterator(false);
pfIterator = getDbIterator(false);
if (deletionThread != null && deletionThread.isInterrupted()) {
throw new InterruptedException();
}
boolean seeked = false;
while (deleteNextEntity(entityType, reverseTimestamp, iterator,
pfIterator, seeked)) {
typeCount++;
totalCount++;
seeked = true;
if (deletionThread != null && deletionThread.isInterrupted()) {
throw new InterruptedException();
}
}
} catch (IOException e) {
LOG.error("Got IOException while deleting entities for type " +
entityType + ", continuing to next type", e);
} finally {
IOUtils.cleanup(LOG, iterator, pfIterator);
deleteLock.writeLock().unlock();
if (typeCount > 0) {
LOG.info("Deleted " + typeCount + " entities of type " +
entityType);
}
}
}
} finally {
long t2 = System.currentTimeMillis();
LOG.info("Discarded " + totalCount + " entities for timestamp " +
timestamp + " and earlier in " + (t2 - t1) / 1000.0 + " seconds");
}
}
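  // Retention flow in brief: the deletion thread invokes this with
  // (now - ttl); for each entity type, deleteNextEntity() is called under
  // the write lock until it reports no more entities whose start time is at
  // or before the cutoff, removing each entity's index, related-entity, and
  // start-time rows along the way.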
@VisibleForTesting
LeveldbIterator getDbIterator(boolean fillCache) {
ReadOptions readOptions = new ReadOptions();
readOptions.fillCache(fillCache);
return new LeveldbIterator(db, readOptions);
}
Version loadVersion() throws IOException {
try {
byte[] data = db.get(bytes(TIMELINE_STORE_VERSION_KEY));
// if version is not stored previously, treat it as CURRENT_VERSION_INFO.
if (data == null || data.length == 0) {
return getCurrentVersion();
}
Version version =
new VersionPBImpl(VersionProto.parseFrom(data));
return version;
} catch(DBException e) {
throw new IOException(e);
}
}
// Only used for test
@VisibleForTesting
void storeVersion(Version state) throws IOException {
dbStoreVersion(state);
}
private void dbStoreVersion(Version state) throws IOException {
String key = TIMELINE_STORE_VERSION_KEY;
byte[] data =
((VersionPBImpl) state).getProto().toByteArray();
try {
db.put(bytes(key), data);
} catch (DBException e) {
throw new IOException(e);
}
}
Version getCurrentVersion() {
return CURRENT_VERSION_INFO;
}
/**
   * 1) Versioning timeline store: major.minor. e.g. 1.0, 1.1, 1.2...1.25,
   * 2.0 etc.
* 2) Any incompatible change of TS-store is a major upgrade, and any
* compatible change of TS-store is a minor upgrade.
* 3) Within a minor upgrade, say 1.1 to 1.2:
* overwrite the version info and proceed as normal.
* 4) Within a major upgrade, say 1.2 to 2.0:
* throw exception and indicate user to use a separate upgrade tool to
* upgrade timeline store or remove incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded timeline store version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing timeline store version info " + getCurrentVersion());
dbStoreVersion(CURRENT_VERSION_INFO);
} else {
String incompatibleMessage =
"Incompatible version for timeline store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
LOG.fatal(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
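  // Concretely, with CURRENT_VERSION_INFO = 1.0: loading a stored 1.x
  // version is treated as compatible and the version info is overwritten,
  // while loading a stored 2.0 fails fast with the incompatible-version
  // IOException above.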
  // TODO: make data retention work with the domain data as well
@Override
public void put(TimelineDomain domain) throws IOException {
WriteBatch writeBatch = null;
try {
writeBatch = db.createWriteBatch();
if (domain.getId() == null || domain.getId().length() == 0) {
throw new IllegalArgumentException("Domain doesn't have an ID");
}
if (domain.getOwner() == null || domain.getOwner().length() == 0) {
throw new IllegalArgumentException("Domain doesn't have an owner.");
}
// Write description
byte[] domainEntryKey = createDomainEntryKey(
domain.getId(), DESCRIPTION_COLUMN);
byte[] ownerLookupEntryKey = createOwnerLookupKey(
domain.getOwner(), domain.getId(), DESCRIPTION_COLUMN);
if (domain.getDescription() != null) {
writeBatch.put(domainEntryKey, domain.getDescription().
getBytes(Charset.forName("UTF-8")));
writeBatch.put(ownerLookupEntryKey, domain.getDescription().
getBytes(Charset.forName("UTF-8")));
} else {
writeBatch.put(domainEntryKey, EMPTY_BYTES);
writeBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
}
// Write owner
domainEntryKey = createDomainEntryKey(domain.getId(), OWNER_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(
domain.getOwner(), domain.getId(), OWNER_COLUMN);
// Null check for owner is done before
writeBatch.put(domainEntryKey, domain.getOwner().getBytes(Charset.forName("UTF-8")));
writeBatch.put(ownerLookupEntryKey, domain.getOwner().getBytes(Charset.forName("UTF-8")));
// Write readers
domainEntryKey = createDomainEntryKey(domain.getId(), READER_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(
domain.getOwner(), domain.getId(), READER_COLUMN);
if (domain.getReaders() != null && domain.getReaders().length() > 0) {
writeBatch.put(domainEntryKey, domain.getReaders().getBytes(Charset.forName("UTF-8")));
writeBatch.put(ownerLookupEntryKey, domain.getReaders().
getBytes(Charset.forName("UTF-8")));
} else {
writeBatch.put(domainEntryKey, EMPTY_BYTES);
writeBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
}
// Write writers
domainEntryKey = createDomainEntryKey(domain.getId(), WRITER_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(
domain.getOwner(), domain.getId(), WRITER_COLUMN);
if (domain.getWriters() != null && domain.getWriters().length() > 0) {
writeBatch.put(domainEntryKey, domain.getWriters().getBytes(Charset.forName("UTF-8")));
writeBatch.put(ownerLookupEntryKey, domain.getWriters().
getBytes(Charset.forName("UTF-8")));
} else {
writeBatch.put(domainEntryKey, EMPTY_BYTES);
writeBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
}
// Write creation time and modification time
// We put both timestamps together because they are always retrieved
// together, and store them in the same way as we did for the entity's
// start time and insert time.
domainEntryKey = createDomainEntryKey(domain.getId(), TIMESTAMP_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(
domain.getOwner(), domain.getId(), TIMESTAMP_COLUMN);
long currentTimestamp = System.currentTimeMillis();
byte[] timestamps = db.get(domainEntryKey);
if (timestamps == null) {
timestamps = new byte[16];
writeReverseOrderedLong(currentTimestamp, timestamps, 0);
writeReverseOrderedLong(currentTimestamp, timestamps, 8);
} else {
writeReverseOrderedLong(currentTimestamp, timestamps, 8);
}
writeBatch.put(domainEntryKey, timestamps);
writeBatch.put(ownerLookupEntryKey, timestamps);
db.write(writeBatch);
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, writeBatch);
}
}
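  // Domain write sketch (hypothetical values): id and owner are mandatory;
  // unset fields fall back to empty column values.
  //
  //   TimelineDomain d = new TimelineDomain();
  //   d.setId("domain_1");
  //   d.setOwner("alice");
  //   d.setReaders("alice,bob");
  //   d.setWriters("alice");
  //   store.put(d);  // first put sets created time; every put updates
  //                  // modified time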
/**
* Creates a domain entity key with column name suffix,
* of the form DOMAIN_ENTRY_PREFIX + domain id + column name.
*/
private static byte[] createDomainEntryKey(String domainId,
byte[] columnName) throws IOException {
return KeyBuilder.newInstance().add(DOMAIN_ENTRY_PREFIX)
.add(domainId).add(columnName).getBytes();
}
/**
* Creates an owner lookup key with column name suffix,
* of the form OWNER_LOOKUP_PREFIX + owner + domain id + column name.
*/
private static byte[] createOwnerLookupKey(
String owner, String domainId, byte[] columnName) throws IOException {
return KeyBuilder.newInstance().add(OWNER_LOOKUP_PREFIX)
.add(owner).add(domainId).add(columnName).getBytes();
}
@Override
public TimelineDomain getDomain(String domainId)
throws IOException {
LeveldbIterator iterator = null;
try {
byte[] prefix = KeyBuilder.newInstance()
.add(DOMAIN_ENTRY_PREFIX).add(domainId).getBytesForLookup();
iterator = new LeveldbIterator(db);
iterator.seek(prefix);
return getTimelineDomain(iterator, domainId, prefix);
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
@Override
public TimelineDomains getDomains(String owner)
throws IOException {
LeveldbIterator iterator = null;
try {
byte[] prefix = KeyBuilder.newInstance()
.add(OWNER_LOOKUP_PREFIX).add(owner).getBytesForLookup();
List<TimelineDomain> domains = new ArrayList<TimelineDomain>();
for (iterator = new LeveldbIterator(db), iterator.seek(prefix);
iterator.hasNext();) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)) {
break;
}
// Iterator to parse the rows of an individual domain
KeyParser kp = new KeyParser(key, prefix.length);
String domainId = kp.getNextString();
byte[] prefixExt = KeyBuilder.newInstance().add(OWNER_LOOKUP_PREFIX)
.add(owner).add(domainId).getBytesForLookup();
TimelineDomain domainToReturn =
getTimelineDomain(iterator, domainId, prefixExt);
if (domainToReturn != null) {
domains.add(domainToReturn);
}
}
// Sort the domains to return
Collections.sort(domains, new Comparator<TimelineDomain>() {
@Override
public int compare(
TimelineDomain domain1, TimelineDomain domain2) {
int result = domain2.getCreatedTime().compareTo(
domain1.getCreatedTime());
if (result == 0) {
return domain2.getModifiedTime().compareTo(
domain1.getModifiedTime());
} else {
return result;
}
}
});
TimelineDomains domainsToReturn = new TimelineDomains();
domainsToReturn.addDomains(domains);
return domainsToReturn;
} catch(DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
private static TimelineDomain getTimelineDomain(
LeveldbIterator iterator, String domainId, byte[] prefix) throws IOException {
// Iterate over all the rows whose key starts with prefix to retrieve the
// domain information.
TimelineDomain domain = new TimelineDomain();
domain.setId(domainId);
boolean noRows = true;
for (; iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)) {
break;
}
if (noRows) {
noRows = false;
}
byte[] value = iterator.peekNext().getValue();
if (value != null && value.length > 0) {
if (key[prefix.length] == DESCRIPTION_COLUMN[0]) {
domain.setDescription(new String(value, Charset.forName("UTF-8")));
} else if (key[prefix.length] == OWNER_COLUMN[0]) {
domain.setOwner(new String(value, Charset.forName("UTF-8")));
} else if (key[prefix.length] == READER_COLUMN[0]) {
domain.setReaders(new String(value, Charset.forName("UTF-8")));
} else if (key[prefix.length] == WRITER_COLUMN[0]) {
domain.setWriters(new String(value, Charset.forName("UTF-8")));
} else if (key[prefix.length] == TIMESTAMP_COLUMN[0]) {
domain.setCreatedTime(readReverseOrderedLong(value, 0));
domain.setModifiedTime(readReverseOrderedLong(value, 8));
} else {
LOG.error("Unrecognized domain column: " + key[prefix.length]);
}
}
}
if (noRows) {
return null;
} else {
return domain;
}
}
}
| 72,148 | 38.577071 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
import org.apache.hadoop.yarn.server.timeline.RollingLevelDB.RollingWriteBatch;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder;
import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.WriteBatch;
import org.nustaq.serialization.FSTConfiguration;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong;
import static org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.writeReverseOrderedLong;
import static org.apache.hadoop.yarn.server.timeline.TimelineDataManager.DEFAULT_DOMAIN_ID;
import static org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.prefixMatches;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_TTL_MS;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
/**
* <p>
* An implementation of an application timeline store backed by leveldb.
* </p>
*
* <p>
* There are three sections of the db, the start time section, the entity
* section, and the indexed entity section.
* </p>
*
* <p>
* The start time section is used to retrieve the unique start time for a given
* entity. Its values each contain a start time while its keys are of the form:
* </p>
*
* <pre>
* START_TIME_LOOKUP_PREFIX + entity type + entity id
* </pre>
*
* <p>
* The entity section is ordered by entity type, then entity start time
* descending, then entity ID. There are four sub-sections of the entity
* section: events, primary filters, related entities, and other info. The event
* entries have event info serialized into their values. The other info entries
* have values corresponding to the values of the other info name/value map for
* the entry (note the names are contained in the key). All other entries have
* empty values. The key structure is as follows:
* </p>
*
* <pre>
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* DOMAIN_ID_COLUMN
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* EVENTS_COLUMN + reveventtimestamp + eventtype
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* PRIMARY_FILTERS_COLUMN + name + value
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* OTHER_INFO_COLUMN + name
*
* ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
* RELATED_ENTITIES_COLUMN + relatedentity type + relatedentity id
* </pre>
*
* <p>
* The indexed entity section contains a primary filter name and primary filter
* value as the prefix. Within a given name/value, entire entity entries are
* stored in the same format as described in the entity section above (below,
* "key" represents any one of the possible entity entry keys described above).
* </p>
*
* <pre>
* INDEXED_ENTRY_PREFIX + primaryfilter name + primaryfilter value +
* key
* </pre>
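*
* <p>
* As an illustrative sketch (separators and exact byte encodings come from
* KeyBuilder and are elided here), an entity of type "APP" with id "app_1",
* start time t, and one event of type "STARTED" at time ts would produce
* entries keyed roughly as:
* </p>
*
* <pre>
* ENTITY_ENTRY_PREFIX + APP + rev(t) + app_1 + DOMAIN_ID_COLUMN
* ENTITY_ENTRY_PREFIX + APP + rev(t) + app_1 + EVENTS_COLUMN + rev(ts)
* + STARTED
* </pre>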
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class RollingLevelDBTimelineStore extends AbstractService implements
TimelineStore {
private static final Log LOG = LogFactory
.getLog(RollingLevelDBTimelineStore.class);
private static FSTConfiguration fstConf =
FSTConfiguration.createDefaultConfiguration();
static {
fstConf.setShareReferences(false);
}
@Private
@VisibleForTesting
static final String FILENAME = "leveldb-timeline-store";
static final String DOMAIN = "domain-ldb";
static final String ENTITY = "entity-ldb";
static final String INDEX = "indexes-ldb";
static final String STARTTIME = "starttime-ldb";
static final String OWNER = "owner-ldb";
private static final byte[] DOMAIN_ID_COLUMN = "d".getBytes(UTF_8);
private static final byte[] EVENTS_COLUMN = "e".getBytes(UTF_8);
private static final byte[] PRIMARY_FILTERS_COLUMN = "f".getBytes(UTF_8);
private static final byte[] OTHER_INFO_COLUMN = "i".getBytes(UTF_8);
private static final byte[] RELATED_ENTITIES_COLUMN = "r".getBytes(UTF_8);
private static final byte[] DESCRIPTION_COLUMN = "d".getBytes(UTF_8);
private static final byte[] OWNER_COLUMN = "o".getBytes(UTF_8);
private static final byte[] READER_COLUMN = "r".getBytes(UTF_8);
private static final byte[] WRITER_COLUMN = "w".getBytes(UTF_8);
private static final byte[] TIMESTAMP_COLUMN = "t".getBytes(UTF_8);
private static final byte[] EMPTY_BYTES = new byte[0];
private static final String TIMELINE_STORE_VERSION_KEY =
"timeline-store-version";
private static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 0);
private static long writeBatchSize = 10000;
@Private
@VisibleForTesting
static final FsPermission LEVELDB_DIR_UMASK = FsPermission
.createImmutable((short) 0700);
private Map<EntityIdentifier, Long> startTimeWriteCache;
private Map<EntityIdentifier, Long> startTimeReadCache;
private DB domaindb;
private RollingLevelDB entitydb;
private RollingLevelDB indexdb;
private DB starttimedb;
private DB ownerdb;
private Thread deletionThread;
public RollingLevelDBTimelineStore() {
super(RollingLevelDBTimelineStore.class.getName());
}
@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
Preconditions
.checkArgument(conf.getLong(TIMELINE_SERVICE_TTL_MS,
DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0,
"%s property value should be greater than zero",
TIMELINE_SERVICE_TTL_MS);
Preconditions.checkArgument(conf.getLong(
TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0,
"%s property value should be greater than zero",
TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
Preconditions.checkArgument(conf.getLong(
TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0,
"%s property value should be greater than or equal to zero",
TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
Preconditions.checkArgument(conf.getLong(
TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0,
" %s property value should be greater than zero",
TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
Preconditions.checkArgument(conf.getLong(
TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0,
"%s property value should be greater than zero",
TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
Preconditions.checkArgument(conf.getLong(
TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES,
DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES) > 0,
"%s property value should be greater than zero",
TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES);
Preconditions.checkArgument(conf.getLong(
TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE) > 0,
"%s property value should be greater than zero",
TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE);
Options options = new Options();
options.createIfMissing(true);
options.cacheSize(conf.getLong(
TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
JniDBFactory factory = new JniDBFactory();
Path dbPath = new Path(
conf.get(TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
Path domainDBPath = new Path(dbPath, DOMAIN);
Path starttimeDBPath = new Path(dbPath, STARTTIME);
Path ownerDBPath = new Path(dbPath, OWNER);
FileSystem localFS = null;
try {
localFS = FileSystem.getLocal(conf);
if (!localFS.exists(dbPath)) {
if (!localFS.mkdirs(dbPath)) {
throw new IOException("Couldn't create directory for leveldb "
+ "timeline store " + dbPath);
}
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
if (!localFS.exists(domainDBPath)) {
if (!localFS.mkdirs(domainDBPath)) {
throw new IOException("Couldn't create directory for leveldb "
+ "timeline store " + domainDBPath);
}
localFS.setPermission(domainDBPath, LEVELDB_DIR_UMASK);
}
if (!localFS.exists(starttimeDBPath)) {
if (!localFS.mkdirs(starttimeDBPath)) {
throw new IOException("Couldn't create directory for leveldb "
+ "timeline store " + starttimeDBPath);
}
localFS.setPermission(starttimeDBPath, LEVELDB_DIR_UMASK);
}
if (!localFS.exists(ownerDBPath)) {
if (!localFS.mkdirs(ownerDBPath)) {
throw new IOException("Couldn't create directory for leveldb "
+ "timeline store " + ownerDBPath);
}
localFS.setPermission(ownerDBPath, LEVELDB_DIR_UMASK);
}
} finally {
IOUtils.cleanup(LOG, localFS);
}
options.maxOpenFiles(conf.getInt(
TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES,
DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES));
options.writeBufferSize(conf.getInt(
TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE));
LOG.info("Using leveldb path " + dbPath);
domaindb = factory.open(new File(domainDBPath.toString()), options);
entitydb = new RollingLevelDB(ENTITY);
entitydb.init(conf);
indexdb = new RollingLevelDB(INDEX);
indexdb.init(conf);
starttimedb = factory.open(new File(starttimeDBPath.toString()), options);
ownerdb = factory.open(new File(ownerDBPath.toString()), options);
checkVersion();
startTimeWriteCache = Collections.synchronizedMap(new LRUMap(
getStartTimeWriteCacheSize(conf)));
startTimeReadCache = Collections.synchronizedMap(new LRUMap(
getStartTimeReadCacheSize(conf)));
writeBatchSize = conf.getInt(
TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
if (getConfig().getBoolean(TIMELINE_SERVICE_TTL_ENABLE, true)) {
deletionThread = new EntityDeletionThread(getConfig());
deletionThread.start();
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (deletionThread != null) {
deletionThread.interrupt();
LOG.info("Waiting for deletion thread to complete its current action");
try {
deletionThread.join();
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting for deletion thread to complete,"
+ " closing db now", e);
}
}
IOUtils.cleanup(LOG, domaindb);
IOUtils.cleanup(LOG, starttimedb);
IOUtils.cleanup(LOG, ownerdb);
entitydb.stop();
indexdb.stop();
super.serviceStop();
}
private class EntityDeletionThread extends Thread {
private final long ttl;
private final long ttlInterval;
public EntityDeletionThread(Configuration conf) {
ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
DEFAULT_TIMELINE_SERVICE_TTL_MS);
ttlInterval = conf.getLong(
TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
LOG.info("Starting deletion thread with ttl " + ttl + " and cycle "
+ "interval " + ttlInterval);
}
@Override
public void run() {
Thread.currentThread().setName("Leveldb Timeline Store Retention");
while (true) {
long timestamp = System.currentTimeMillis() - ttl;
try {
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
LOG.error(e);
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
}
}
}
}
@Override
public TimelineEntity getEntity(String entityId, String entityType,
EnumSet<Field> fields) throws IOException {
Long revStartTime = getStartTimeLong(entityId, entityType);
if (revStartTime == null) {
return null;
}
byte[] prefix = KeyBuilder.newInstance().add(entityType)
.add(writeReverseOrderedLong(revStartTime)).add(entityId)
.getBytesForLookup();
DBIterator iterator = null;
try {
DB db = entitydb.getDBForStartTime(revStartTime);
if (db == null) {
return null;
}
iterator = db.iterator();
iterator.seek(prefix);
return getEntity(entityId, entityType, revStartTime, fields, iterator,
prefix, prefix.length);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
/**
* Read entity from a db iterator, loading only the requested fields; fields
* that were not requested are explicitly nulled out on the returned entity.
*/
private static TimelineEntity getEntity(String entityId, String entityType,
Long startTime, EnumSet<Field> fields, DBIterator iterator,
byte[] prefix, int prefixlen) throws IOException {
if (fields == null) {
fields = EnumSet.allOf(Field.class);
}
TimelineEntity entity = new TimelineEntity();
boolean events = false;
boolean lastEvent = false;
if (fields.contains(Field.EVENTS)) {
events = true;
} else if (fields.contains(Field.LAST_EVENT_ONLY)) {
lastEvent = true;
} else {
entity.setEvents(null);
}
boolean relatedEntities = false;
if (fields.contains(Field.RELATED_ENTITIES)) {
relatedEntities = true;
} else {
entity.setRelatedEntities(null);
}
boolean primaryFilters = false;
if (fields.contains(Field.PRIMARY_FILTERS)) {
primaryFilters = true;
} else {
entity.setPrimaryFilters(null);
}
boolean otherInfo = false;
if (fields.contains(Field.OTHER_INFO)) {
otherInfo = true;
} else {
entity.setOtherInfo(null);
}
// iterate through the entity's entries, parsing information if it is part
// of a requested field
for (; iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefixlen, key)) {
break;
}
if (key.length == prefixlen) {
continue;
}
if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
if (primaryFilters) {
addPrimaryFilter(entity, key, prefixlen
+ PRIMARY_FILTERS_COLUMN.length);
}
} else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
if (otherInfo) {
entity.addOtherInfo(
parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
fstConf.asObject(iterator.peekNext().getValue()));
}
} else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
if (relatedEntities) {
addRelatedEntity(entity, key, prefixlen
+ RELATED_ENTITIES_COLUMN.length);
}
} else if (key[prefixlen] == EVENTS_COLUMN[0]) {
if (events || (lastEvent && entity.getEvents().size() == 0)) {
TimelineEvent event = getEntityEvent(null, key, prefixlen
+ EVENTS_COLUMN.length, iterator.peekNext().getValue());
if (event != null) {
entity.addEvent(event);
}
}
} else if (key[prefixlen] == DOMAIN_ID_COLUMN[0]) {
byte[] v = iterator.peekNext().getValue();
String domainId = new String(v, UTF_8);
entity.setDomainId(domainId);
} else {
LOG.warn(String.format("Found unexpected column for entity %s of "
+ "type %s (0x%02x)", entityId, entityType, key[prefixlen]));
}
}
entity.setEntityId(entityId);
entity.setEntityType(entityType);
entity.setStartTime(startTime);
return entity;
}
@Override
public TimelineEvents getEntityTimelines(String entityType,
SortedSet<String> entityIds, Long limit, Long windowStart,
Long windowEnd, Set<String> eventType) throws IOException {
TimelineEvents events = new TimelineEvents();
if (entityIds == null || entityIds.isEmpty()) {
return events;
}
// create a lexicographically-ordered map from start time to entities
Map<byte[], List<EntityIdentifier>> startTimeMap =
new TreeMap<byte[], List<EntityIdentifier>>(
new Comparator<byte[]>() {
@Override
public int compare(byte[] o1, byte[] o2) {
return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0,
o2.length);
}
});
DBIterator iterator = null;
try {
// look up start times for the specified entities
// skip entities with no start time
for (String entityId : entityIds) {
byte[] startTime = getStartTime(entityId, entityType);
if (startTime != null) {
List<EntityIdentifier> entities = startTimeMap.get(startTime);
if (entities == null) {
entities = new ArrayList<EntityIdentifier>();
startTimeMap.put(startTime, entities);
}
entities.add(new EntityIdentifier(entityId, entityType));
}
}
for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap
.entrySet()) {
// look up the events matching the given parameters (limit,
// start time, end time, event types) for entities whose start times
// were found and add the entities to the return list
byte[] revStartTime = entry.getKey();
for (EntityIdentifier entityIdentifier : entry.getValue()) {
EventsOfOneEntity entity = new EventsOfOneEntity();
entity.setEntityId(entityIdentifier.getId());
entity.setEntityType(entityType);
events.addEvent(entity);
KeyBuilder kb = KeyBuilder.newInstance().add(entityType)
.add(revStartTime).add(entityIdentifier.getId())
.add(EVENTS_COLUMN);
byte[] prefix = kb.getBytesForLookup();
if (windowEnd == null) {
windowEnd = Long.MAX_VALUE;
}
byte[] revts = writeReverseOrderedLong(windowEnd);
kb.add(revts);
byte[] first = kb.getBytesForLookup();
byte[] last = null;
if (windowStart != null) {
last = KeyBuilder.newInstance().add(prefix)
.add(writeReverseOrderedLong(windowStart)).getBytesForLookup();
}
if (limit == null) {
limit = DEFAULT_LIMIT;
}
DB db = entitydb.getDBForStartTime(readReverseOrderedLong(
revStartTime, 0));
if (db == null) {
continue;
}
IOUtils.cleanup(LOG, iterator);
iterator = db.iterator();
for (iterator.seek(first); entity.getEvents().size() < limit
&& iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)
|| (last != null && WritableComparator.compareBytes(key, 0,
key.length, last, 0, last.length) > 0)) {
break;
}
TimelineEvent event = getEntityEvent(eventType, key, prefix.length,
iterator.peekNext().getValue());
if (event != null) {
entity.addEvent(event);
}
}
}
}
} finally {
IOUtils.cleanup(LOG, iterator);
}
return events;
}
@Override
public TimelineEntities getEntities(String entityType, Long limit,
Long windowStart, Long windowEnd, String fromId, Long fromTs,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fields, CheckAcl checkAcl) throws IOException {
if (primaryFilter == null) {
// if no primary filter is specified, prefix the lookup with
// ENTITY_ENTRY_PREFIX
return getEntityByTime(EMPTY_BYTES, entityType, limit, windowStart,
windowEnd, fromId, fromTs, secondaryFilters, fields, checkAcl, false);
} else {
// if a primary filter is specified, prefix the lookup with
// INDEXED_ENTRY_PREFIX + primaryFilterName + primaryFilterValue +
// ENTITY_ENTRY_PREFIX
byte[] base = KeyBuilder.newInstance().add(primaryFilter.getName())
.add(fstConf.asByteArray(primaryFilter.getValue()), true)
.getBytesForLookup();
return getEntityByTime(base, entityType, limit, windowStart, windowEnd,
fromId, fromTs, secondaryFilters, fields, checkAcl, true);
}
}
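// Hypothetical usage sketch (caller-side, not part of this store): fetch up
// to 100 "YARN_APPLICATION" entities carrying primary filter user=alice,
// loading only their primary filters, subject to the given ACL check:
//
// TimelineEntities apps = store.getEntities("YARN_APPLICATION", 100L,
// null, null, null, null, new NameValuePair("user", "alice"), null,
// EnumSet.of(Field.PRIMARY_FILTERS), acl);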
/**
* Retrieves a list of entities satisfying given parameters.
*
* @param base
* A byte array prefix for the lookup
* @param entityType
* The type of the entity
* @param limit
* A limit on the number of entities to return
* @param starttime
* The earliest entity start time to retrieve (exclusive)
* @param endtime
* The latest entity start time to retrieve (inclusive)
* @param fromId
* Retrieve entities starting with this entity
* @param fromTs
* Ignore entities with insert timestamp later than this ts
* @param secondaryFilters
* Filter pairs that the entities should match
* @param fields
* The set of fields to retrieve
* @param usingPrimaryFilter
* true if this query is using a primary filter
* @return A list of entities
* @throws IOException
*/
private TimelineEntities getEntityByTime(byte[] base, String entityType,
Long limit, Long starttime, Long endtime, String fromId, Long fromTs,
Collection<NameValuePair> secondaryFilters, EnumSet<Field> fields,
CheckAcl checkAcl, boolean usingPrimaryFilter) throws IOException {
DBIterator iterator = null;
try {
KeyBuilder kb = KeyBuilder.newInstance().add(base).add(entityType);
// only db keys matching the prefix (base + entity type) will be parsed
byte[] prefix = kb.getBytesForLookup();
if (endtime == null) {
// if end time is null, place no restriction on end time
endtime = Long.MAX_VALUE;
}
// Sanitize the fields parameter
if (fields == null) {
fields = EnumSet.allOf(Field.class);
}
// construct a first key that will be seeked to using end time or fromId
long firstStartTime = Long.MAX_VALUE;
byte[] first = null;
if (fromId != null) {
Long fromIdStartTime = getStartTimeLong(fromId, entityType);
if (fromIdStartTime == null) {
// no start time for provided id, so return empty entities
return new TimelineEntities();
}
if (fromIdStartTime <= endtime) {
// if provided id's start time falls before the end of the window,
// use it to construct the seek key
firstStartTime = fromIdStartTime;
first = kb.add(writeReverseOrderedLong(fromIdStartTime)).add(fromId)
.getBytesForLookup();
}
}
// if seek key wasn't constructed using fromId, construct it using end ts
if (first == null) {
firstStartTime = endtime;
first = kb.add(writeReverseOrderedLong(endtime)).getBytesForLookup();
}
byte[] last = null;
if (starttime != null) {
// if start time is not null, set a last key that will not be
// iterated past
last = KeyBuilder.newInstance().add(base).add(entityType)
.add(writeReverseOrderedLong(starttime)).getBytesForLookup();
}
if (limit == null) {
// if limit is not specified, use the default
limit = DEFAULT_LIMIT;
}
TimelineEntities entities = new TimelineEntities();
RollingLevelDB rollingdb = null;
if (usingPrimaryFilter) {
rollingdb = indexdb;
} else {
rollingdb = entitydb;
}
DB db = rollingdb.getDBForStartTime(firstStartTime);
while (entities.getEntities().size() < limit && db != null) {
IOUtils.cleanup(LOG, iterator);
iterator = db.iterator();
iterator.seek(first);
// iterate until one of the following conditions is met: limit is
// reached, there are no more keys, the key prefix no longer matches,
// or a start time has been specified and reached/exceeded
while (entities.getEntities().size() < limit && iterator.hasNext()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)
|| (last != null && WritableComparator.compareBytes(key, 0,
key.length, last, 0, last.length) > 0)) {
break;
}
// read the start time and entity id from the current key
KeyParser kp = new KeyParser(key, prefix.length);
Long startTime = kp.getNextLong();
String entityId = kp.getNextString();
if (fromTs != null) {
long insertTime = readReverseOrderedLong(iterator.peekNext()
.getValue(), 0);
if (insertTime > fromTs) {
byte[] firstKey = key;
while (iterator.hasNext()) {
key = iterator.peekNext().getKey();
iterator.next();
if (!prefixMatches(firstKey, kp.getOffset(), key)) {
break;
}
}
continue;
}
}
// Even if other info and primary filter fields are not included, we
// still need to load them to match secondary filters when they are
// non-empty
EnumSet<Field> queryFields = EnumSet.copyOf(fields);
boolean addPrimaryFilters = false;
boolean addOtherInfo = false;
if (secondaryFilters != null && secondaryFilters.size() > 0) {
if (!queryFields.contains(Field.PRIMARY_FILTERS)) {
queryFields.add(Field.PRIMARY_FILTERS);
addPrimaryFilters = true;
}
if (!queryFields.contains(Field.OTHER_INFO)) {
queryFields.add(Field.OTHER_INFO);
addOtherInfo = true;
}
}
// parse the entity that owns this key, iterating over all keys for
// the entity
TimelineEntity entity = null;
if (usingPrimaryFilter) {
entity = getEntity(entityId, entityType, queryFields);
iterator.next();
} else {
entity = getEntity(entityId, entityType, startTime, queryFields,
iterator, key, kp.getOffset());
}
// determine if the retrieved entity matches the provided secondary
// filters, and if so add it to the list of entities to return
boolean filterPassed = true;
if (secondaryFilters != null) {
for (NameValuePair filter : secondaryFilters) {
Object v = entity.getOtherInfo().get(filter.getName());
if (v == null) {
Set<Object> vs = entity.getPrimaryFilters()
.get(filter.getName());
if (vs == null || !vs.contains(filter.getValue())) {
filterPassed = false;
break;
}
} else if (!v.equals(filter.getValue())) {
filterPassed = false;
break;
}
}
}
if (filterPassed) {
if (entity.getDomainId() == null) {
entity.setDomainId(DEFAULT_DOMAIN_ID);
}
if (checkAcl == null || checkAcl.check(entity)) {
// Remove primary filters and other info if they were loaded only to
// match secondary filters
if (addPrimaryFilters) {
entity.setPrimaryFilters(null);
}
if (addOtherInfo) {
entity.setOtherInfo(null);
}
entities.addEntity(entity);
}
}
}
db = rollingdb.getPreviousDB(db);
}
return entities;
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
/**
* Put a single entity. If there is an error, add a TimelinePutError to the
* given response.
*
* @param entityUpdates
* a map containing all the scheduled writes for this put to the
* entity db
* @param indexUpdates
* a map containing all the scheduled writes for this put to the
* index db
*/
private long putEntities(TreeMap<Long, RollingWriteBatch> entityUpdates,
TreeMap<Long, RollingWriteBatch> indexUpdates, TimelineEntity entity,
TimelinePutResponse response) {
long putCount = 0;
List<EntityIdentifier> relatedEntitiesWithoutStartTimes =
new ArrayList<EntityIdentifier>();
byte[] revStartTime = null;
Map<String, Set<Object>> primaryFilters = null;
try {
List<TimelineEvent> events = entity.getEvents();
// look up the start time for the entity
Long startTime = getAndSetStartTime(entity.getEntityId(),
entity.getEntityType(), entity.getStartTime(), events);
if (startTime == null) {
// if no start time is found, add an error and return
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.NO_START_TIME);
response.addError(error);
return putCount;
}
// Must have a domain
if (StringUtils.isEmpty(entity.getDomainId())) {
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.NO_DOMAIN);
response.addError(error);
return putCount;
}
revStartTime = writeReverseOrderedLong(startTime);
long roundedStartTime = entitydb.computeCurrentCheckMillis(startTime);
RollingWriteBatch rollingWriteBatch = entityUpdates.get(roundedStartTime);
if (rollingWriteBatch == null) {
DB db = entitydb.getDBForStartTime(startTime);
if (db != null) {
WriteBatch writeBatch = db.createWriteBatch();
rollingWriteBatch = new RollingWriteBatch(db, writeBatch);
entityUpdates.put(roundedStartTime, rollingWriteBatch);
}
}
if (rollingWriteBatch == null) {
// no db exists for this start time, so the entity has expired;
// add an error and return
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
response.addError(error);
return putCount;
}
WriteBatch writeBatch = rollingWriteBatch.getWriteBatch();
// Save off the getBytes conversion to avoid unnecessary cost
byte[] entityIdBytes = entity.getEntityId().getBytes(UTF_8);
byte[] entityTypeBytes = entity.getEntityType().getBytes(UTF_8);
byte[] domainIdBytes = entity.getDomainId().getBytes(UTF_8);
// write entity marker
byte[] markerKey = KeyBuilder.newInstance(3).add(entityTypeBytes, true)
.add(revStartTime).add(entityIdBytes, true).getBytesForLookup();
writeBatch.put(markerKey, EMPTY_BYTES);
++putCount;
// write domain id entry
byte[] domainkey = KeyBuilder.newInstance(4).add(entityTypeBytes, true)
.add(revStartTime).add(entityIdBytes, true).add(DOMAIN_ID_COLUMN)
.getBytes();
writeBatch.put(domainkey, domainIdBytes);
++putCount;
// write event entries
if (events != null) {
for (TimelineEvent event : events) {
byte[] revts = writeReverseOrderedLong(event.getTimestamp());
byte[] key = KeyBuilder.newInstance().add(entityTypeBytes, true)
.add(revStartTime).add(entityIdBytes, true).add(EVENTS_COLUMN)
.add(revts).add(event.getEventType().getBytes(UTF_8)).getBytes();
byte[] value = fstConf.asByteArray(event.getEventInfo());
writeBatch.put(key, value);
++putCount;
}
}
// write primary filter entries
primaryFilters = entity.getPrimaryFilters();
if (primaryFilters != null) {
for (Entry<String, Set<Object>> primaryFilter : primaryFilters
.entrySet()) {
for (Object primaryFilterValue : primaryFilter.getValue()) {
byte[] key = KeyBuilder.newInstance(6).add(entityTypeBytes, true)
.add(revStartTime).add(entityIdBytes, true)
.add(PRIMARY_FILTERS_COLUMN).add(primaryFilter.getKey())
.add(fstConf.asByteArray(primaryFilterValue)).getBytes();
writeBatch.put(key, EMPTY_BYTES);
++putCount;
}
}
}
// write other info entries
Map<String, Object> otherInfo = entity.getOtherInfo();
if (otherInfo != null) {
for (Entry<String, Object> info : otherInfo.entrySet()) {
byte[] key = KeyBuilder.newInstance(5).add(entityTypeBytes, true)
.add(revStartTime).add(entityIdBytes, true)
.add(OTHER_INFO_COLUMN).add(info.getKey()).getBytes();
byte[] value = fstConf.asByteArray(info.getValue());
writeBatch.put(key, value);
++putCount;
}
}
// write related entity entries
Map<String, Set<String>> relatedEntities = entity.getRelatedEntities();
if (relatedEntities != null) {
for (Entry<String, Set<String>> relatedEntityList : relatedEntities
.entrySet()) {
String relatedEntityType = relatedEntityList.getKey();
for (String relatedEntityId : relatedEntityList.getValue()) {
// look up start time of related entity
Long relatedStartTimeLong = getStartTimeLong(relatedEntityId,
relatedEntityType);
// delay writing the related entity if no start time is found
if (relatedStartTimeLong == null) {
relatedEntitiesWithoutStartTimes.add(new EntityIdentifier(
relatedEntityId, relatedEntityType));
continue;
}
byte[] relatedEntityStartTime =
writeReverseOrderedLong(relatedStartTimeLong);
long relatedRoundedStartTime = entitydb
.computeCurrentCheckMillis(relatedStartTimeLong);
RollingWriteBatch relatedRollingWriteBatch = entityUpdates
.get(relatedRoundedStartTime);
if (relatedRollingWriteBatch == null) {
DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
if (db != null) {
WriteBatch relatedWriteBatch = db.createWriteBatch();
relatedRollingWriteBatch = new RollingWriteBatch(db,
relatedWriteBatch);
entityUpdates.put(relatedRoundedStartTime,
relatedRollingWriteBatch);
}
}
if (relatedRollingWriteBatch == null) {
// no db exists for the related entity's start time, so it has
// expired; add an error and skip this related entity
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
response.addError(error);
continue;
}
// The related entity already exists; fetch its stored domain id
byte[] relatedDomainIdBytes = relatedRollingWriteBatch.getDB().get(
createDomainIdKey(relatedEntityId, relatedEntityType,
relatedEntityStartTime));
// The timeline data created by the server before 2.6 won't have
// the domain field. We assume this timeline data is in the
// default timeline domain.
String domainId = null;
if (relatedDomainIdBytes == null) {
domainId = TimelineDataManager.DEFAULT_DOMAIN_ID;
} else {
domainId = new String(relatedDomainIdBytes, UTF_8);
}
if (!domainId.equals(entity.getDomainId())) {
// in this case the entity will be put, but the relation will be
// ignored
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.FORBIDDEN_RELATION);
response.addError(error);
continue;
}
// write "forward" entry (related entity -> entity)
byte[] key = createRelatedEntityKey(relatedEntityId,
relatedEntityType, relatedEntityStartTime,
entity.getEntityId(), entity.getEntityType());
WriteBatch relatedWriteBatch = relatedRollingWriteBatch
.getWriteBatch();
relatedWriteBatch.put(key, EMPTY_BYTES);
++putCount;
}
}
}
// write index entities
RollingWriteBatch indexRollingWriteBatch = indexUpdates
.get(roundedStartTime);
if (indexRollingWriteBatch == null) {
DB db = indexdb.getDBForStartTime(startTime);
if (db != null) {
WriteBatch indexWriteBatch = db.createWriteBatch();
indexRollingWriteBatch = new RollingWriteBatch(db, indexWriteBatch);
indexUpdates.put(roundedStartTime, indexRollingWriteBatch);
}
}
if (indexRollingWriteBatch == null) {
// no db exists for this start time, so the entity has expired;
// add an error and return
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
response.addError(error);
return putCount;
}
WriteBatch indexWriteBatch = indexRollingWriteBatch.getWriteBatch();
putCount += writePrimaryFilterEntries(indexWriteBatch, primaryFilters,
markerKey, EMPTY_BYTES);
} catch (IOException e) {
LOG.error("Error putting entity " + entity.getEntityId() + " of type "
+ entity.getEntityType(), e);
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.IO_EXCEPTION);
response.addError(error);
}
for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
try {
Long relatedEntityStartAndInsertTime = getAndSetStartTime(
relatedEntity.getId(), relatedEntity.getType(),
readReverseOrderedLong(revStartTime, 0), null);
if (relatedEntityStartAndInsertTime == null) {
throw new IOException("Error setting start time for related entity");
}
long relatedStartTimeLong = relatedEntityStartAndInsertTime;
long relatedRoundedStartTime = entitydb
.computeCurrentCheckMillis(relatedStartTimeLong);
RollingWriteBatch relatedRollingWriteBatch = entityUpdates
.get(relatedRoundedStartTime);
if (relatedRollingWriteBatch == null) {
DB db = entitydb.getDBForStartTime(relatedStartTimeLong);
if (db != null) {
WriteBatch relatedWriteBatch = db.createWriteBatch();
relatedRollingWriteBatch = new RollingWriteBatch(db,
relatedWriteBatch);
entityUpdates
.put(relatedRoundedStartTime, relatedRollingWriteBatch);
}
}
if (relatedRollingWriteBatch == null) {
// no db exists for the related entity's start time, so it has
// expired; add an error and skip this related entity
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.EXPIRED_ENTITY);
response.addError(error);
continue;
}
WriteBatch relatedWriteBatch = relatedRollingWriteBatch.getWriteBatch();
byte[] relatedEntityStartTime =
writeReverseOrderedLong(relatedEntityStartAndInsertTime);
// This is a new related entity, so it inherits the put entity's domain id
byte[] key = createDomainIdKey(relatedEntity.getId(),
relatedEntity.getType(), relatedEntityStartTime);
relatedWriteBatch.put(key, entity.getDomainId().getBytes(UTF_8));
++putCount;
relatedWriteBatch.put(
createRelatedEntityKey(relatedEntity.getId(),
relatedEntity.getType(), relatedEntityStartTime,
entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
++putCount;
relatedWriteBatch.put(
createEntityMarkerKey(relatedEntity.getId(),
relatedEntity.getType(), relatedEntityStartTime), EMPTY_BYTES);
++putCount;
} catch (IOException e) {
LOG.error(
"Error putting related entity " + relatedEntity.getId()
+ " of type " + relatedEntity.getType() + " for entity "
+ entity.getEntityId() + " of type " + entity.getEntityType(),
e);
TimelinePutError error = new TimelinePutError();
error.setEntityId(entity.getEntityId());
error.setEntityType(entity.getEntityType());
error.setErrorCode(TimelinePutError.IO_EXCEPTION);
response.addError(error);
}
}
return putCount;
}
/**
* For a given key / value pair that has been written to the db, write
* additional entries to the db for each primary filter.
*/
private static long writePrimaryFilterEntries(WriteBatch writeBatch,
Map<String, Set<Object>> primaryFilters, byte[] key, byte[] value)
throws IOException {
long putCount = 0;
if (primaryFilters != null) {
for (Entry<String, Set<Object>> pf : primaryFilters.entrySet()) {
for (Object pfval : pf.getValue()) {
writeBatch.put(addPrimaryFilterToKey(pf.getKey(), pfval, key), value);
++putCount;
}
}
}
return putCount;
}
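// Example (sketch): an entity marker key k for an entity with primary
// filters {user: [alice, bob]} yields two index entries,
// user + fst(alice) + k and user + fst(bob) + k, each carrying the same
// value, so the entity is discoverable under either filter value.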
@Override
public TimelinePutResponse put(TimelineEntities entities) {
if (LOG.isDebugEnabled()) {
LOG.debug("Starting put");
}
TimelinePutResponse response = new TimelinePutResponse();
TreeMap<Long, RollingWriteBatch> entityUpdates =
new TreeMap<Long, RollingWriteBatch>();
TreeMap<Long, RollingWriteBatch> indexUpdates =
new TreeMap<Long, RollingWriteBatch>();
long entityCount = 0;
try {
for (TimelineEntity entity : entities.getEntities()) {
entityCount += putEntities(entityUpdates, indexUpdates, entity,
response);
}
for (RollingWriteBatch entityUpdate : entityUpdates.values()) {
entityUpdate.write();
}
for (RollingWriteBatch indexUpdate : indexUpdates.values()) {
indexUpdate.write();
}
} finally {
for (RollingWriteBatch entityRollingWriteBatch : entityUpdates.values()) {
entityRollingWriteBatch.close();
}
for (RollingWriteBatch indexRollingWriteBatch : indexUpdates.values()) {
indexRollingWriteBatch.close();
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Put " + entityCount + " new leveldb entity entries and "
+ indexCount + " new leveldb index entries from "
+ entities.getEntities().size() + " timeline entities");
}
return response;
}
/**
* Get the unique start time for a given entity as a byte array that sorts the
* timestamps in reverse order (see
* {@link GenericObjectMapper#writeReverseOrderedLong(long)}).
*
* @param entityId
* The id of the entity
* @param entityType
* The type of the entity
* @return A byte array, null if not found
* @throws IOException
*/
private byte[] getStartTime(String entityId, String entityType)
throws IOException {
Long l = getStartTimeLong(entityId, entityType);
return l == null ? null : writeReverseOrderedLong(l);
}
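/**
* Minimal sketch (added for illustration only; the store itself never calls
* this): demonstrates why reverse-ordered longs make newer timestamps sort
* first in leveldb. A newer timestamp encodes to a lexicographically smaller
* byte array, so a forward iterator over a prefix sees the newest entries
* first.
*/
@VisibleForTesting
static boolean reverseOrderingSortsNewestFirst() {
byte[] older = writeReverseOrderedLong(1000L);
byte[] newer = writeReverseOrderedLong(2000L);
// newer compares as smaller, so leveldb would iterate it before older
return WritableComparator.compareBytes(
newer, 0, newer.length, older, 0, older.length) < 0;
}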
/**
* Get the unique start time for a given entity as a Long.
*
* @param entityId
* The id of the entity
* @param entityType
* The type of the entity
* @return A Long, null if not found
* @throws IOException
*/
private Long getStartTimeLong(String entityId, String entityType)
throws IOException {
EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
// start time is not provided, so try to look it up
if (startTimeReadCache.containsKey(entity)) {
// found the start time in the cache
return startTimeReadCache.get(entity);
} else {
// try to look up the start time in the db
byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
byte[] v = starttimedb.get(b);
if (v == null) {
// did not find the start time in the db
return null;
} else {
// found the start time in the db
Long l = readReverseOrderedLong(v, 0);
startTimeReadCache.put(entity, l);
return l;
}
}
}
/**
* Get the unique start time for a given entity as a byte array that sorts the
* timestamps in reverse order (see
* {@link GenericObjectMapper#writeReverseOrderedLong(long)}). If the start
* time doesn't exist, set it based on the information provided. Should only
* be called when a lock has been obtained on the entity.
*
* @param entityId
* The id of the entity
* @param entityType
* The type of the entity
* @param startTime
* The start time of the entity, or null
* @param events
* A list of events for the entity, or null
* @return The start time as a Long, or null if it cannot be determined
* @throws IOException
*/
private Long getAndSetStartTime(String entityId, String entityType,
Long startTime, List<TimelineEvent> events) throws IOException {
EntityIdentifier entity = new EntityIdentifier(entityId, entityType);
Long time = startTimeWriteCache.get(entity);
if (time != null) {
// return the value in the cache
return time;
}
if (startTime == null && events != null) {
// calculate best guess start time based on lowest event time
startTime = Long.MAX_VALUE;
for (TimelineEvent e : events) {
if (e.getTimestamp() < startTime) {
startTime = e.getTimestamp();
}
}
}
// check the provided start time matches the db
return checkStartTimeInDb(entity, startTime);
}
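// Example (sketch): for a put with startTime == null and events at
// timestamps {1700, 1500, 1600}, the best guess is 1500 (the earliest
// event). If the db already holds a start time for the entity, the stored
// value wins and the guess is discarded (see checkStartTimeInDb).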
/**
* Checks db for start time and returns it if it exists. If it doesn't exist,
* writes the suggested start time (if it is not null). This is only called
* when the start time is not found in the cache, so it adds it back into the
* cache if it is found. Should only be called when a lock has been obtained
* on the entity.
*/
private Long checkStartTimeInDb(EntityIdentifier entity,
Long suggestedStartTime) throws IOException {
Long startAndInsertTime = null;
// create lookup key for start time
byte[] b = createStartTimeLookupKey(entity.getId(), entity.getType());
// retrieve value for key
byte[] v = starttimedb.get(b);
if (v == null) {
// start time doesn't exist in db
if (suggestedStartTime == null) {
return null;
}
startAndInsertTime = suggestedStartTime;
// write suggested start time
starttimedb.put(b, writeReverseOrderedLong(suggestedStartTime));
} else {
// found start time in db, so ignore suggested start time
startAndInsertTime = readReverseOrderedLong(v, 0);
}
startTimeWriteCache.put(entity, startAndInsertTime);
startTimeReadCache.put(entity, startAndInsertTime);
return startAndInsertTime;
}
/**
* Creates a key for looking up the start time of a given entity, of the form
* START_TIME_LOOKUP_PREFIX + entity type + entity id.
*/
private static byte[] createStartTimeLookupKey(String entityId,
String entityType) throws IOException {
return KeyBuilder.newInstance().add(entityType).add(entityId).getBytes();
}
/**
* Creates an entity marker, serializing ENTITY_ENTRY_PREFIX + entity type +
* revstarttime + entity id.
*/
private static byte[] createEntityMarkerKey(String entityId,
String entityType, byte[] revStartTime) throws IOException {
return KeyBuilder.newInstance().add(entityType).add(revStartTime)
.add(entityId).getBytesForLookup();
}
/**
* Creates an index entry for the given key of the form INDEXED_ENTRY_PREFIX +
* primaryfiltername + primaryfiltervalue + key.
*/
private static byte[] addPrimaryFilterToKey(String primaryFilterName,
Object primaryFilterValue, byte[] key) throws IOException {
return KeyBuilder.newInstance().add(primaryFilterName)
.add(fstConf.asByteArray(primaryFilterValue), true).add(key).getBytes();
}
/**
* Creates an event object from the given key, offset, and value. If the event
* type is not contained in the specified set of event types, returns null.
*/
private static TimelineEvent getEntityEvent(Set<String> eventTypes,
byte[] key, int offset, byte[] value) throws IOException {
KeyParser kp = new KeyParser(key, offset);
long ts = kp.getNextLong();
String tstype = kp.getNextString();
if (eventTypes == null || eventTypes.contains(tstype)) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(ts);
event.setEventType(tstype);
Object o = fstConf.asObject(value);
if (o == null) {
event.setEventInfo(null);
} else if (o instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> m = (Map<String, Object>) o;
event.setEventInfo(m);
} else {
throw new IOException("Couldn't deserialize event info map");
}
return event;
}
return null;
}
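// Example (sketch): for an event key suffix of rev(ts) + "STARTED" whose
// value is an FST-serialized map such as {"user": "alice"}, this yields a
// TimelineEvent with timestamp ts, type "STARTED" and that map as its event
// info, or null when "STARTED" is not among the requested event types.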
/**
* Parses the primary filter from the given key at the given offset and adds
* it to the given entity.
*/
private static void addPrimaryFilter(TimelineEntity entity, byte[] key,
int offset) throws IOException {
KeyParser kp = new KeyParser(key, offset);
String name = kp.getNextString();
byte[] bytes = kp.getRemainingBytes();
Object value = fstConf.asObject(bytes);
entity.addPrimaryFilter(name, value);
}
/**
* Creates a string representation of the byte array from the given offset to
* the end of the array (for parsing other info keys).
*/
private static String parseRemainingKey(byte[] b, int offset) {
return new String(b, offset, b.length - offset, UTF_8);
}
/**
* Creates a related entity key, serializing ENTITY_ENTRY_PREFIX + entity type
* + revstarttime + entity id + RELATED_ENTITIES_COLUMN + relatedentity type +
* relatedentity id.
*/
private static byte[] createRelatedEntityKey(String entityId,
String entityType, byte[] revStartTime, String relatedEntityId,
String relatedEntityType) throws IOException {
return KeyBuilder.newInstance().add(entityType).add(revStartTime)
.add(entityId).add(RELATED_ENTITIES_COLUMN).add(relatedEntityType)
.add(relatedEntityId).getBytes();
}
/**
* Parses the related entity from the given key at the given offset and adds
* it to the given entity.
*/
private static void addRelatedEntity(TimelineEntity entity, byte[] key,
int offset) throws IOException {
KeyParser kp = new KeyParser(key, offset);
String type = kp.getNextString();
String id = kp.getNextString();
entity.addRelatedEntity(type, id);
}
/**
* Creates a domain id key, serializing ENTITY_ENTRY_PREFIX + entity type +
* revstarttime + entity id + DOMAIN_ID_COLUMN.
*/
private static byte[] createDomainIdKey(String entityId, String entityType,
byte[] revStartTime) throws IOException {
return KeyBuilder.newInstance().add(entityType).add(revStartTime)
.add(entityId).add(DOMAIN_ID_COLUMN).getBytes();
}
/**
* Clears the cache to test reloading start times from leveldb (only for
* testing).
*/
@VisibleForTesting
void clearStartTimeCache() {
startTimeWriteCache.clear();
startTimeReadCache.clear();
}
@VisibleForTesting
static int getStartTimeReadCacheSize(Configuration conf) {
return conf
.getInt(
TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
}
@VisibleForTesting
static int getStartTimeWriteCacheSize(Configuration conf) {
return conf
.getInt(
TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
}
@VisibleForTesting
long evictOldStartTimes(long minStartTime) throws IOException {
LOG.info("Searching for start times to evict earlier than " + minStartTime);
long batchSize = 0;
long totalCount = 0;
long startTimesCount = 0;
WriteBatch writeBatch = null;
DBIterator iterator = null;
try {
writeBatch = starttimedb.createWriteBatch();
ReadOptions readOptions = new ReadOptions();
readOptions.fillCache(false);
iterator = starttimedb.iterator(readOptions);
// seek to the first start time entry
iterator.seekToFirst();
// evaluate each start time entry to see if it needs to be evicted or not
while (iterator.hasNext()) {
Map.Entry<byte[], byte[]> current = iterator.next();
byte[] entityKey = current.getKey();
byte[] entityValue = current.getValue();
long startTime = readReverseOrderedLong(entityValue, 0);
if (startTime < minStartTime) {
++batchSize;
++startTimesCount;
writeBatch.delete(entityKey);
// a large delete will hold the lock for too long
if (batchSize >= writeBatchSize) {
if (LOG.isDebugEnabled()) {
LOG.debug("Preparing to delete a batch of " + batchSize
+ " old start times");
}
starttimedb.write(writeBatch);
if (LOG.isDebugEnabled()) {
LOG.debug("Deleted batch of " + batchSize
+ ". Total start times deleted so far this cycle: "
+ startTimesCount);
}
IOUtils.cleanup(LOG, writeBatch);
writeBatch = starttimedb.createWriteBatch();
batchSize = 0;
}
}
++totalCount;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Preparing to delete a batch of " + batchSize
+ " old start times");
}
starttimedb.write(writeBatch);
if (LOG.isDebugEnabled()) {
LOG.debug("Deleted batch of " + batchSize
+ ". Total start times deleted so far this cycle: "
+ startTimesCount);
}
LOG.info("Deleted " + startTimesCount + "/" + totalCount
+ " start time entities earlier than " + minStartTime);
} finally {
IOUtils.cleanup(LOG, writeBatch);
IOUtils.cleanup(LOG, iterator);
}
return startTimesCount;
}
/**
* Discards entities whose start timestamp is earlier than the given
* timestamp.
*/
@VisibleForTesting
void discardOldEntities(long timestamp) throws IOException,
InterruptedException {
long totalCount = 0;
long t1 = System.currentTimeMillis();
try {
totalCount += evictOldStartTimes(timestamp);
indexdb.evictOldDBs();
entitydb.evictOldDBs();
} finally {
long t2 = System.currentTimeMillis();
LOG.info("Discarded " + totalCount + " entities for timestamp "
+ timestamp + " and earlier in " + (t2 - t1) / 1000.0 + " seconds");
}
}
Version loadVersion() throws IOException {
byte[] data = starttimedb.get(bytes(TIMELINE_STORE_VERSION_KEY));
// if version is not stored previously, treat it as 1.0.
if (data == null || data.length == 0) {
return Version.newInstance(1, 0);
}
Version version = new VersionPBImpl(VersionProto.parseFrom(data));
return version;
}
// Only used for test
@VisibleForTesting
void storeVersion(Version state) throws IOException {
dbStoreVersion(state);
}
private void dbStoreVersion(Version state) throws IOException {
String key = TIMELINE_STORE_VERSION_KEY;
byte[] data = ((VersionPBImpl) state).getProto().toByteArray();
try {
starttimedb.put(bytes(key), data);
} catch (DBException e) {
throw new IOException(e);
}
}
Version getCurrentVersion() {
return CURRENT_VERSION_INFO;
}
/**
* 1) The timeline store is versioned as major.minor, e.g. 1.0, 1.1,
* 1.2...1.25, 2.0 etc.
* 2) Any incompatible change of the TS-store is a major upgrade, and any
* compatible change is a minor upgrade.
* 3) Within a minor upgrade, say 1.1 to 1.2: overwrite the version info and
* proceed as normal.
* 4) Across a major upgrade, say 1.2 to 2.0: throw an exception and direct
* the user to use a separate upgrade tool to upgrade the timeline store or
* remove the incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded timeline store version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing timeline store version info " + getCurrentVersion());
dbStoreVersion(CURRENT_VERSION_INFO);
} else {
String incompatibleMessage = "Incompatible version for timeline store: "
+ "expecting version " + getCurrentVersion()
+ ", but loading version " + loadedVersion;
LOG.fatal(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
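// Example (sketch): with CURRENT_VERSION_INFO = 1.0, a stored version 1.2
// differs only in the minor number, so 1.0 is simply written back and
// startup proceeds; a stored version 2.0 differs in the major number, so
// service init fails with an IOException until the store is upgraded or
// removed.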
// TODO: make data retention work with the domain data as well
@Override
public void put(TimelineDomain domain) throws IOException {
WriteBatch domainWriteBatch = null;
WriteBatch ownerWriteBatch = null;
try {
domainWriteBatch = domaindb.createWriteBatch();
ownerWriteBatch = ownerdb.createWriteBatch();
if (domain.getId() == null || domain.getId().length() == 0) {
throw new IllegalArgumentException("Domain doesn't have an ID");
}
if (domain.getOwner() == null || domain.getOwner().length() == 0) {
throw new IllegalArgumentException("Domain doesn't have an owner.");
}
// Write description
byte[] domainEntryKey = createDomainEntryKey(domain.getId(),
DESCRIPTION_COLUMN);
byte[] ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
domain.getId(), DESCRIPTION_COLUMN);
if (domain.getDescription() != null) {
domainWriteBatch.put(domainEntryKey,
domain.getDescription().getBytes(UTF_8));
ownerWriteBatch.put(ownerLookupEntryKey, domain.getDescription()
.getBytes(UTF_8));
} else {
domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
}
// Write owner
domainEntryKey = createDomainEntryKey(domain.getId(), OWNER_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
domain.getId(), OWNER_COLUMN);
// Null check for owner is done before
domainWriteBatch.put(domainEntryKey, domain.getOwner().getBytes(UTF_8));
ownerWriteBatch.put(ownerLookupEntryKey, domain.getOwner()
.getBytes(UTF_8));
// Write readers
domainEntryKey = createDomainEntryKey(domain.getId(), READER_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
domain.getId(), READER_COLUMN);
if (domain.getReaders() != null && domain.getReaders().length() > 0) {
domainWriteBatch.put(domainEntryKey, domain.getReaders()
.getBytes(UTF_8));
ownerWriteBatch.put(ownerLookupEntryKey,
domain.getReaders().getBytes(UTF_8));
} else {
domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
}
// Write writers
domainEntryKey = createDomainEntryKey(domain.getId(), WRITER_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
domain.getId(), WRITER_COLUMN);
if (domain.getWriters() != null && domain.getWriters().length() > 0) {
domainWriteBatch.put(domainEntryKey, domain.getWriters()
.getBytes(UTF_8));
ownerWriteBatch.put(ownerLookupEntryKey,
domain.getWriters().getBytes(UTF_8));
} else {
domainWriteBatch.put(domainEntryKey, EMPTY_BYTES);
ownerWriteBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
}
// Write creation time and modification time
// We put both timestamps together because they are always retrieved
// together, and store them in the same way as we did for the entity's
// start time and insert time.
domainEntryKey = createDomainEntryKey(domain.getId(), TIMESTAMP_COLUMN);
ownerLookupEntryKey = createOwnerLookupKey(domain.getOwner(),
domain.getId(), TIMESTAMP_COLUMN);
long currentTimestamp = System.currentTimeMillis();
byte[] timestamps = domaindb.get(domainEntryKey);
if (timestamps == null) {
timestamps = new byte[16];
writeReverseOrderedLong(currentTimestamp, timestamps, 0);
writeReverseOrderedLong(currentTimestamp, timestamps, 8);
} else {
writeReverseOrderedLong(currentTimestamp, timestamps, 8);
}
domainWriteBatch.put(domainEntryKey, timestamps);
ownerWriteBatch.put(ownerLookupEntryKey, timestamps);
domaindb.write(domainWriteBatch);
ownerdb.write(ownerWriteBatch);
} finally {
IOUtils.cleanup(LOG, domainWriteBatch);
IOUtils.cleanup(LOG, ownerWriteBatch);
}
}
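  // Timestamp encoding sketch for the block above: the 16-byte value packs
  // two reverse-ordered longs, created time at offset 0 and modified time
  // at offset 8. The first put for a domain writes both; every subsequent
  // put rewrites only offset 8, so the created time never changes.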
/**
* Creates a domain entity key with column name suffix, of the form
* DOMAIN_ENTRY_PREFIX + domain id + column name.
*/
private static byte[] createDomainEntryKey(String domainId, byte[] columnName)
throws IOException {
return KeyBuilder.newInstance().add(domainId).add(columnName).getBytes();
}
/**
* Creates an owner lookup key with column name suffix, of the form
* OWNER_LOOKUP_PREFIX + owner + domain id + column name.
*/
private static byte[] createOwnerLookupKey(String owner, String domainId,
byte[] columnName) throws IOException {
return KeyBuilder.newInstance().add(owner).add(domainId).add(columnName)
.getBytes();
}
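  // Key layout sketch (encoding details assumed from KeyBuilder): domain
  // rows are keyed by domainId + columnName in domaindb, and owner rows by
  // owner + domainId + columnName in ownerdb, so a prefix seek on an owner
  // visits all of that owner's domains as one contiguous range, which
  // getDomains() below relies on.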
@Override
public TimelineDomain getDomain(String domainId) throws IOException {
DBIterator iterator = null;
try {
byte[] prefix = KeyBuilder.newInstance().add(domainId)
.getBytesForLookup();
iterator = domaindb.iterator();
iterator.seek(prefix);
return getTimelineDomain(iterator, domainId, prefix);
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
@Override
public TimelineDomains getDomains(String owner) throws IOException {
DBIterator iterator = null;
try {
byte[] prefix = KeyBuilder.newInstance().add(owner).getBytesForLookup();
List<TimelineDomain> domains = new ArrayList<TimelineDomain>();
for (iterator = ownerdb.iterator(), iterator.seek(prefix); iterator
.hasNext();) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)) {
break;
}
      // Parse the domain ID out of the owner-prefixed row key
KeyParser kp = new KeyParser(key, prefix.length);
String domainId = kp.getNextString();
byte[] prefixExt = KeyBuilder.newInstance().add(owner).add(domainId)
.getBytesForLookup();
TimelineDomain domainToReturn = getTimelineDomain(iterator, domainId,
prefixExt);
if (domainToReturn != null) {
domains.add(domainToReturn);
}
}
// Sort the domains to return
Collections.sort(domains, new Comparator<TimelineDomain>() {
@Override
public int compare(TimelineDomain domain1, TimelineDomain domain2) {
int result = domain2.getCreatedTime().compareTo(
domain1.getCreatedTime());
if (result == 0) {
return domain2.getModifiedTime().compareTo(
domain1.getModifiedTime());
} else {
return result;
}
}
});
TimelineDomains domainsToReturn = new TimelineDomains();
domainsToReturn.addDomains(domains);
return domainsToReturn;
} finally {
IOUtils.cleanup(LOG, iterator);
}
}
private static TimelineDomain getTimelineDomain(DBIterator iterator,
String domainId, byte[] prefix) throws IOException {
// Iterate over all the rows whose key starts with prefix to retrieve the
// domain information.
TimelineDomain domain = new TimelineDomain();
domain.setId(domainId);
boolean noRows = true;
for (; iterator.hasNext(); iterator.next()) {
byte[] key = iterator.peekNext().getKey();
if (!prefixMatches(prefix, prefix.length, key)) {
break;
}
      noRows = false;
byte[] value = iterator.peekNext().getValue();
if (value != null && value.length > 0) {
if (key[prefix.length] == DESCRIPTION_COLUMN[0]) {
domain.setDescription(new String(value, UTF_8));
} else if (key[prefix.length] == OWNER_COLUMN[0]) {
domain.setOwner(new String(value, UTF_8));
} else if (key[prefix.length] == READER_COLUMN[0]) {
domain.setReaders(new String(value, UTF_8));
} else if (key[prefix.length] == WRITER_COLUMN[0]) {
domain.setWriters(new String(value, UTF_8));
} else if (key[prefix.length] == TIMESTAMP_COLUMN[0]) {
domain.setCreatedTime(readReverseOrderedLong(value, 0));
domain.setModifiedTime(readReverseOrderedLong(value, 8));
} else {
LOG.error("Unrecognized domain column: " + key[prefix.length]);
}
}
}
if (noRows) {
return null;
} else {
return domain;
}
}
}
| 71,670 | 38.64104 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline;
import java.io.IOException;
import java.util.Collection;
import java.util.EnumSet;
import java.util.Set;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
/**
* This interface is for retrieving timeline information.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface TimelineReader {
  /**
   * Possible fields to retrieve for {@link #getEntities} and
   * {@link #getEntity}.
   */
enum Field {
EVENTS,
RELATED_ENTITIES,
PRIMARY_FILTERS,
OTHER_INFO,
LAST_EVENT_ONLY
}
/**
* Default limit for {@link #getEntities} and {@link #getEntityTimelines}.
*/
final long DEFAULT_LIMIT = 100;
/**
* This method retrieves a list of entity information, {@link TimelineEntity},
* sorted by the starting timestamp for the entity, descending. The starting
* timestamp of an entity is a timestamp specified by the client. If it is not
* explicitly specified, it will be chosen by the store to be the earliest
* timestamp of the events received in the first put for the entity.
*
* @param entityType
* The type of entities to return (required).
* @param limit
* A limit on the number of entities to return. If null, defaults to
* {@link #DEFAULT_LIMIT}.
* @param windowStart
* The earliest start timestamp to retrieve (exclusive). If null,
* defaults to retrieving all entities until the limit is reached.
* @param windowEnd
* The latest start timestamp to retrieve (inclusive). If null,
* defaults to {@link Long#MAX_VALUE}
* @param fromId
* If fromId is not null, retrieve entities earlier than and
* including the specified ID. If no start time is found for the
* specified ID, an empty list of entities will be returned. The
* windowEnd parameter will take precedence if the start time of this
* entity falls later than windowEnd.
* @param fromTs
* If fromTs is not null, ignore entities that were inserted into the
* store after the given timestamp. The entity's insert timestamp
* used for this comparison is the store's system time when the first
* put for the entity was received (not the entity's start time).
* @param primaryFilter
* Retrieves only entities that have the specified primary filter. If
* null, retrieves all entities. This is an indexed retrieval, and no
* entities that do not match the filter are scanned.
* @param secondaryFilters
* Retrieves only entities that have exact matches for all the
* specified filters in their primary filters or other info. This is
* not an indexed retrieval, so all entities are scanned but only
* those matching the filters are returned.
* @param fieldsToRetrieve
* Specifies which fields of the entity object to retrieve (see
* {@link Field}). If the set of fields contains
* {@link Field#LAST_EVENT_ONLY} and not {@link Field#EVENTS}, the
* most recent event for each entity is retrieved. If null, retrieves
* all fields.
   * @return A {@link TimelineEntities} object.
* @throws IOException
*/
TimelineEntities getEntities(String entityType,
Long limit, Long windowStart, Long windowEnd, String fromId, Long fromTs,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fieldsToRetrieve, CheckAcl checkAcl) throws IOException;
/**
* This method retrieves the entity information for a given entity.
*
* @param entityId
* The entity whose information will be retrieved.
* @param entityType
* The type of the entity.
* @param fieldsToRetrieve
* Specifies which fields of the entity object to retrieve (see
* {@link Field}). If the set of fields contains
* {@link Field#LAST_EVENT_ONLY} and not {@link Field#EVENTS}, the
* most recent event for each entity is retrieved. If null, retrieves
* all fields.
   * @return A {@link TimelineEntity} object.
* @throws IOException
*/
TimelineEntity getEntity(String entityId, String entityType, EnumSet<Field>
fieldsToRetrieve) throws IOException;
/**
* This method retrieves the events for a list of entities all of the same
* entity type. The events for each entity are sorted in order of their
* timestamps, descending.
*
* @param entityType
* The type of entities to retrieve events for.
* @param entityIds
* The entity IDs to retrieve events for.
* @param limit
* A limit on the number of events to return for each entity. If
* null, defaults to {@link #DEFAULT_LIMIT} events per entity.
* @param windowStart
* If not null, retrieves only events later than the given time
* (exclusive)
* @param windowEnd
* If not null, retrieves only events earlier than the given time
* (inclusive)
* @param eventTypes
* Restricts the events returned to the given types. If null, events
* of all types will be returned.
   * @return A {@link TimelineEvents} object.
* @throws IOException
*/
TimelineEvents getEntityTimelines(String entityType,
SortedSet<String> entityIds, Long limit, Long windowStart,
Long windowEnd, Set<String> eventTypes) throws IOException;
/**
* This method retrieves the domain information for a given ID.
*
* @return a {@link TimelineDomain} object.
* @throws IOException
*/
TimelineDomain getDomain(
String domainId) throws IOException;
/**
* This method retrieves all the domains that belong to a given owner.
   * The domains are sorted first by created time and then by modified
   * time, both in descending order.
*
* @param owner
* the domain owner
   * @return a {@link TimelineDomains} object.
* @throws IOException
*/
TimelineDomains getDomains(String owner) throws IOException;
}
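// A hedged caller sketch (reader instance and filter values are invented;
// the argument order follows the getEntities signature above):
//
//   TimelineEntities latest = reader.getEntities(
//       "MY_ENTITY_TYPE", // entityType (required)
//       50L, // limit
//       null, null, // windowStart/windowEnd: no time window
//       null, null, // fromId/fromTs: start from the newest entity
//       new NameValuePair("user", "alice"), // indexed primary filter
//       null, // no secondary filters
//       EnumSet.of(Field.EVENTS, Field.PRIMARY_FILTERS),
//       null); // no ACL check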
| 7,536 | 40.872222 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService.TimelineDelegationTokenSecretManager;
@Private
@Unstable
public class TimelineAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
private static TimelineDelegationTokenSecretManager secretManager;
@Override
public void init(FilterConfig filterConfig) throws ServletException {
filterConfig.getServletContext().setAttribute(
DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
secretManager);
super.init(filterConfig);
}
public static void setTimelineDelegationTokenSecretManager(
TimelineDelegationTokenSecretManager secretManager) {
TimelineAuthenticationFilter.secretManager = secretManager;
}
}
| 1,958 | 38.18 | 136 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* Initializes {@link TimelineAuthenticationFilter} which provides support for
* Kerberos HTTP SPNEGO authentication.
* <p>
* It enables Kerberos HTTP SPNEGO plus delegation token authentication for the
* timeline server.
* <p>
* Refer to the {@code core-default.xml} file, after the comment 'HTTP
* Authentication' for details on the configuration options. All related
* configuration properties have {@code hadoop.http.authentication.} as prefix.
*/
public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
/**
   * The configuration prefix of timeline HTTP authentication.
*/
public static final String PREFIX = "yarn.timeline-service.http-authentication.";
@VisibleForTesting
Map<String, String> filterConfig;
/**
 * Initializes {@link TimelineAuthenticationFilter}.
 * <p>
 * Propagates to the {@link TimelineAuthenticationFilter} configuration all
 * YARN configuration properties prefixed with {@value #PREFIX}.
*
* @param container
* The filter container
* @param conf
* Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
filterConfig = new HashMap<String, String>();
    // Set the cookie path to root '/' so it is used for all resources.
filterConfig.put(TimelineAuthenticationFilter.COOKIE_PATH, "/");
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
String value = conf.get(name);
name = name.substring("hadoop.".length());
filterConfig.put(name, value);
}
}
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(PREFIX)) {
// yarn.timeline-service.http-authentication.proxyuser will override
// hadoop.proxyuser
String value = conf.get(name);
name = name.substring(PREFIX.length());
filterConfig.put(name, value);
}
}
    String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
    if (PseudoAuthenticationHandler.TYPE.equals(authType)) {
      filterConfig.put(AuthenticationFilter.AUTH_TYPE,
          PseudoDelegationTokenAuthenticationHandler.class.getName());
    } else if (KerberosAuthenticationHandler.TYPE.equals(authType)) {
filterConfig.put(AuthenticationFilter.AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
// Resolve _HOST into bind address
String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
String principal =
filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
if (principal != null) {
try {
principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
} catch (IOException ex) {
throw new RuntimeException(
"Could not resolve Kerberos principal name: " + ex.toString(), ex);
}
filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
principal);
}
}
filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
TimelineDelegationTokenIdentifier.KIND_NAME.toString());
container.addGlobalFilter("Timeline Authentication Filter",
TimelineAuthenticationFilter.class.getName(),
filterConfig);
}
}
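// Example of the configuration this initializer consumes (all values
// illustrative): with
//   yarn.timeline-service.http-authentication.type = kerberos
//   yarn.timeline-service.http-authentication.kerberos.principal = HTTP/_HOST@EXAMPLE.COM
// the PREFIX is stripped, "_HOST" is resolved against the HTTP bind
// address, and the filter is registered with AUTH_TYPE set to the
// Kerberos delegation token handler class.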
| 5,301 | 39.784615 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import java.io.IOException;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState;
/**
 * The service wrapper of {@link TimelineDelegationTokenSecretManager}.
*/
@Private
@Unstable
public class TimelineDelegationTokenSecretManagerService extends
AbstractService {
private TimelineDelegationTokenSecretManager secretManager = null;
private TimelineStateStore stateStore = null;
public TimelineDelegationTokenSecretManagerService() {
super(TimelineDelegationTokenSecretManagerService.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_RECOVERY_ENABLED,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_RECOVERY_ENABLED)) {
stateStore = createStateStore(conf);
stateStore.init(conf);
}
long secretKeyInterval =
conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL,
YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL);
long tokenMaxLifetime =
conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME,
YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME);
long tokenRenewInterval =
conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
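    // The expired-token scan interval is fixed at one hour (3600000 ms);
    // only the other three intervals are configurable.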
secretManager = new TimelineDelegationTokenSecretManager(secretKeyInterval,
tokenMaxLifetime, tokenRenewInterval, 3600000, stateStore);
super.init(conf);
}
@Override
protected void serviceStart() throws Exception {
if (stateStore != null) {
stateStore.start();
TimelineServiceState state = stateStore.loadState();
secretManager.recover(state);
}
secretManager.startThreads();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (stateStore != null) {
stateStore.stop();
}
secretManager.stopThreads();
    super.serviceStop();
}
protected TimelineStateStore createStateStore(
Configuration conf) {
return ReflectionUtils.newInstance(
conf.getClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
LeveldbTimelineStateStore.class,
TimelineStateStore.class), conf);
}
/**
   * Get the instance of {@link TimelineDelegationTokenSecretManager}.
   *
   * @return the instance of {@link TimelineDelegationTokenSecretManager}
*/
public TimelineDelegationTokenSecretManager
getTimelineDelegationTokenSecretManager() {
return secretManager;
}
@Private
@Unstable
public static class TimelineDelegationTokenSecretManager extends
AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
public static final Log LOG =
LogFactory.getLog(TimelineDelegationTokenSecretManager.class);
private TimelineStateStore stateStore;
/**
* Create a timeline secret manager
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
* in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are
* scanned for expired tokens in milliseconds
* @param stateStore timeline service state store
*/
public TimelineDelegationTokenSecretManager(
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval,
TimelineStateStore stateStore) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.stateStore = stateStore;
}
@Override
public TimelineDelegationTokenIdentifier createIdentifier() {
return new TimelineDelegationTokenIdentifier();
}
@Override
protected void storeNewMasterKey(DelegationKey key) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing master key " + key.getKeyId());
}
try {
if (stateStore != null) {
stateStore.storeTokenMasterKey(key);
}
} catch (IOException e) {
LOG.error("Unable to store master key " + key.getKeyId(), e);
}
}
@Override
protected void removeStoredMasterKey(DelegationKey key) {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing master key " + key.getKeyId());
}
try {
if (stateStore != null) {
stateStore.removeTokenMasterKey(key);
}
} catch (IOException e) {
LOG.error("Unable to remove master key " + key.getKeyId(), e);
}
}
@Override
protected void storeNewToken(TimelineDelegationTokenIdentifier tokenId,
long renewDate) {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
try {
if (stateStore != null) {
stateStore.storeToken(tokenId, renewDate);
}
} catch (IOException e) {
LOG.error("Unable to store token " + tokenId.getSequenceNumber(), e);
}
}
@Override
protected void removeStoredToken(TimelineDelegationTokenIdentifier tokenId)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
try {
if (stateStore != null) {
stateStore.removeToken(tokenId);
}
} catch (IOException e) {
LOG.error("Unable to remove token " + tokenId.getSequenceNumber(), e);
}
}
@Override
protected void updateStoredToken(TimelineDelegationTokenIdentifier tokenId,
long renewDate) {
if (LOG.isDebugEnabled()) {
LOG.debug("Updating token " + tokenId.getSequenceNumber());
}
try {
if (stateStore != null) {
stateStore.updateToken(tokenId, renewDate);
}
} catch (IOException e) {
LOG.error("Unable to update token " + tokenId.getSequenceNumber(), e);
}
}
public void recover(TimelineServiceState state) throws IOException {
LOG.info("Recovering " + getClass().getSimpleName());
for (DelegationKey key : state.getTokenMasterKeyState()) {
addKey(key);
}
this.delegationTokenSequenceNumber = state.getLatestSequenceNumber();
for (Entry<TimelineDelegationTokenIdentifier, Long> entry :
state.getTokenState().entrySet()) {
addPersistedDelegationToken(entry.getKey(), entry.getValue());
}
}
}
}
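// Recovery flow sketch (as implemented above): on restart, serviceStart()
// loads TimelineServiceState from the state store, recover() replays the
// persisted master keys, restores the latest delegation token sequence
// number, and re-adds each (token, renewDate) pair; only then does
// startThreads() resume key rolling and expired-token scanning.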
| 8,553 | 34.493776 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.server.timeline.EntityIdentifier;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.util.StringHelper;
import com.google.common.annotations.VisibleForTesting;
/**
 * <code>TimelineACLsManager</code> checks entity-level timeline data access.
*/
@Private
public class TimelineACLsManager {
private static final Log LOG = LogFactory.getLog(TimelineACLsManager.class);
private static final int DOMAIN_ACCESS_ENTRY_CACHE_SIZE = 100;
private AdminACLsManager adminAclsManager;
private Map<String, AccessControlListExt> aclExts;
private TimelineStore store;
@SuppressWarnings("unchecked")
public TimelineACLsManager(Configuration conf) {
this.adminAclsManager = new AdminACLsManager(conf);
aclExts = Collections.synchronizedMap(
new LRUMap(DOMAIN_ACCESS_ENTRY_CACHE_SIZE));
}
public void setTimelineStore(TimelineStore store) {
this.store = store;
}
private AccessControlListExt loadDomainFromTimelineStore(
String domainId) throws IOException {
if (store == null) {
return null;
}
TimelineDomain domain = store.getDomain(domainId);
if (domain == null) {
return null;
} else {
return putDomainIntoCache(domain);
}
}
public void replaceIfExist(TimelineDomain domain) {
if (aclExts.containsKey(domain.getId())) {
putDomainIntoCache(domain);
}
}
private AccessControlListExt putDomainIntoCache(
TimelineDomain domain) {
Map<ApplicationAccessType, AccessControlList> acls
= new HashMap<ApplicationAccessType, AccessControlList>(2);
acls.put(ApplicationAccessType.VIEW_APP,
new AccessControlList(StringHelper.cjoin(domain.getReaders())));
acls.put(ApplicationAccessType.MODIFY_APP,
new AccessControlList(StringHelper.cjoin(domain.getWriters())));
AccessControlListExt aclExt =
new AccessControlListExt(domain.getOwner(), acls);
aclExts.put(domain.getId(), aclExt);
return aclExt;
}
public boolean checkAccess(UserGroupInformation callerUGI,
ApplicationAccessType applicationAccessType,
TimelineEntity entity) throws YarnException, IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Verifying the access of "
+ (callerUGI == null ? null : callerUGI.getShortUserName())
+ " on the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType()));
}
if (!adminAclsManager.areACLsEnabled()) {
return true;
}
    // Find the domain owner and its ACLs
AccessControlListExt aclExt = aclExts.get(entity.getDomainId());
if (aclExt == null) {
aclExt = loadDomainFromTimelineStore(entity.getDomainId());
}
if (aclExt == null) {
throw new YarnException("Domain information of the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType())
+ " doesn't exist.");
}
String owner = aclExt.owner;
AccessControlList domainACL = aclExt.acls.get(applicationAccessType);
if (domainACL == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("ACL not found for access-type " + applicationAccessType
+ " for domain " + entity.getDomainId() + " owned by "
+ owner + ". Using default ["
+ YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
}
domainACL =
new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
}
if (callerUGI != null
&& (adminAclsManager.isAdmin(callerUGI) ||
callerUGI.getShortUserName().equals(owner) ||
domainACL.isUserAllowed(callerUGI))) {
return true;
}
return false;
}
public boolean checkAccess(UserGroupInformation callerUGI,
TimelineDomain domain) throws YarnException, IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Verifying the access of "
+ (callerUGI == null ? null : callerUGI.getShortUserName())
+ " on the timeline domain " + domain);
}
if (!adminAclsManager.areACLsEnabled()) {
return true;
}
String owner = domain.getOwner();
if (owner == null || owner.length() == 0) {
throw new YarnException("Owner information of the timeline domain "
+ domain.getId() + " is corrupted.");
}
if (callerUGI != null
&& (adminAclsManager.isAdmin(callerUGI) ||
callerUGI.getShortUserName().equals(owner))) {
return true;
}
return false;
}
@Private
@VisibleForTesting
public AdminACLsManager
setAdminACLsManager(AdminACLsManager adminAclsManager) {
AdminACLsManager oldAdminACLsManager = this.adminAclsManager;
this.adminAclsManager = adminAclsManager;
return oldAdminACLsManager;
}
private static class AccessControlListExt {
private String owner;
private Map<ApplicationAccessType, AccessControlList> acls;
public AccessControlListExt(
String owner, Map<ApplicationAccessType, AccessControlList> acls) {
this.owner = owner;
this.acls = acls;
}
}
}
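// A hedged usage sketch (instances and names invented):
//
//   TimelineACLsManager aclsManager = new TimelineACLsManager(conf);
//   aclsManager.setTimelineStore(store);
//   boolean canView = aclsManager.checkAccess(
//       callerUGI, ApplicationAccessType.VIEW_APP, entity);
//
// Access is granted to admins, the domain owner, and users allowed by the
// domain's reader ACL; domain lookups are cached in a 100-entry LRU map
// that replaceIfExist(...) keeps in sync with domain updates.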
| 6,803 | 34.253886 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/authorize/TimelinePolicyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security.authorize;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
* {@link PolicyProvider} for YARN timeline server protocols.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TimelinePolicyProvider extends PolicyProvider {
@Override
public Service[] getServices() {
return new Service[] {
new Service(
YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL,
ApplicationHistoryProtocolPB.class)
};
}
}
| 1,667 | 36.066667 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.google.common.annotations.VisibleForTesting;
public class CrossOriginFilter implements Filter {
private static final Log LOG = LogFactory.getLog(CrossOriginFilter.class);
// HTTP CORS Request Headers
static final String ORIGIN = "Origin";
static final String ACCESS_CONTROL_REQUEST_METHOD =
"Access-Control-Request-Method";
static final String ACCESS_CONTROL_REQUEST_HEADERS =
"Access-Control-Request-Headers";
// HTTP CORS Response Headers
static final String ACCESS_CONTROL_ALLOW_ORIGIN =
"Access-Control-Allow-Origin";
static final String ACCESS_CONTROL_ALLOW_CREDENTIALS =
"Access-Control-Allow-Credentials";
static final String ACCESS_CONTROL_ALLOW_METHODS =
"Access-Control-Allow-Methods";
static final String ACCESS_CONTROL_ALLOW_HEADERS =
"Access-Control-Allow-Headers";
static final String ACCESS_CONTROL_MAX_AGE = "Access-Control-Max-Age";
// Filter configuration
public static final String ALLOWED_ORIGINS = "allowed-origins";
public static final String ALLOWED_ORIGINS_DEFAULT = "*";
public static final String ALLOWED_METHODS = "allowed-methods";
public static final String ALLOWED_METHODS_DEFAULT = "GET,POST,HEAD";
public static final String ALLOWED_HEADERS = "allowed-headers";
public static final String ALLOWED_HEADERS_DEFAULT =
"X-Requested-With,Content-Type,Accept,Origin";
public static final String MAX_AGE = "max-age";
public static final String MAX_AGE_DEFAULT = "1800";
private List<String> allowedMethods = new ArrayList<String>();
private List<String> allowedHeaders = new ArrayList<String>();
private List<String> allowedOrigins = new ArrayList<String>();
private boolean allowAllOrigins = true;
private String maxAge;
@Override
public void init(FilterConfig filterConfig) throws ServletException {
initializeAllowedMethods(filterConfig);
initializeAllowedHeaders(filterConfig);
initializeAllowedOrigins(filterConfig);
initializeMaxAge(filterConfig);
}
@Override
public void doFilter(ServletRequest req, ServletResponse res,
FilterChain chain)
throws IOException, ServletException {
doCrossFilter((HttpServletRequest) req, (HttpServletResponse) res);
chain.doFilter(req, res);
}
@Override
public void destroy() {
allowedMethods.clear();
allowedHeaders.clear();
allowedOrigins.clear();
}
private void doCrossFilter(HttpServletRequest req, HttpServletResponse res) {
String originsList = encodeHeader(req.getHeader(ORIGIN));
if (!isCrossOrigin(originsList)) {
if(LOG.isDebugEnabled()) {
LOG.debug("Header origin is null. Returning");
}
return;
}
if (!areOriginsAllowed(originsList)) {
if(LOG.isDebugEnabled()) {
LOG.debug("Header origins '" + originsList + "' not allowed. Returning");
}
return;
}
String accessControlRequestMethod =
req.getHeader(ACCESS_CONTROL_REQUEST_METHOD);
if (!isMethodAllowed(accessControlRequestMethod)) {
if(LOG.isDebugEnabled()) {
LOG.debug("Access control method '" + accessControlRequestMethod +
"' not allowed. Returning");
}
return;
}
String accessControlRequestHeaders =
req.getHeader(ACCESS_CONTROL_REQUEST_HEADERS);
if (!areHeadersAllowed(accessControlRequestHeaders)) {
if(LOG.isDebugEnabled()) {
LOG.debug("Access control headers '" + accessControlRequestHeaders +
"' not allowed. Returning");
}
return;
}
if(LOG.isDebugEnabled()) {
LOG.debug("Completed cross origin filter checks. Populating " +
"HttpServletResponse");
}
res.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, originsList);
res.setHeader(ACCESS_CONTROL_ALLOW_CREDENTIALS, Boolean.TRUE.toString());
res.setHeader(ACCESS_CONTROL_ALLOW_METHODS, getAllowedMethodsHeader());
res.setHeader(ACCESS_CONTROL_ALLOW_HEADERS, getAllowedHeadersHeader());
res.setHeader(ACCESS_CONTROL_MAX_AGE, maxAge);
}
@VisibleForTesting
String getAllowedHeadersHeader() {
return StringUtils.join(allowedHeaders, ',');
}
@VisibleForTesting
String getAllowedMethodsHeader() {
return StringUtils.join(allowedMethods, ',');
}
private void initializeAllowedMethods(FilterConfig filterConfig) {
String allowedMethodsConfig =
filterConfig.getInitParameter(ALLOWED_METHODS);
if (allowedMethodsConfig == null) {
allowedMethodsConfig = ALLOWED_METHODS_DEFAULT;
}
allowedMethods.addAll(
Arrays.asList(allowedMethodsConfig.trim().split("\\s*,\\s*")));
LOG.info("Allowed Methods: " + getAllowedMethodsHeader());
}
private void initializeAllowedHeaders(FilterConfig filterConfig) {
String allowedHeadersConfig =
filterConfig.getInitParameter(ALLOWED_HEADERS);
if (allowedHeadersConfig == null) {
allowedHeadersConfig = ALLOWED_HEADERS_DEFAULT;
}
allowedHeaders.addAll(
Arrays.asList(allowedHeadersConfig.trim().split("\\s*,\\s*")));
LOG.info("Allowed Headers: " + getAllowedHeadersHeader());
}
private void initializeAllowedOrigins(FilterConfig filterConfig) {
String allowedOriginsConfig =
filterConfig.getInitParameter(ALLOWED_ORIGINS);
if (allowedOriginsConfig == null) {
allowedOriginsConfig = ALLOWED_ORIGINS_DEFAULT;
}
allowedOrigins.addAll(
Arrays.asList(allowedOriginsConfig.trim().split("\\s*,\\s*")));
allowAllOrigins = allowedOrigins.contains("*");
LOG.info("Allowed Origins: " + StringUtils.join(allowedOrigins, ','));
LOG.info("Allow All Origins: " + allowAllOrigins);
}
private void initializeMaxAge(FilterConfig filterConfig) {
maxAge = filterConfig.getInitParameter(MAX_AGE);
if (maxAge == null) {
maxAge = MAX_AGE_DEFAULT;
}
LOG.info("Max Age: " + maxAge);
}
static String encodeHeader(final String header) {
if (header == null) {
return null;
}
// Protect against HTTP response splitting vulnerability
// since value is written as part of the response header
// Ensure this header only has one header by removing
// CRs and LFs
return header.split("\n|\r")[0].trim();
}
static boolean isCrossOrigin(String originsList) {
return originsList != null;
}
@VisibleForTesting
boolean areOriginsAllowed(String originsList) {
if (allowAllOrigins) {
return true;
}
String[] origins = originsList.trim().split("\\s+");
for (String origin : origins) {
for (String allowedOrigin : allowedOrigins) {
if (allowedOrigin.contains("*")) {
String regex = allowedOrigin.replace(".", "\\.").replace("*", ".*");
Pattern p = Pattern.compile(regex);
Matcher m = p.matcher(origin);
if (m.matches()) {
return true;
}
} else if (allowedOrigin.equals(origin)) {
return true;
}
}
}
return false;
}
private boolean areHeadersAllowed(String accessControlRequestHeaders) {
if (accessControlRequestHeaders == null) {
return true;
}
    String[] headers = accessControlRequestHeaders.trim().split("\\s*,\\s*");
return allowedHeaders.containsAll(Arrays.asList(headers));
}
private boolean isMethodAllowed(String accessControlRequestMethod) {
if (accessControlRequestMethod == null) {
return true;
}
return allowedMethods.contains(accessControlRequestMethod);
}
}
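// Illustrative CORS exchange handled by doCrossFilter (origin invented):
//   Request:  Origin: https://ui.example.com
//             Access-Control-Request-Method: GET
//   Response: Access-Control-Allow-Origin: https://ui.example.com
//             Access-Control-Allow-Credentials: true
//             Access-Control-Allow-Methods: GET,POST,HEAD
//             Access-Control-Max-Age: 1800
// A wildcard entry such as "*.example.com" in allowed-origins becomes the
// regex ".*\.example\.com" in areOriginsAllowed above.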
| 8,952 | 33.434615 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.timeline.EntityIdentifier;
import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
import org.apache.hadoop.yarn.server.timeline.NameValuePair;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.ForbiddenException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
@Singleton
@Path("/ws/v1/timeline")
//TODO: support XML serialization/deserialization
public class TimelineWebServices {
private static final Log LOG = LogFactory.getLog(TimelineWebServices.class);
private TimelineDataManager timelineDataManager;
@Inject
public TimelineWebServices(TimelineDataManager timelineDataManager) {
this.timelineDataManager = timelineDataManager;
}
/**
* Return the description of the timeline web services.
*/
@GET
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelineAbout about(
@Context HttpServletRequest req,
@Context HttpServletResponse res) {
init(res);
return TimelineUtils.createTimelineAbout("Timeline API");
}
/**
* Return a list of entities that match the given parameters.
*/
@GET
@Path("/{entityType}")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelineEntities getEntities(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@QueryParam("primaryFilter") String primaryFilter,
@QueryParam("secondaryFilter") String secondaryFilter,
@QueryParam("windowStart") String windowStart,
@QueryParam("windowEnd") String windowEnd,
@QueryParam("fromId") String fromId,
@QueryParam("fromTs") String fromTs,
@QueryParam("limit") String limit,
@QueryParam("fields") String fields) {
init(res);
try {
return timelineDataManager.getEntities(
parseStr(entityType),
parsePairStr(primaryFilter, ":"),
parsePairsStr(secondaryFilter, ",", ":"),
parseLongStr(windowStart),
parseLongStr(windowEnd),
parseStr(fromId),
parseLongStr(fromTs),
parseLongStr(limit),
parseFieldsStr(fields, ","),
getUser(req));
} catch (NumberFormatException e) {
throw new BadRequestException(
"windowStart, windowEnd or limit is not a numeric value.");
} catch (IllegalArgumentException e) {
throw new BadRequestException("requested invalid field.");
} catch (Exception e) {
LOG.error("Error getting entities", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
}
/**
* Return a single entity of the given entity type and Id.
*/
@GET
@Path("/{entityType}/{entityId}")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@PathParam("entityId") String entityId,
@QueryParam("fields") String fields) {
init(res);
TimelineEntity entity = null;
try {
entity = timelineDataManager.getEntity(
parseStr(entityType),
parseStr(entityId),
parseFieldsStr(fields, ","),
getUser(req));
} catch (IllegalArgumentException e) {
      throw new BadRequestException("Requested invalid field.");
} catch (Exception e) {
LOG.error("Error getting entity", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
if (entity == null) {
throw new NotFoundException("Timeline entity "
+ new EntityIdentifier(parseStr(entityId), parseStr(entityType))
+ " is not found");
}
return entity;
}
/**
* Return the events that match the given parameters.
*/
@GET
@Path("/{entityType}/events")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelineEvents getEvents(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@QueryParam("entityId") String entityId,
@QueryParam("eventType") String eventType,
@QueryParam("windowStart") String windowStart,
@QueryParam("windowEnd") String windowEnd,
@QueryParam("limit") String limit) {
init(res);
try {
return timelineDataManager.getEvents(
parseStr(entityType),
parseArrayStr(entityId, ","),
parseArrayStr(eventType, ","),
parseLongStr(windowStart),
parseLongStr(windowEnd),
parseLongStr(limit),
getUser(req));
} catch (NumberFormatException e) {
throw new BadRequestException(
"windowStart, windowEnd or limit is not a numeric value.");
} catch (Exception e) {
LOG.error("Error getting entity timelines", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
}
/**
* Store the given entities into the timeline store, and return the errors
* that happen during storing.
*/
@POST
@Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelinePutResponse postEntities(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
TimelineEntities entities) {
init(res);
UserGroupInformation callerUGI = getUser(req);
if (callerUGI == null) {
String msg = "The owner of the posted timeline entities is not set";
LOG.error(msg);
throw new ForbiddenException(msg);
}
try {
return timelineDataManager.postEntities(entities, callerUGI);
} catch (Exception e) {
LOG.error("Error putting entities", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
}
/**
* Store the given domain into the timeline store, and return the errors
* that happen during storing.
*/
@PUT
@Path("/domain")
@Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelinePutResponse putDomain(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
TimelineDomain domain) {
init(res);
UserGroupInformation callerUGI = getUser(req);
if (callerUGI == null) {
String msg = "The owner of the posted timeline domain is not set";
LOG.error(msg);
throw new ForbiddenException(msg);
}
domain.setOwner(callerUGI.getShortUserName());
try {
timelineDataManager.putDomain(domain, callerUGI);
} catch (YarnException e) {
// The user doesn't have the access to override the existing domain.
LOG.error(e.getMessage(), e);
throw new ForbiddenException(e);
} catch (IOException e) {
LOG.error("Error putting domain", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
return new TimelinePutResponse();
}
/**
* Return a single domain of the given domain Id.
*/
@GET
@Path("/domain/{domainId}")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelineDomain getDomain(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("domainId") String domainId) {
init(res);
domainId = parseStr(domainId);
if (domainId == null || domainId.length() == 0) {
throw new BadRequestException("Domain ID is not specified.");
}
TimelineDomain domain = null;
try {
domain = timelineDataManager.getDomain(
parseStr(domainId), getUser(req));
} catch (Exception e) {
LOG.error("Error getting domain", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
if (domain == null) {
throw new NotFoundException("Timeline domain ["
+ domainId + "] is not found");
}
return domain;
}
/**
* Return a list of domains of the given owner.
*/
@GET
@Path("/domain")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public TimelineDomains getDomains(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@QueryParam("owner") String owner) {
init(res);
owner = parseStr(owner);
UserGroupInformation callerUGI = getUser(req);
if (owner == null || owner.length() == 0) {
if (callerUGI == null) {
throw new BadRequestException("Domain owner is not specified.");
} else {
// By default it's going to list the caller's domains
owner = callerUGI.getShortUserName();
}
}
try {
return timelineDataManager.getDomains(owner, callerUGI);
} catch (Exception e) {
LOG.error("Error getting domains", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
}
private void init(HttpServletResponse response) {
response.setContentType(null);
}
private static UserGroupInformation getUser(HttpServletRequest req) {
String remoteUser = req.getRemoteUser();
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
return callerUGI;
}
private static SortedSet<String> parseArrayStr(String str, String delimiter) {
if (str == null) {
return null;
}
SortedSet<String> strSet = new TreeSet<String>();
String[] strs = str.split(delimiter);
for (String aStr : strs) {
strSet.add(aStr.trim());
}
return strSet;
}
private static NameValuePair parsePairStr(String str, String delimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(delimiter, 2);
try {
return new NameValuePair(strs[0].trim(),
GenericObjectMapper.OBJECT_READER.readValue(strs[1].trim()));
} catch (Exception e) {
// didn't work as an Object, keep it as a String
return new NameValuePair(strs[0].trim(), strs[1].trim());
}
}
private static Collection<NameValuePair> parsePairsStr(
String str, String aDelimiter, String pDelimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(aDelimiter);
Set<NameValuePair> pairs = new HashSet<NameValuePair>();
for (String aStr : strs) {
pairs.add(parsePairStr(aStr, pDelimiter));
}
return pairs;
}
private static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(delimiter);
List<Field> fieldList = new ArrayList<Field>();
for (String s : strs) {
s = StringUtils.toUpperCase(s.trim());
if (s.equals("EVENTS")) {
fieldList.add(Field.EVENTS);
} else if (s.equals("LASTEVENTONLY")) {
fieldList.add(Field.LAST_EVENT_ONLY);
} else if (s.equals("RELATEDENTITIES")) {
fieldList.add(Field.RELATED_ENTITIES);
} else if (s.equals("PRIMARYFILTERS")) {
fieldList.add(Field.PRIMARY_FILTERS);
} else if (s.equals("OTHERINFO")) {
fieldList.add(Field.OTHER_INFO);
} else {
throw new IllegalArgumentException("Requested nonexistent field " + s);
}
}
if (fieldList.size() == 0) {
return null;
}
Field f1 = fieldList.remove(fieldList.size() - 1);
if (fieldList.size() == 0) {
return EnumSet.of(f1);
} else {
return EnumSet.of(f1, fieldList.toArray(new Field[fieldList.size()]));
}
}
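  // Example (illustrative): parseFieldsStr("EVENTS,OTHERINFO", ",") returns
  // EnumSet.of(Field.EVENTS, Field.OTHER_INFO); names match case-insensitively
  // because of the toUpperCase() call above.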
private static Long parseLongStr(String str) {
return str == null ? null : Long.parseLong(str.trim());
}
private static String parseStr(String str) {
return str == null ? null : str.trim();
}
}
| 14,531 | 33.032787 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
public class CrossOriginFilterInitializer extends FilterInitializer {
public static final String PREFIX =
"yarn.timeline-service.http-cross-origin.";
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addGlobalFilter("Cross Origin Filter",
CrossOriginFilter.class.getName(), getFilterParameters(conf));
}
static Map<String, String> getFilterParameters(Configuration conf) {
Map<String, String> filterParams =
new HashMap<String, String>();
for (Map.Entry<String, String> entry : conf.getValByRegex(PREFIX)
.entrySet()) {
String name = entry.getKey();
String value = entry.getValue();
name = name.substring(PREFIX.length());
filterParams.put(name, value);
}
return filterParams;
}
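  /**
   * Illustrative sketch only (not part of the original class): demonstrates
   * the prefix stripping above with a hypothetical property value.
   */
  @SuppressWarnings("unused")
  private static void exampleGetFilterParameters() {
    Configuration conf = new Configuration(false);
    conf.set(PREFIX + "allowed-origins", "*");
    // The returned map now contains "allowed-origins" -> "*", i.e. the
    // configured prefix has been stripped from the property name.
    Map<String, String> filterParams = getFilterParameters(conf);
  }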
}
| 1,868 | 34.264151 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/util/LeveldbUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.util;
import org.apache.hadoop.io.WritableComparator;
import java.io.IOException;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.yarn.server.timeline.GenericObjectMapper.readReverseOrderedLong;
public class LeveldbUtils {
/** A string builder utility for building timeline server leveldb keys. */
public static class KeyBuilder {
/** Maximum subkeys that can be added to construct a key. */
private static final int MAX_NUMBER_OF_KEY_ELEMENTS = 10;
private byte[][] b;
private boolean[] useSeparator;
private int index;
private int length;
public KeyBuilder(int size) {
b = new byte[size][];
useSeparator = new boolean[size];
index = 0;
length = 0;
}
public static KeyBuilder newInstance() {
return new KeyBuilder(MAX_NUMBER_OF_KEY_ELEMENTS);
}
    /**
     * Instantiate a new key builder with the given maximum number of subkeys.
     * @param size maximum subkeys that can be added to this key builder
     * @return a newly constructed key builder
     */
public static KeyBuilder newInstance(final int size) {
return new KeyBuilder(size);
}
public KeyBuilder add(String s) {
return add(s.getBytes(UTF_8), true);
}
public KeyBuilder add(byte[] t) {
return add(t, false);
}
public KeyBuilder add(byte[] t, boolean sep) {
b[index] = t;
useSeparator[index] = sep;
length += t.length;
if (sep) {
length++;
}
index++;
return this;
}
/** Builds a byte array without the final string delimiter. */
public byte[] getBytes() {
// check the last valid entry to see the final length
int bytesLength = length;
if (useSeparator[index - 1]) {
bytesLength = length - 1;
}
byte[] bytes = new byte[bytesLength];
int curPos = 0;
for (int i = 0; i < index; i++) {
System.arraycopy(b[i], 0, bytes, curPos, b[i].length);
curPos += b[i].length;
if (i < index - 1 && useSeparator[i]) {
bytes[curPos++] = 0x0;
}
}
return bytes;
}
/** Builds a byte array including the final string delimiter. */
public byte[] getBytesForLookup() {
byte[] bytes = new byte[length];
int curPos = 0;
for (int i = 0; i < index; i++) {
System.arraycopy(b[i], 0, bytes, curPos, b[i].length);
curPos += b[i].length;
if (useSeparator[i]) {
bytes[curPos++] = 0x0;
}
}
return bytes;
}
}
public static class KeyParser {
private final byte[] b;
private int offset;
public KeyParser(final byte[] b, final int offset) {
this.b = b;
this.offset = offset;
}
/** Returns a string from the offset until the next string delimiter. */
public String getNextString() throws IOException {
if (offset >= b.length) {
throw new IOException(
"tried to read nonexistent string from byte array");
}
int i = 0;
while (offset + i < b.length && b[offset + i] != 0x0) {
i++;
}
String s = new String(b, offset, i, UTF_8);
offset = offset + i + 1;
return s;
}
/** Moves current position until after the next end of string marker. */
public void skipNextString() throws IOException {
if (offset >= b.length) {
throw new IOException("tried to read nonexistent string from byte array");
}
while (offset < b.length && b[offset] != 0x0) {
++offset;
}
++offset;
}
/** Read the next 8 bytes in the byte buffer as a long. */
public long getNextLong() throws IOException {
      if (offset + 8 > b.length) {
throw new IOException("byte array ran out when trying to read long");
}
long value = readReverseOrderedLong(b, offset);
offset += 8;
return value;
}
public int getOffset() {
return offset;
}
/** Returns a copy of the remaining bytes. */
public byte[] getRemainingBytes() {
byte[] bytes = new byte[b.length - offset];
System.arraycopy(b, offset, bytes, 0, b.length - offset);
return bytes;
}
}
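  /**
   * Illustrative sketch only (not part of the original class): a
   * KeyBuilder/KeyParser round trip with hypothetical subkey values.
   */
  @SuppressWarnings("unused")
  private static void exampleKeyRoundTrip() throws IOException {
    byte[] key = KeyBuilder.newInstance()
        .add("ENTITY")  // string subkey, 0x0 separator appended
        .add("app_1")   // second string subkey
        .getBytes();    // no trailing separator
    KeyParser parser = new KeyParser(key, 0);
    String type = parser.getNextString();  // "ENTITY"
    String id = parser.getNextString();    // "app_1"
  }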
/**
* Returns true if the byte array begins with the specified prefix.
*/
public static boolean prefixMatches(byte[] prefix, int prefixlen,
byte[] b) {
if (b.length < prefixlen) {
return false;
}
return WritableComparator.compareBytes(prefix, 0, prefixlen, b, 0,
prefixlen) == 0;
}
}
| 5,384 | 28.751381 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.recovery;
import static org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.prefixMatches;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
import org.apache.hadoop.yarn.server.timeline.recovery.records.TimelineDelegationTokenIdentifierData;
import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyBuilder;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.fusesource.leveldbjni.JniDBFactory;
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
/**
* A timeline service state storage implementation that supports any persistent
* storage that adheres to the LevelDB interface.
*/
public class LeveldbTimelineStateStore extends
TimelineStateStore {
public static final Log LOG =
LogFactory.getLog(LeveldbTimelineStateStore.class);
private static final String DB_NAME = "timeline-state-store.ldb";
private static final FsPermission LEVELDB_DIR_UMASK = FsPermission
.createImmutable((short) 0700);
private static final byte[] TOKEN_ENTRY_PREFIX = bytes("t");
private static final byte[] TOKEN_MASTER_KEY_ENTRY_PREFIX = bytes("k");
private static final byte[] LATEST_SEQUENCE_NUMBER_KEY = bytes("s");
private static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 0);
private static final byte[] TIMELINE_STATE_STORE_VERSION_KEY = bytes("v");
private DB db;
public LeveldbTimelineStateStore() {
super(LeveldbTimelineStateStore.class.getName());
}
@Override
protected void initStorage(Configuration conf) throws IOException {
}
@Override
protected void startStorage() throws IOException {
Options options = new Options();
Path dbPath =
new Path(
getConfig().get(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH),
DB_NAME);
FileSystem localFS = null;
try {
localFS = FileSystem.getLocal(getConfig());
if (!localFS.exists(dbPath)) {
if (!localFS.mkdirs(dbPath)) {
throw new IOException("Couldn't create directory for leveldb " +
"timeline store " + dbPath);
}
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
IOUtils.cleanup(LOG, localFS);
}
JniDBFactory factory = new JniDBFactory();
try {
options.createIfMissing(false);
db = factory.open(new File(dbPath.toString()), options);
LOG.info("Loading the existing database at th path: " + dbPath.toString());
checkVersion();
} catch (NativeDB.DBException e) {
if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
try {
options.createIfMissing(true);
db = factory.open(new File(dbPath.toString()), options);
LOG.info("Creating a new database at th path: " + dbPath.toString());
storeVersion(CURRENT_VERSION_INFO);
} catch (DBException ex) {
throw new IOException(ex);
}
} else {
throw new IOException(e);
}
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
protected void closeStorage() throws IOException {
IOUtils.cleanup(LOG, db);
}
@Override
public TimelineServiceState loadState() throws IOException {
LOG.info("Loading timeline service state from leveldb");
TimelineServiceState state = new TimelineServiceState();
int numKeys = loadTokenMasterKeys(state);
int numTokens = loadTokens(state);
loadLatestSequenceNumber(state);
LOG.info("Loaded " + numKeys + " master keys and " + numTokens
+ " tokens from leveldb, and latest sequence number is "
+ state.getLatestSequenceNumber());
return state;
}
@Override
public void storeToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
DataOutputStream ds = null;
WriteBatch batch = null;
try {
byte[] k = createTokenEntryKey(tokenId.getSequenceNumber());
if (db.get(k) != null) {
throw new IOException(tokenId + " already exists");
}
byte[] v = buildTokenData(tokenId, renewDate);
ByteArrayOutputStream bs = new ByteArrayOutputStream();
ds = new DataOutputStream(bs);
ds.writeInt(tokenId.getSequenceNumber());
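      // Put the token entry and the latest sequence number into one leveldb
      // write batch so that both are committed atomically.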
batch = db.createWriteBatch();
batch.put(k, v);
batch.put(LATEST_SEQUENCE_NUMBER_KEY, bs.toByteArray());
db.write(batch);
} catch (DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, ds);
IOUtils.cleanup(LOG, batch);
}
}
@Override
public void updateToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
try {
byte[] k = createTokenEntryKey(tokenId.getSequenceNumber());
if (db.get(k) == null) {
throw new IOException(tokenId + " doesn't exist");
}
byte[] v = buildTokenData(tokenId, renewDate);
db.put(k, v);
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void removeToken(TimelineDelegationTokenIdentifier tokenId)
throws IOException {
try {
byte[] key = createTokenEntryKey(tokenId.getSequenceNumber());
db.delete(key);
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
try {
byte[] k = createTokenMasterKeyEntryKey(key.getKeyId());
if (db.get(k) != null) {
throw new IOException(key + " already exists");
}
byte[] v = buildTokenMasterKeyData(key);
db.put(k, v);
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void removeTokenMasterKey(DelegationKey key) throws IOException {
try {
byte[] k = createTokenMasterKeyEntryKey(key.getKeyId());
db.delete(k);
} catch (DBException e) {
throw new IOException(e);
}
}
private static byte[] buildTokenData(
TimelineDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
TimelineDelegationTokenIdentifierData data =
new TimelineDelegationTokenIdentifierData(tokenId, renewDate);
return data.toByteArray();
}
private static byte[] buildTokenMasterKeyData(DelegationKey key)
throws IOException {
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
key.write(dataStream);
dataStream.close();
} finally {
IOUtils.cleanup(LOG, dataStream);
}
return memStream.toByteArray();
}
private static void loadTokenMasterKeyData(TimelineServiceState state,
byte[] keyData)
throws IOException {
DelegationKey key = new DelegationKey();
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(keyData));
try {
key.readFields(in);
} finally {
IOUtils.cleanup(LOG, in);
}
state.tokenMasterKeyState.add(key);
}
private static void loadTokenData(TimelineServiceState state, byte[] tokenData)
throws IOException {
TimelineDelegationTokenIdentifierData data =
new TimelineDelegationTokenIdentifierData();
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(tokenData));
try {
data.readFields(in);
} finally {
IOUtils.cleanup(LOG, in);
}
state.tokenState.put(data.getTokenIdentifier(), data.getRenewDate());
}
private int loadTokenMasterKeys(TimelineServiceState state)
throws IOException {
byte[] base = KeyBuilder.newInstance().add(TOKEN_MASTER_KEY_ENTRY_PREFIX)
.getBytesForLookup();
int numKeys = 0;
LeveldbIterator iterator = null;
try {
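      // Seek to the first key carrying the master-key prefix and scan forward
      // until a key no longer matches the prefix.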
for (iterator = new LeveldbIterator(db), iterator.seek(base);
iterator.hasNext(); iterator.next()) {
byte[] k = iterator.peekNext().getKey();
if (!prefixMatches(base, base.length, k)) {
break;
}
byte[] v = iterator.peekNext().getValue();
loadTokenMasterKeyData(state, v);
++numKeys;
}
} finally {
IOUtils.cleanup(LOG, iterator);
}
return numKeys;
}
private int loadTokens(TimelineServiceState state) throws IOException {
byte[] base = KeyBuilder.newInstance().add(TOKEN_ENTRY_PREFIX)
.getBytesForLookup();
int numTokens = 0;
LeveldbIterator iterator = null;
try {
for (iterator = new LeveldbIterator(db), iterator.seek(base);
iterator.hasNext(); iterator.next()) {
byte[] k = iterator.peekNext().getKey();
if (!prefixMatches(base, base.length, k)) {
break;
}
byte[] v = iterator.peekNext().getValue();
loadTokenData(state, v);
++numTokens;
}
} catch (DBException e) {
throw new IOException(e);
} finally {
IOUtils.cleanup(LOG, iterator);
}
return numTokens;
}
private void loadLatestSequenceNumber(TimelineServiceState state)
throws IOException {
byte[] data = null;
try {
data = db.get(LATEST_SEQUENCE_NUMBER_KEY);
} catch (DBException e) {
throw new IOException(e);
}
if (data != null) {
DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
try {
state.latestSequenceNumber = in.readInt();
} finally {
IOUtils.cleanup(LOG, in);
}
}
}
  /**
   * Creates a token entry key of the form
   * TOKEN_ENTRY_PREFIX + sequence number.
   */
private static byte[] createTokenEntryKey(int seqNum) throws IOException {
return KeyBuilder.newInstance().add(TOKEN_ENTRY_PREFIX)
.add(Integer.toString(seqNum)).getBytes();
}
  /**
   * Creates a token master key entry key of the form
   * TOKEN_MASTER_KEY_ENTRY_PREFIX + key id.
   */
private static byte[] createTokenMasterKeyEntryKey(int keyId)
throws IOException {
return KeyBuilder.newInstance().add(TOKEN_MASTER_KEY_ENTRY_PREFIX)
.add(Integer.toString(keyId)).getBytes();
}
@VisibleForTesting
Version loadVersion() throws IOException {
try {
byte[] data = db.get(TIMELINE_STATE_STORE_VERSION_KEY);
// if version is not stored previously, treat it as CURRENT_VERSION_INFO.
if (data == null || data.length == 0) {
return getCurrentVersion();
}
Version version =
new VersionPBImpl(
YarnServerCommonProtos.VersionProto.parseFrom(data));
return version;
} catch (DBException e) {
throw new IOException(e);
}
}
@VisibleForTesting
void storeVersion(Version state) throws IOException {
byte[] data =
((VersionPBImpl) state).getProto().toByteArray();
try {
db.put(TIMELINE_STATE_STORE_VERSION_KEY, data);
} catch (DBException e) {
throw new IOException(e);
}
}
@VisibleForTesting
Version getCurrentVersion() {
return CURRENT_VERSION_INFO;
}
/**
* 1) Versioning timeline state store:
   * major.minor. e.g. 1.0, 1.1, 1.2...1.25, 2.0 etc.
* 2) Any incompatible change of TS-store is a major upgrade, and any
* compatible change of TS-store is a minor upgrade.
* 3) Within a minor upgrade, say 1.1 to 1.2:
* overwrite the version info and proceed as normal.
* 4) Within a major upgrade, say 1.2 to 2.0:
   *    throw an exception and direct the user to run a separate upgrade tool
   *    to upgrade the timeline store or remove the incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded timeline state store version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing timeline state store version info " + getCurrentVersion());
storeVersion(CURRENT_VERSION_INFO);
} else {
String incompatibleMessage =
"Incompatible version for timeline state store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
LOG.fatal(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
}
| 14,164 | 32.646081 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/MemoryTimelineStateStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.recovery;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
/**
 * A state store backed by memory, intended for unit tests.
*/
public class MemoryTimelineStateStore
extends TimelineStateStore {
private TimelineServiceState state;
@Override
protected void initStorage(Configuration conf) throws IOException {
}
@Override
protected void startStorage() throws IOException {
state = new TimelineServiceState();
}
@Override
protected void closeStorage() throws IOException {
state = null;
}
@Override
public TimelineServiceState loadState() throws IOException {
TimelineServiceState result = new TimelineServiceState();
result.tokenState.putAll(state.tokenState);
result.tokenMasterKeyState.addAll(state.tokenMasterKeyState);
result.latestSequenceNumber = state.latestSequenceNumber;
return result;
}
@Override
public void storeToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
if (state.tokenState.containsKey(tokenId)) {
throw new IOException("token " + tokenId + " was stored twice");
}
state.tokenState.put(tokenId, renewDate);
state.latestSequenceNumber = tokenId.getSequenceNumber();
}
@Override
public void updateToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
if (!state.tokenState.containsKey(tokenId)) {
throw new IOException("token " + tokenId + " not in store");
}
state.tokenState.put(tokenId, renewDate);
}
@Override
public void removeToken(TimelineDelegationTokenIdentifier tokenId)
throws IOException {
state.tokenState.remove(tokenId);
}
@Override
public void storeTokenMasterKey(DelegationKey key)
throws IOException {
if (state.tokenMasterKeyState.contains(key)) {
throw new IOException("token master key " + key + " was stored twice");
}
state.tokenMasterKeyState.add(key);
}
@Override
public void removeTokenMasterKey(DelegationKey key)
throws IOException {
state.tokenMasterKeyState.remove(key);
}
}
| 3,102 | 30.989691 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/TimelineStateStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.recovery;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
/**
 * Base class for timeline service state storage.
 * Storage implementations need to implement blocking store and load methods
 * to actually store and load the state.
 */
@Private
@Unstable
public abstract class TimelineStateStore extends AbstractService {
public static class TimelineServiceState {
int latestSequenceNumber = 0;
Map<TimelineDelegationTokenIdentifier, Long> tokenState =
new HashMap<TimelineDelegationTokenIdentifier, Long>();
Set<DelegationKey> tokenMasterKeyState = new HashSet<DelegationKey>();
public int getLatestSequenceNumber() {
return latestSequenceNumber;
}
public Map<TimelineDelegationTokenIdentifier, Long> getTokenState() {
return tokenState;
}
public Set<DelegationKey> getTokenMasterKeyState() {
return tokenMasterKeyState;
}
}
public TimelineStateStore() {
super(TimelineStateStore.class.getName());
}
public TimelineStateStore(String name) {
super(name);
}
/**
* Initialize the state storage
*
* @param conf the configuration
* @throws IOException
*/
@Override
public void serviceInit(Configuration conf) throws IOException {
initStorage(conf);
}
/**
* Start the state storage for use
*
* @throws IOException
*/
@Override
public void serviceStart() throws IOException {
startStorage();
}
/**
* Shutdown the state storage.
*
* @throws IOException
*/
@Override
public void serviceStop() throws IOException {
closeStorage();
}
/**
* Implementation-specific initialization.
*
* @param conf the configuration
* @throws IOException
*/
protected abstract void initStorage(Configuration conf) throws IOException;
/**
* Implementation-specific startup.
*
* @throws IOException
*/
protected abstract void startStorage() throws IOException;
/**
* Implementation-specific shutdown.
*
* @throws IOException
*/
protected abstract void closeStorage() throws IOException;
/**
* Load the timeline service state from the state storage.
*
* @throws IOException
*/
public abstract TimelineServiceState loadState() throws IOException;
/**
* Blocking method to store a delegation token along with the current token
* sequence number to the state storage.
*
* Implementations must not return from this method until the token has been
* committed to the state store.
*
* @param tokenId the token to store
* @param renewDate the token renewal deadline
* @throws IOException
*/
public abstract void storeToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException;
/**
* Blocking method to update the expiration of a delegation token
* in the state storage.
*
* Implementations must not return from this method until the expiration
* date of the token has been updated in the state store.
*
* @param tokenId the token to update
* @param renewDate the new token renewal deadline
* @throws IOException
*/
public abstract void updateToken(TimelineDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException;
/**
* Blocking method to remove a delegation token from the state storage.
*
* Implementations must not return from this method until the token has been
* removed from the state store.
*
* @param tokenId the token to remove
* @throws IOException
*/
public abstract void removeToken(TimelineDelegationTokenIdentifier tokenId)
throws IOException;
/**
* Blocking method to store a delegation token master key.
*
* Implementations must not return from this method until the key has been
* committed to the state store.
*
* @param key the master key to store
* @throws IOException
*/
public abstract void storeTokenMasterKey(
DelegationKey key) throws IOException;
/**
* Blocking method to remove a delegation token master key.
*
* Implementations must not return from this method until the key has been
* removed from the state store.
*
* @param key the master key to remove
* @throws IOException
*/
public abstract void removeTokenMasterKey(DelegationKey key)
throws IOException;
}
| 5,626 | 28.005155 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/records/TimelineDelegationTokenIdentifierData.java
|
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.timeline.recovery.records;
import org.apache.hadoop.yarn.proto.YarnServerTimelineServerRecoveryProtos.TimelineDelegationTokenIdentifierDataProto;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
public class TimelineDelegationTokenIdentifierData {
TimelineDelegationTokenIdentifierDataProto.Builder builder =
TimelineDelegationTokenIdentifierDataProto.newBuilder();
public TimelineDelegationTokenIdentifierData() {
}
public TimelineDelegationTokenIdentifierData(
TimelineDelegationTokenIdentifier identifier, long renewdate) {
builder.setTokenIdentifier(identifier.getProto());
builder.setRenewDate(renewdate);
}
public void readFields(DataInput in) throws IOException {
builder.mergeFrom((DataInputStream) in);
}
public byte[] toByteArray() throws IOException {
return builder.build().toByteArray();
}
public TimelineDelegationTokenIdentifier getTokenIdentifier()
throws IOException {
ByteArrayInputStream in =
new ByteArrayInputStream(builder.getTokenIdentifier().toByteArray());
    TimelineDelegationTokenIdentifier identifier =
        new TimelineDelegationTokenIdentifier();
    identifier.readFields(new DataInputStream(in));
    return identifier;
}
public long getRenewDate() {
return builder.getRenewDate();
}
}
| 2,306 | 35.046875 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.conf;
import java.util.HashSet;
import org.apache.hadoop.conf.TestConfigurationFieldsBase;
/**
* Unit test class to compare
* {@link org.apache.hadoop.yarn.conf.YarnConfiguration} and
* yarn-default.xml for missing properties. Currently only throws an error
* if the class is missing a property.
* <p></p>
* Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
* for how this class works.
*/
public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
@SuppressWarnings("deprecation")
@Override
public void initializeMemberVariables() {
    xmlFilename = "yarn-default.xml";
configurationClasses = new Class[] { YarnConfiguration.class };
// Allocate for usage
configurationPropsToSkipCompare = new HashSet<String>();
configurationPrefixToSkipCompare = new HashSet<String>();
// Set error modes
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = true;
// Specific properties to skip
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_IPC_RECORD_FACTORY_CLASS);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_IPC_CLIENT_FACTORY_CLASS);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_IPC_SERVER_FACTORY_CLASS);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_IPC_RPC_IMPL);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_RM_SCHEDULER);
configurationPropsToSkipCompare
.add(YarnConfiguration
.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONCLIENT_PROTOCOL);
configurationPropsToSkipCompare
.add(YarnConfiguration
.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL);
configurationPropsToSkipCompare
.add(YarnConfiguration
.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL);
configurationPropsToSkipCompare
.add(YarnConfiguration
.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER);
configurationPropsToSkipCompare
.add(YarnConfiguration
.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCEMANAGER_ADMINISTRATION_PROTOCOL);
configurationPropsToSkipCompare
.add(YarnConfiguration
.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_SCM_STORE_CLASS);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS);
configurationPropsToSkipCompare
.add(YarnConfiguration.DEFAULT_SHARED_CACHE_CHECKSUM_ALGO_IMPL);
// Ignore all YARN Application Timeline Service (version 1) properties
configurationPrefixToSkipCompare.add("yarn.timeline-service.");
// Used as Java command line properties, not XML
configurationPrefixToSkipCompare.add("yarn.app.container");
// Ignore NodeManager "work in progress" variables
configurationPrefixToSkipCompare
.add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED);
configurationPrefixToSkipCompare
.add(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE);
configurationPrefixToSkipCompare
.add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT);
configurationPrefixToSkipCompare
.add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT);
configurationPrefixToSkipCompare
.add(YarnConfiguration.NM_DISK_RESOURCE_ENABLED);
// Set by container-executor.cfg
configurationPrefixToSkipCompare.add(YarnConfiguration.NM_USER_HOME_DIR);
// Ignore deprecated properties
configurationPrefixToSkipCompare
.add(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
// Allocate for usage
xmlPropsToSkipCompare = new HashSet<String>();
xmlPrefixToSkipCompare = new HashSet<String>();
// Should probably be moved from yarn-default.xml to mapred-default.xml
xmlPropsToSkipCompare.add("mapreduce.job.hdfs-servers");
xmlPropsToSkipCompare.add("mapreduce.job.jar");
// Possibly obsolete, but unable to verify 100%
xmlPropsToSkipCompare.add("yarn.nodemanager.aux-services.mapreduce_shuffle.class");
xmlPropsToSkipCompare.add("yarn.resourcemanager.container.liveness-monitor.interval-ms");
// Used in the XML file as a variable reference internal to the XML file
xmlPropsToSkipCompare.add("yarn.nodemanager.hostname");
// Ignore all YARN Application Timeline Service (version 1) properties
xmlPrefixToSkipCompare.add("yarn.timeline-service");
// Currently defined in RegistryConstants/core-site.xml
xmlPrefixToSkipCompare.add("hadoop.registry");
}
}
| 5,765 | 41.087591 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.factory.providers;
import org.apache.hadoop.classification.InterfaceAudience;
| 943 | 41.909091 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factory.providers;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@LimitedPrivate({ "MapReduce", "YARN" })
@Unstable
public class RecordFactoryProvider {
private static Configuration defaultConf;
static {
defaultConf = new Configuration();
}
private RecordFactoryProvider() {
}
public static RecordFactory getRecordFactory(Configuration conf) {
if (conf == null) {
//Assuming the default configuration has the correct factories set.
//Users can specify a particular factory by providing a configuration.
conf = defaultConf;
}
String recordFactoryClassName = conf.get(
YarnConfiguration.IPC_RECORD_FACTORY_CLASS,
YarnConfiguration.DEFAULT_IPC_RECORD_FACTORY_CLASS);
return (RecordFactory) getFactoryClassInstance(recordFactoryClassName);
}
private static Object getFactoryClassInstance(String factoryClassName) {
try {
Class<?> clazz = Class.forName(factoryClassName);
      Method method = clazz.getMethod("get");
      method.setAccessible(true);
      return method.invoke(null);
    } catch (ClassNotFoundException | NoSuchMethodException
        | InvocationTargetException | IllegalAccessException e) {
      throw new YarnRuntimeException(e);
    }
}
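  // Illustrative usage (a sketch; the record type here is arbitrary):
  //   RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  //   ApplicationId id = factory.newRecordInstance(ApplicationId.class);
  // The configured factory class is expected to expose a static no-arg get()
  // accessor, which the reflective lookup above invokes.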
}
| 2,668 | 36.069444 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.api;
import org.apache.hadoop.classification.InterfaceAudience;
| 928 | 41.227273 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
/**
 * This is the API for applications, comprising the constants that YARN sets
 * up for applications and their containers.
 *
 * TODO: Investigate the semantics and security of each cross-boundary
 * reference.
*/
@Public
@Evolving
public interface ApplicationConstants {
/**
* The environment variable for APP_SUBMIT_TIME. Set in AppMaster environment
* only
*/
public static final String APP_SUBMIT_TIME_ENV = "APP_SUBMIT_TIME_ENV";
/**
* The cache file into which container token is written
*/
public static final String CONTAINER_TOKEN_FILE_ENV_NAME =
UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
/**
   * The environment variable for APPLICATION_WEB_PROXY_BASE. Set in the
   * ApplicationMaster's environment only. It specifies the base that all
   * non-relative web URLs in the ApplicationMaster's web UI should have.
*/
public static final String APPLICATION_WEB_PROXY_BASE_ENV =
"APPLICATION_WEB_PROXY_BASE";
/**
   * The temporary environment variable for the container log directory. It is
   * replaced with the real container log directory on container launch.
*/
public static final String LOG_DIR_EXPANSION_VAR = "<LOG_DIR>";
/**
   * This constant is used to construct the class path and will be replaced
   * with the real class path separator (':' on Linux, ';' on Windows) by the
   * NodeManager on container launch. Users must use this constant to
   * construct the class path if they want cross-platform behavior, i.e.
   * submitting an application from a Windows client to a Linux/Unix server or
   * vice versa.
*/
@Public
@Unstable
public static final String CLASS_PATH_SEPARATOR= "<CPS>";
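  // Illustrative cross-platform classpath construction (a sketch):
  //   String classPath = Environment.CLASSPATH.$$()
  //       + CLASS_PATH_SEPARATOR + "./*";
  // The NodeManager rewrites <CPS> to ':' or ';' and {{CLASSPATH}} to
  // $CLASSPATH or %CLASSPATH% at container launch.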
/**
   * The following two constants are used to expand parameters; they will be
   * replaced with the real parameter expansion marker ('%' on Windows, '$' on
   * Linux) by the NodeManager on container launch. For example, {{VAR}} is
   * replaced with $VAR on Linux and %VAR% on Windows. Users must use these
   * constants to construct the class path if they want cross-platform
   * behavior, i.e. submitting an application from a Windows client to a
   * Linux/Unix server or vice versa.
*/
@Public
@Unstable
public static final String PARAMETER_EXPANSION_LEFT="{{";
/**
   * Users must use this constant to construct the class path if they want
   * cross-platform behavior, i.e. submitting an application from a Windows
   * client to a Linux/Unix server or vice versa.
*/
@Public
@Unstable
public static final String PARAMETER_EXPANSION_RIGHT="}}";
public static final String STDERR = "stderr";
public static final String STDOUT = "stdout";
/**
* The environment variable for MAX_APP_ATTEMPTS. Set in AppMaster environment
* only
*/
public static final String MAX_APP_ATTEMPTS_ENV = "MAX_APP_ATTEMPTS";
/**
* Environment for Applications.
*
* Some of the environment variables for applications are <em>final</em>
* i.e. they cannot be modified by the applications.
*/
public enum Environment {
/**
* $USER
* Final, non-modifiable.
*/
USER("USER"),
/**
* $LOGNAME
* Final, non-modifiable.
*/
LOGNAME("LOGNAME"),
/**
* $HOME
* Final, non-modifiable.
*/
HOME("HOME"),
/**
* $PWD
* Final, non-modifiable.
*/
PWD("PWD"),
/**
* $PATH
*/
PATH("PATH"),
/**
* $SHELL
*/
SHELL("SHELL"),
/**
* $JAVA_HOME
*/
JAVA_HOME("JAVA_HOME"),
/**
* $CLASSPATH
*/
CLASSPATH("CLASSPATH"),
/**
* $APP_CLASSPATH
*/
APP_CLASSPATH("APP_CLASSPATH"),
/**
* $LD_LIBRARY_PATH
*/
LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
/**
* $HADOOP_CONF_DIR
* Final, non-modifiable.
*/
HADOOP_CONF_DIR("HADOOP_CONF_DIR"),
/**
* $HADOOP_COMMON_HOME
*/
HADOOP_COMMON_HOME("HADOOP_COMMON_HOME"),
/**
* $HADOOP_HDFS_HOME
*/
HADOOP_HDFS_HOME("HADOOP_HDFS_HOME"),
/**
* $MALLOC_ARENA_MAX
*/
MALLOC_ARENA_MAX("MALLOC_ARENA_MAX"),
/**
* $HADOOP_YARN_HOME
*/
HADOOP_YARN_HOME("HADOOP_YARN_HOME"),
/**
* $CLASSPATH_PREPEND_DISTCACHE
* Private, Windows specific
*/
@Private
CLASSPATH_PREPEND_DISTCACHE("CLASSPATH_PREPEND_DISTCACHE"),
/**
* $CONTAINER_ID
* Final, exported by NodeManager and non-modifiable by users.
*/
CONTAINER_ID("CONTAINER_ID"),
/**
* $NM_HOST
* Final, exported by NodeManager and non-modifiable by users.
*/
NM_HOST("NM_HOST"),
/**
* $NM_HTTP_PORT
* Final, exported by NodeManager and non-modifiable by users.
*/
NM_HTTP_PORT("NM_HTTP_PORT"),
/**
* $NM_PORT
* Final, exported by NodeManager and non-modifiable by users.
*/
NM_PORT("NM_PORT"),
/**
* $LOCAL_DIRS
* Final, exported by NodeManager and non-modifiable by users.
*/
LOCAL_DIRS("LOCAL_DIRS"),
/**
* $LOG_DIRS
* Final, exported by NodeManager and non-modifiable by users.
* Comma separate list of directories that the container should use for
* logging.
*/
LOG_DIRS("LOG_DIRS");
private final String variable;
private Environment(String variable) {
this.variable = variable;
}
public String key() {
return variable;
}
public String toString() {
return variable;
}
/**
* Expand the environment variable based on client OS environment variable
* expansion syntax (e.g. $VAR for Linux and %VAR% for Windows).
* <p>
* Note: Use $$() method for cross-platform practice i.e. submit an
* application from a Windows client to a Linux/Unix server or vice versa.
* </p>
*/
public String $() {
if (Shell.WINDOWS) {
return "%" + variable + "%";
} else {
return "$" + variable;
}
}
/**
* Expand the environment variable in platform-agnostic syntax. The
* parameter expansion marker "{{VAR}}" will be replaced with real parameter
* expansion marker ('%' for Windows and '$' for Linux) by NodeManager on
* container launch. For example: {{VAR}} will be replaced as $VAR on Linux,
* and %VAR% on Windows.
*/
@Public
@Unstable
public String $$() {
return PARAMETER_EXPANSION_LEFT + variable + PARAMETER_EXPANSION_RIGHT;
}
}
}
| 7,697 | 26.297872 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.exceptions.YarnException;
/**
* <p>
* The protocol between clients and the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> to get information on applications,
* application attempts and containers.
* </p>
*
*/
@Private
@Unstable
public interface ApplicationBaseProtocol {
/**
* The interface used by clients to get a report of an Application from the
* <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
* <p>
* The client, via {@link GetApplicationReportRequest} provides the
* {@link ApplicationId} of the application.
* <p>
   * In secure mode, the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> verifies access to the application,
* queue etc. before accepting the request.
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetApplicationReportResponse} which includes the
* {@link ApplicationReport} for the application.
* <p>
* If the user does not have <code>VIEW_APP</code> access then the following
* fields in the report will be set to stubbed values:
* <ul>
* <li>host - set to "N/A"</li>
* <li>RPC port - set to -1</li>
* <li>client token - set to "N/A"</li>
* <li>diagnostics - set to "N/A"</li>
* <li>tracking URL - set to "N/A"</li>
* <li>original tracking URL - set to "N/A"</li>
* <li>resource usage report - all values are -1</li>
* </ul>
*
* @param request
* request for an application report
* @return application report
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
@Idempotent
public GetApplicationReportResponse getApplicationReport(
GetApplicationReportRequest request) throws YarnException, IOException;
/**
* <p>
* The interface used by clients to get a report of Applications matching the
* filters defined by {@link GetApplicationsRequest} in the cluster from the
* <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
* </p>
*
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetApplicationsResponse} which includes the
* {@link ApplicationReport} for the applications.
* </p>
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationReport(GetApplicationReportRequest)}.
* </p>
*
* @param request
* request for report on applications
* @return report on applications matching the given application types defined
* in the request
* @throws YarnException
* @throws IOException
* @see GetApplicationsRequest
*/
@Public
@Stable
@Idempotent
  public GetApplicationsResponse getApplications(GetApplicationsRequest request)
      throws YarnException, IOException;
/**
* The interface used by clients to get a report of an Application Attempt
* from the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code>
* <p>
* The client, via {@link GetApplicationAttemptReportRequest} provides the
* {@link ApplicationAttemptId} of the application attempt.
* <p>
   * In secure mode, the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> verifies access to the method before
* accepting the request.
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetApplicationAttemptReportResponse} which includes
* the {@link ApplicationAttemptReport} for the application attempt.
* <p>
* If the user does not have <code>VIEW_APP</code> access then the following
* fields in the report will be set to stubbed values:
* <ul>
* <li>host</li>
* <li>RPC port</li>
* <li>client token</li>
* <li>diagnostics - set to "N/A"</li>
* <li>tracking URL</li>
* </ul>
*
* @param request
* request for an application attempt report
* @return application attempt report
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
@Idempotent
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request) throws YarnException,
IOException;
/**
* <p>
* The interface used by clients to get a report of all Application attempts
* in the cluster from the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code>
* </p>
*
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
   * responds with a {@link GetApplicationAttemptsResponse} which includes the
   * {@link ApplicationAttemptReport} for all the application attempts of a
   * specified application.
* </p>
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
* </p>
*
* @param request
* request for reports on all application attempts of an application
* @return reports on all application attempts of an application
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
@Idempotent
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException, IOException;
/**
* <p>
   * The interface used by clients to get a report of a Container from the
* <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* </p>
*
* <p>
* The client, via {@link GetContainerReportRequest} provides the
* {@link ContainerId} of the container.
* </p>
*
* <p>
   * In secure mode, the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> verifies access to the method before
* accepting the request.
* </p>
*
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetContainerReportResponse} which includes the
* {@link ContainerReport} for the container.
* </p>
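 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol and {@code containerId} is a known {@link ContainerId}):
 * </p>
 * <pre>{@code
 * GetContainerReportRequest req =
 *     GetContainerReportRequest.newInstance(containerId);
 * ContainerReport report =
 *     client.getContainerReport(req).getContainerReport();
 * }</pre>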
*
* @param request
* request for a container report
* @return container report
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
@Idempotent
public GetContainerReportResponse getContainerReport(
GetContainerReportRequest request) throws YarnException, IOException;
/**
* <p>
* The interface used by clients to get a report of Containers for an
* application attempt from the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code>
* </p>
*
* <p>
 * The client, via {@link GetContainersRequest}, provides the
* {@link ApplicationAttemptId} of the application attempt.
* </p>
*
* <p>
 * In secure mode, the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> verifies access to the method before
* accepting the request.
* </p>
*
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetContainersResponse} which includes a list of
* {@link ContainerReport} for all the containers of a specific application
* attempt.
* </p>
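 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol and {@code attemptId} is a known
 * {@link ApplicationAttemptId}):
 * </p>
 * <pre>{@code
 * GetContainersRequest req = GetContainersRequest.newInstance(attemptId);
 * List<ContainerReport> containers =
 *     client.getContainers(req).getContainerList();
 * }</pre>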
*
* @param request
* request for a list of container reports of an application attempt.
* @return reports on all containers of an application attempt
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
@Idempotent
public GetContainersResponse getContainers(GetContainersRequest request)
throws YarnException, IOException;
/**
* <p>
 * The interface used by clients to get a delegation token, enabling the
 * containers to talk to the service using those tokens.
*
* <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with the delegation {@link Token} that can be used by the client
* to speak to this service.
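 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol; the renewer principal is illustrative only):
 * <pre>{@code
 * GetDelegationTokenRequest req =
 *     GetDelegationTokenRequest.newInstance("renewer@EXAMPLE.COM");
 * Token token = client.getDelegationToken(req).getRMDelegationToken();
 * }</pre>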
*
* @param request
* request to get a delegation token for the client.
* @return delegation token that can be used to talk to this service
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
@Idempotent
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws YarnException, IOException;
/**
* Renew an existing delegation {@link Token}.
*
* @param request
* the delegation token to be renewed.
* @return the new expiry time for the delegation token.
* @throws YarnException
* @throws IOException
*/
@Private
@Unstable
@Idempotent
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws YarnException, IOException;
/**
* Cancel an existing delegation {@link Token}.
*
* @param request
* the delegation token to be cancelled.
* @return an empty response.
* @throws YarnException
* @throws IOException
*/
@Private
@Unstable
@Idempotent
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws YarnException, IOException;
}
| 12,441 | 36.029762 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnException;
/**
* <p>The protocol between clients and the <code>ResourceManager</code>
* to submit/abort jobs and to get information on applications, cluster metrics,
* nodes, queues and ACLs.</p>
*/
@Public
@Stable
public interface ApplicationClientProtocol extends ApplicationBaseProtocol {
/**
* <p>The interface used by clients to obtain a new {@link ApplicationId} for
* submitting new applications.</p>
*
* <p>The <code>ResourceManager</code> responds with a new, monotonically
* increasing, {@link ApplicationId} which is used by the client to submit
* a new application.</p>
*
* <p>The <code>ResourceManager</code> also responds with details such
* as maximum resource capabilities in the cluster as specified in
* {@link GetNewApplicationResponse}.</p>
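 *
 * <p>A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol):</p>
 * <pre>{@code
 * GetNewApplicationResponse resp =
 *     client.getNewApplication(GetNewApplicationRequest.newInstance());
 * ApplicationId appId = resp.getApplicationId();
 * Resource maxCapability = resp.getMaximumResourceCapability();
 * }</pre>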
*
* @param request request to get a new <code>ApplicationId</code>
* @return response containing the new <code>ApplicationId</code> to be used
* to submit an application
* @throws YarnException
* @throws IOException
* @see #submitApplication(SubmitApplicationRequest)
*/
@Public
@Stable
@Idempotent
public GetNewApplicationResponse getNewApplication(
GetNewApplicationRequest request)
throws YarnException, IOException;
/**
* <p>The interface used by clients to submit a new application to the
* <code>ResourceManager.</code></p>
*
* <p>The client is required to provide details such as queue,
* {@link Resource} required to run the <code>ApplicationMaster</code>,
* the equivalent of {@link ContainerLaunchContext} for launching
* the <code>ApplicationMaster</code> etc. via the
* {@link SubmitApplicationRequest}.</p>
*
* <p>Currently the <code>ResourceManager</code> sends an immediate (empty)
* {@link SubmitApplicationResponse} on accepting the submission and throws
* an exception if it rejects the submission. However, this call needs to be
* followed by {@link #getApplicationReport(GetApplicationReportRequest)}
* to make sure that the application gets properly submitted - obtaining a
* {@link SubmitApplicationResponse} from ResourceManager doesn't guarantee
* that RM 'remembers' this application beyond failover or restart. If RM
* failover or RM restart happens before ResourceManager saves the
* application's state successfully, the subsequent
* {@link #getApplicationReport(GetApplicationReportRequest)} will throw
 * an {@link ApplicationNotFoundException}. Clients need to re-submit
 * the application with the same {@link ApplicationSubmissionContext} when
 * they encounter an {@link ApplicationNotFoundException} on the
* {@link #getApplicationReport(GetApplicationReportRequest)} call.</p>
*
 * <p>During the submission process, the <code>ResourceManager</code> checks
 * whether the application already exists. If the application exists, it will
 * simply return a SubmitApplicationResponse.</p>
*
 * <p> In secure mode, the <code>ResourceManager</code> verifies access to
* queues etc. before accepting the application submission.</p>
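 *
 * <p>A minimal sketch of the submit-then-verify pattern described above
 * (assuming {@code client} is an established proxy to this protocol and
 * {@code context} is a prepared {@link ApplicationSubmissionContext}):</p>
 * <pre>{@code
 * SubmitApplicationRequest req = SubmitApplicationRequest.newInstance(context);
 * client.submitApplication(req);
 * try {
 *   client.getApplicationReport(
 *       GetApplicationReportRequest.newInstance(context.getApplicationId()));
 * } catch (ApplicationNotFoundException e) {
 *   // The RM lost the submission across failover/restart; re-submit as-is.
 *   client.submitApplication(req);
 * }
 * }</pre>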
*
* @param request request to submit a new application
* @return (empty) response on accepting the submission
* @throws YarnException
* @throws IOException
* @see #getNewApplication(GetNewApplicationRequest)
*/
@Public
@Stable
@Idempotent
public SubmitApplicationResponse submitApplication(
SubmitApplicationRequest request)
throws YarnException, IOException;
/**
* <p>The interface used by clients to request the
* <code>ResourceManager</code> to abort submitted application.</p>
*
* <p>The client, via {@link KillApplicationRequest} provides the
* {@link ApplicationId} of the application to be aborted.</p>
*
 * <p> In secure mode, the <code>ResourceManager</code> verifies access to the
* application, queue etc. before terminating the application.</p>
*
* <p>Currently, the <code>ResourceManager</code> returns an empty response
* on success and throws an exception on rejecting the request.</p>
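 *
 * <p>A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol and {@code appId} is a known {@link ApplicationId}):</p>
 * <pre>{@code
 * client.forceKillApplication(KillApplicationRequest.newInstance(appId));
 * }</pre>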
*
* @param request request to abort a submitted application
* @return <code>ResourceManager</code> returns an empty response
* on success and throws an exception on rejecting the request
* @throws YarnException
* @throws IOException
* @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)
*/
@Public
@Stable
@Idempotent
public KillApplicationResponse forceKillApplication(
KillApplicationRequest request)
throws YarnException, IOException;
/**
* <p>The interface used by clients to get metrics about the cluster from
* the <code>ResourceManager</code>.</p>
*
* <p>The <code>ResourceManager</code> responds with a
* {@link GetClusterMetricsResponse} which includes the
* {@link YarnClusterMetrics} with details such as number of current
* nodes in the cluster.</p>
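 *
 * <p>A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol):</p>
 * <pre>{@code
 * YarnClusterMetrics metrics =
 *     client.getClusterMetrics(GetClusterMetricsRequest.newInstance())
 *         .getClusterMetrics();
 * int nodeManagers = metrics.getNumNodeManagers();
 * }</pre>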
*
* @param request request for cluster metrics
* @return cluster metrics
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
@Idempotent
public GetClusterMetricsResponse getClusterMetrics(
GetClusterMetricsRequest request)
throws YarnException, IOException;
/**
* <p>The interface used by clients to get a report of all nodes
* in the cluster from the <code>ResourceManager</code>.</p>
*
* <p>The <code>ResourceManager</code> responds with a
* {@link GetClusterNodesResponse} which includes the
* {@link NodeReport} for all the nodes in the cluster.</p>
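 *
 * <p>A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol):</p>
 * <pre>{@code
 * GetClusterNodesRequest req =
 *     GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));
 * List<NodeReport> nodes = client.getClusterNodes(req).getNodeReports();
 * }</pre>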
*
* @param request request for report on all nodes
* @return report on all nodes
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
@Idempotent
public GetClusterNodesResponse getClusterNodes(
GetClusterNodesRequest request)
throws YarnException, IOException;
/**
* <p>The interface used by clients to get information about <em>queues</em>
* from the <code>ResourceManager</code>.</p>
*
* <p>The client, via {@link GetQueueInfoRequest}, can ask for details such
* as used/total resources, child queues, running applications etc.</p>
*
 * <p> In secure mode, the <code>ResourceManager</code> verifies access before
* providing the information.</p>
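 *
 * <p>A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol; the queue name is illustrative only):</p>
 * <pre>{@code
 * GetQueueInfoRequest req =
 *     GetQueueInfoRequest.newInstance("default", true, false, false);
 * QueueInfo info = client.getQueueInfo(req).getQueueInfo();
 * }</pre>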
*
* @param request request to get queue information
* @return queue information
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
@Idempotent
public GetQueueInfoResponse getQueueInfo(
GetQueueInfoRequest request)
throws YarnException, IOException;
/**
* <p>The interface used by clients to get information about <em>queue
* acls</em> for <em>current user</em> from the <code>ResourceManager</code>.
* </p>
*
* <p>The <code>ResourceManager</code> responds with queue acls for all
* existing queues.</p>
*
* @param request request to get queue acls for <em>current user</em>
* @return queue acls for <em>current user</em>
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
@Idempotent
public GetQueueUserAclsInfoResponse getQueueUserAcls(
GetQueueUserAclsInfoRequest request)
throws YarnException, IOException;
/**
* Move an application to a new queue.
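 *
 * <p>A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol, {@code appId} is a known {@link ApplicationId}, and the
 * target queue name is illustrative only):</p>
 * <pre>{@code
 * client.moveApplicationAcrossQueues(
 *     MoveApplicationAcrossQueuesRequest.newInstance(appId, "otherQueue"));
 * }</pre>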
*
* @param request the application ID and the target queue
* @return an empty response
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
@Idempotent
public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(
MoveApplicationAcrossQueuesRequest request) throws YarnException, IOException;
/**
* <p>
* The interface used by clients to submit a new reservation to the
* {@code ResourceManager}.
* </p>
*
* <p>
* The client packages all details of its request in a
* {@link ReservationSubmissionRequest} object. This contains information
* about the amount of capacity, temporal constraints, and concurrency needs.
* Furthermore, the reservation might be composed of multiple stages, with
* ordering dependencies among them.
* </p>
*
* <p>
* In order to respond, a new admission control component in the
* {@code ResourceManager} performs an analysis of the resources that have
 * been committed over the period of time the user is requesting, verifies
 * that the user's requests can be fulfilled, and that they respect a sharing
 * policy (e.g., {@code CapacityOverTimePolicy}). Once it has positively
 * determined that the ReservationSubmissionRequest is satisfiable, the
 * {@code ResourceManager} answers with a
 * {@link ReservationSubmissionResponse} that includes a non-null
 * {@link ReservationId}. Upon failure to find a valid allocation, the
 * response is an exception with the reason.
*
* On application submission the client can use this {@link ReservationId} to
* obtain access to the reserved resources.
* </p>
*
* <p>
* The system guarantees that during the time-range specified by the user, the
 * reservationID will correspond to a valid reservation. The amount of
 * capacity dedicated to such a queue can vary over time, depending on the
 * allocation that has been determined, but it is guaranteed to satisfy all
 * the constraints expressed by the user in the
 * {@link ReservationSubmissionRequest}.
* </p>
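 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol, {@code definition} is a prepared
 * {@code ReservationDefinition}, and the queue name is illustrative only):
 * </p>
 * <pre>{@code
 * ReservationSubmissionRequest req =
 *     ReservationSubmissionRequest.newInstance(definition, "dedicated");
 * ReservationId reservationId =
 *     client.submitReservation(req).getReservationId();
 * }</pre>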
*
* @param request the request to submit a new Reservation
* @return response the {@link ReservationId} on accepting the submission
* @throws YarnException if the request is invalid or reservation cannot be
* created successfully
* @throws IOException
*
*/
@Public
@Unstable
public ReservationSubmissionResponse submitReservation(
ReservationSubmissionRequest request) throws YarnException, IOException;
/**
* <p>
* The interface used by clients to update an existing Reservation. This is
 * referred to as a re-negotiation process, in which a user that has
 * previously submitted a Reservation requests to change its allocation.
* </p>
*
* <p>
* The allocation is attempted by virtually substituting all previous
* allocations related to this Reservation with new ones, that satisfy the new
* {@link ReservationUpdateRequest}. Upon success the previous allocation is
* substituted by the new one, and on failure (i.e., if the system cannot find
* a valid allocation for the updated request), the previous allocation
* remains valid.
*
* The {@link ReservationId} is not changed, and applications currently
* running within this reservation will automatically receive the resources
* based on the new allocation.
* </p>
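 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol, {@code definition} is the updated
 * {@code ReservationDefinition}, and {@code reservationId} identifies an
 * existing reservation):
 * </p>
 * <pre>{@code
 * ReservationUpdateRequest req =
 *     ReservationUpdateRequest.newInstance(definition, reservationId);
 * client.updateReservation(req);
 * }</pre>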
*
* @param request to update an existing Reservation (the ReservationRequest
* should refer to an existing valid {@link ReservationId})
* @return response empty on successfully updating the existing reservation
* @throws YarnException if the request is invalid or reservation cannot be
* updated successfully
* @throws IOException
*
*/
@Public
@Unstable
public ReservationUpdateResponse updateReservation(
ReservationUpdateRequest request) throws YarnException, IOException;
/**
* <p>
* The interface used by clients to remove an existing Reservation.
*
 * Upon deletion of a reservation, applications running within this
 * reservation are automatically downgraded to normal jobs running without
 * any dedicated reservation.
* </p>
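 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol and {@code reservationId} identifies an existing
 * reservation):
 * </p>
 * <pre>{@code
 * client.deleteReservation(
 *     ReservationDeleteRequest.newInstance(reservationId));
 * }</pre>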
*
* @param request to remove an existing Reservation (the ReservationRequest
* should refer to an existing valid {@link ReservationId})
* @return response empty on successfully deleting the existing reservation
* @throws YarnException if the request is invalid or reservation cannot be
* deleted successfully
* @throws IOException
*
*/
@Public
@Unstable
public ReservationDeleteResponse deleteReservation(
ReservationDeleteRequest request) throws YarnException, IOException;
/**
* <p>
 * The interface used by clients to get node-to-labels mappings in the existing cluster
* </p>
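 *
 * <p>
 * A minimal usage sketch (assuming {@code client} is an established proxy
 * to this protocol):
 * </p>
 * <pre>{@code
 * Map<NodeId, Set<String>> mappings =
 *     client.getNodeToLabels(GetNodesToLabelsRequest.newInstance())
 *         .getNodeToLabels();
 * }</pre>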
*
 * @param request request to get node-to-labels mappings of this cluster
* @return node to labels mappings
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
public GetNodesToLabelsResponse getNodeToLabels(
GetNodesToLabelsRequest request) throws YarnException, IOException;
/**
* <p>
 * The interface used by clients to get labels-to-nodes mappings
 * in the existing cluster
* </p>
*
 * @param request request to get labels-to-nodes mappings of this cluster
* @return labels to nodes mappings
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
public GetLabelsToNodesResponse getLabelsToNodes(
GetLabelsToNodesRequest request) throws YarnException, IOException;
/**
* <p>
 * The interface used by clients to get node labels in the cluster
* </p>
*
* @param request to get node labels collection of this cluster
* @return node labels collection of this cluster
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
public GetClusterNodeLabelsResponse getClusterNodeLabels(
GetClusterNodeLabelsRequest request) throws YarnException, IOException;
}
| 17,182 | 39.621749 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* The protocol between clients and the <code>ApplicationHistoryServer</code> to
* get the information of completed applications etc.
* </p>
*/
@Public
@Unstable
public interface ApplicationHistoryProtocol extends ApplicationBaseProtocol {
}
| 1,237 | 35.411765 | 80 |
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.