// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
.FSAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import com.codahale.metrics.Gauge;
import org.apache.hadoop.yarn.sls.SLSRunner;
@Private
@Unstable
public class FairSchedulerMetrics extends SchedulerMetrics {
private int totalMemoryMB = Integer.MAX_VALUE;
private int totalVCores = Integer.MAX_VALUE;
private boolean maxReset = false;
public FairSchedulerMetrics() {
super();
appTrackedMetrics.add("demand.memory");
appTrackedMetrics.add("demand.vcores");
appTrackedMetrics.add("usage.memory");
appTrackedMetrics.add("usage.vcores");
appTrackedMetrics.add("minshare.memory");
appTrackedMetrics.add("minshare.vcores");
appTrackedMetrics.add("maxshare.memory");
appTrackedMetrics.add("maxshare.vcores");
appTrackedMetrics.add("fairshare.memory");
appTrackedMetrics.add("fairshare.vcores");
queueTrackedMetrics.add("demand.memory");
queueTrackedMetrics.add("demand.vcores");
queueTrackedMetrics.add("usage.memory");
queueTrackedMetrics.add("usage.vcores");
queueTrackedMetrics.add("minshare.memory");
queueTrackedMetrics.add("minshare.vcores");
queueTrackedMetrics.add("maxshare.memory");
queueTrackedMetrics.add("maxshare.vcores");
queueTrackedMetrics.add("fairshare.memory");
queueTrackedMetrics.add("fairshare.vcores");
}
@Override
public void trackApp(ApplicationAttemptId appAttemptId, String oldAppId) {
super.trackApp(appAttemptId, oldAppId);
FairScheduler fair = (FairScheduler) scheduler;
final FSAppAttempt app = fair.getSchedulerApp(appAttemptId);
metrics.register("variable.app." + oldAppId + ".demand.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return app.getDemand().getMemory();
}
}
);
metrics.register("variable.app." + oldAppId + ".demand.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return app.getDemand().getVirtualCores();
}
}
);
metrics.register("variable.app." + oldAppId + ".usage.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return app.getResourceUsage().getMemory();
}
}
);
metrics.register("variable.app." + oldAppId + ".usage.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return app.getResourceUsage().getVirtualCores();
}
}
);
metrics.register("variable.app." + oldAppId + ".minshare.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return app.getMinShare().getMemory();
}
}
);
metrics.register("variable.app." + oldAppId + ".minshare.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
          return app.getMinShare().getVirtualCores();
}
}
);
metrics.register("variable.app." + oldAppId + ".maxshare.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return Math.min(app.getMaxShare().getMemory(), totalMemoryMB);
}
}
);
metrics.register("variable.app." + oldAppId + ".maxshare.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return Math.min(app.getMaxShare().getVirtualCores(), totalVCores);
}
}
);
metrics.register("variable.app." + oldAppId + ".fairshare.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
          return app.getFairShare().getMemory();
}
}
);
metrics.register("variable.app." + oldAppId + ".fairshare.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return app.getFairShare().getVirtualCores();
}
}
);
}
@Override
public void trackQueue(String queueName) {
trackedQueues.add(queueName);
FairScheduler fair = (FairScheduler) scheduler;
final FSQueue queue = fair.getQueueManager().getQueue(queueName);
metrics.register("variable.queue." + queueName + ".demand.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getDemand().getMemory();
}
}
);
metrics.register("variable.queue." + queueName + ".demand.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getDemand().getVirtualCores();
}
}
);
metrics.register("variable.queue." + queueName + ".usage.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getResourceUsage().getMemory();
}
}
);
metrics.register("variable.queue." + queueName + ".usage.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getResourceUsage().getVirtualCores();
}
}
);
metrics.register("variable.queue." + queueName + ".minshare.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getMinShare().getMemory();
}
}
);
metrics.register("variable.queue." + queueName + ".minshare.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getMinShare().getVirtualCores();
}
}
);
metrics.register("variable.queue." + queueName + ".maxshare.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if (! maxReset &&
SLSRunner.simulateInfoMap.containsKey("Number of nodes") &&
SLSRunner.simulateInfoMap.containsKey("Node memory (MB)") &&
SLSRunner.simulateInfoMap.containsKey("Node VCores")) {
int numNMs = Integer.parseInt(
SLSRunner.simulateInfoMap.get("Number of nodes").toString());
int numMemoryMB = Integer.parseInt(
SLSRunner.simulateInfoMap.get("Node memory (MB)").toString());
int numVCores = Integer.parseInt(
SLSRunner.simulateInfoMap.get("Node VCores").toString());
totalMemoryMB = numNMs * numMemoryMB;
totalVCores = numNMs * numVCores;
            maxReset = true; // compute the cluster totals only once
}
return Math.min(queue.getMaxShare().getMemory(), totalMemoryMB);
}
}
);
metrics.register("variable.queue." + queueName + ".maxshare.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return Math.min(queue.getMaxShare().getVirtualCores(), totalVCores);
}
}
);
metrics.register("variable.queue." + queueName + ".fairshare.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getFairShare().getMemory();
}
}
);
metrics.register("variable.queue." + queueName + ".fairshare.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
return queue.getFairShare().getVirtualCores();
}
}
);
}
@Override
public void untrackQueue(String queueName) {
trackedQueues.remove(queueName);
metrics.remove("variable.queue." + queueName + ".demand.memory");
metrics.remove("variable.queue." + queueName + ".demand.vcores");
metrics.remove("variable.queue." + queueName + ".usage.memory");
metrics.remove("variable.queue." + queueName + ".usage.vcores");
metrics.remove("variable.queue." + queueName + ".minshare.memory");
metrics.remove("variable.queue." + queueName + ".minshare.vcores");
metrics.remove("variable.queue." + queueName + ".maxshare.memory");
metrics.remove("variable.queue." + queueName + ".maxshare.vcores");
metrics.remove("variable.queue." + queueName + ".fairshare.memory");
metrics.remove("variable.queue." + queueName + ".fairshare.vcores");
}
}
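// For reference, a minimal stand-alone sketch (assuming only the
// metrics-core dependency; the metric name and the constant demand value are
// illustrative, not taken from the Hadoop source) of how gauges registered
// under names like "variable.app.<id>.demand.memory" are read back from the
// shared MetricRegistry by consumers such as the CSV reporter or the web app:

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

public class GaugeReadbackSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    // Register a gauge the same way FairSchedulerMetrics does; the constant
    // stands in for app.getDemand().getMemory().
    registry.register("variable.app.app_1.demand.memory",
        new Gauge<Integer>() {
          @Override
          public Integer getValue() {
            return 4096;
          }
        });
    // Consumers look gauges up by name and poll getValue() on each report.
    Gauge<?> gauge =
        registry.getGauges().get("variable.app.app_1.demand.memory");
    System.out.println("demand.memory = " + gauge.getValue()); // 4096
  }
}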
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@Private
@Unstable
public class CapacitySchedulerMetrics extends SchedulerMetrics {
public CapacitySchedulerMetrics() {
super();
}
@Override
public void trackQueue(String queueName) {
trackedQueues.add(queueName);
}
}
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
.NodeUpdateSchedulerEvent;
@Private
@Unstable
public class NodeUpdateSchedulerEventWrapper extends NodeUpdateSchedulerEvent {
public NodeUpdateSchedulerEventWrapper(NodeUpdateSchedulerEvent event) {
super(new RMNodeWrapper(event.getRMNode()));
}
}
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.sls.SLSRunner;
import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
import org.apache.hadoop.yarn.sls.web.SLSWebApp;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.log4j.Logger;
import com.codahale.metrics.Counter;
import com.codahale.metrics.CsvReporter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SlidingWindowReservoir;
import com.codahale.metrics.Timer;
@Private
@Unstable
public class ResourceSchedulerWrapper
extends AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>
implements ResourceScheduler, Configurable {
private static final String EOL = System.getProperty("line.separator");
private static final int SAMPLING_SIZE = 60;
private ScheduledExecutorService pool;
// counters for scheduler allocate/handle operations
private Counter schedulerAllocateCounter;
private Counter schedulerHandleCounter;
private Map<SchedulerEventType, Counter> schedulerHandleCounterMap;
// Timers for scheduler allocate/handle operations
private Timer schedulerAllocateTimer;
private Timer schedulerHandleTimer;
private Map<SchedulerEventType, Timer> schedulerHandleTimerMap;
private List<Histogram> schedulerHistogramList;
private Map<Histogram, Timer> histogramTimerMap;
private Lock samplerLock;
private Lock queueLock;
private Configuration conf;
private ResourceScheduler scheduler;
private Map<ApplicationId, String> appQueueMap =
new ConcurrentHashMap<ApplicationId, String>();
private BufferedWriter jobRuntimeLogBW;
// Priority of the ResourceSchedulerWrapper shutdown hook.
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
// web app
private SLSWebApp web;
private Map<ContainerId, Resource> preemptionContainerMap =
new ConcurrentHashMap<ContainerId, Resource>();
// metrics
private MetricRegistry metrics;
private SchedulerMetrics schedulerMetrics;
private boolean metricsON;
private String metricsOutputDir;
private BufferedWriter metricsLogBW;
private boolean running = false;
private static Map<Class, Class> defaultSchedulerMetricsMap =
new HashMap<Class, Class>();
static {
defaultSchedulerMetricsMap.put(FairScheduler.class,
FairSchedulerMetrics.class);
defaultSchedulerMetricsMap.put(FifoScheduler.class,
FifoSchedulerMetrics.class);
defaultSchedulerMetricsMap.put(CapacityScheduler.class,
CapacitySchedulerMetrics.class);
}
  // must be set from outside
private Set<String> queueSet;
private Set<String> trackedAppSet;
public final Logger LOG = Logger.getLogger(ResourceSchedulerWrapper.class);
public ResourceSchedulerWrapper() {
super(ResourceSchedulerWrapper.class.getName());
samplerLock = new ReentrantLock();
queueLock = new ReentrantLock();
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
// set scheduler
Class<? extends ResourceScheduler> klass =
conf.getClass(SLSConfiguration.RM_SCHEDULER, null,
ResourceScheduler.class);
scheduler = ReflectionUtils.newInstance(klass, conf);
// start metrics
metricsON = conf.getBoolean(SLSConfiguration.METRICS_SWITCH, true);
if (metricsON) {
try {
initMetrics();
} catch (Exception e) {
e.printStackTrace();
}
}
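    // On JVM shutdown, terminate the realtime-metrics JSON array, stop the
    // embedded web app, and close writers / shut down the sampling pool.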
ShutdownHookManager.get().addShutdownHook(new Runnable() {
@Override
public void run() {
try {
if (metricsLogBW != null) {
metricsLogBW.write("]");
metricsLogBW.close();
}
if (web != null) {
web.stop();
}
tearDown();
} catch (Exception e) {
e.printStackTrace();
}
}
}, SHUTDOWN_HOOK_PRIORITY);
}
@Override
public Allocation allocate(ApplicationAttemptId attemptId,
List<ResourceRequest> resourceRequests,
List<ContainerId> containerIds,
List<String> strings, List<String> strings2) {
if (metricsON) {
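      // Timer.Context marks the start of a timed span; stop() in the finally
      // block records the elapsed time into the sliding-window reservoir.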
final Timer.Context context = schedulerAllocateTimer.time();
Allocation allocation = null;
try {
allocation = scheduler.allocate(attemptId, resourceRequests,
containerIds, strings, strings2);
return allocation;
} finally {
context.stop();
schedulerAllocateCounter.inc();
        try {
          // allocation stays null if scheduler.allocate threw; skip the
          // queue update in that case instead of dereferencing null.
          if (allocation != null) {
            updateQueueWithAllocateRequest(allocation, attemptId,
                resourceRequests, containerIds);
          }
} catch (IOException e) {
e.printStackTrace();
}
}
} else {
return scheduler.allocate(attemptId,
resourceRequests, containerIds, strings, strings2);
}
}
@Override
public void handle(SchedulerEvent schedulerEvent) {
// metrics off
if (! metricsON) {
scheduler.handle(schedulerEvent);
return;
}
if(!running) running = true;
// metrics on
Timer.Context handlerTimer = null;
Timer.Context operationTimer = null;
NodeUpdateSchedulerEventWrapper eventWrapper;
try {
if (schedulerEvent.getType() == SchedulerEventType.NODE_UPDATE
&& schedulerEvent instanceof NodeUpdateSchedulerEvent) {
eventWrapper = new NodeUpdateSchedulerEventWrapper(
(NodeUpdateSchedulerEvent)schedulerEvent);
schedulerEvent = eventWrapper;
updateQueueWithNodeUpdate(eventWrapper);
} else if (schedulerEvent.getType() == SchedulerEventType.APP_ATTEMPT_REMOVED
&& schedulerEvent instanceof AppAttemptRemovedSchedulerEvent) {
        // if the removed attempt still holds its AM container, update the
        // queue's resource usage information
AppAttemptRemovedSchedulerEvent appRemoveEvent =
(AppAttemptRemovedSchedulerEvent) schedulerEvent;
ApplicationAttemptId appAttemptId =
appRemoveEvent.getApplicationAttemptID();
String queue = appQueueMap.get(appAttemptId.getApplicationId());
SchedulerAppReport app = scheduler.getSchedulerAppInfo(appAttemptId);
        if (! app.getLiveContainers().isEmpty()) {  // 0 or 1 live containers
          // the remaining live container, if any, is the AM container
RMContainer rmc = app.getLiveContainers().iterator().next();
updateQueueMetrics(queue,
rmc.getContainer().getResource().getMemory(),
rmc.getContainer().getResource().getVirtualCores());
}
}
handlerTimer = schedulerHandleTimer.time();
operationTimer = schedulerHandleTimerMap
.get(schedulerEvent.getType()).time();
scheduler.handle(schedulerEvent);
} finally {
if (handlerTimer != null) handlerTimer.stop();
if (operationTimer != null) operationTimer.stop();
schedulerHandleCounter.inc();
schedulerHandleCounterMap.get(schedulerEvent.getType()).inc();
if (schedulerEvent.getType() == SchedulerEventType.APP_REMOVED
&& schedulerEvent instanceof AppRemovedSchedulerEvent) {
SLSRunner.decreaseRemainingApps();
AppRemovedSchedulerEvent appRemoveEvent =
(AppRemovedSchedulerEvent) schedulerEvent;
appQueueMap.remove(appRemoveEvent.getApplicationID());
} else if (schedulerEvent.getType() == SchedulerEventType.APP_ADDED
&& schedulerEvent instanceof AppAddedSchedulerEvent) {
AppAddedSchedulerEvent appAddEvent =
(AppAddedSchedulerEvent) schedulerEvent;
String queueName = appAddEvent.getQueue();
appQueueMap.put(appAddEvent.getApplicationId(), queueName);
}
}
}
private void updateQueueWithNodeUpdate(
NodeUpdateSchedulerEventWrapper eventWrapper) {
RMNodeWrapper node = (RMNodeWrapper) eventWrapper.getRMNode();
List<UpdatedContainerInfo> containerList = node.getContainerUpdates();
for (UpdatedContainerInfo info : containerList) {
for (ContainerStatus status : info.getCompletedContainers()) {
ContainerId containerId = status.getContainerId();
SchedulerAppReport app = scheduler.getSchedulerAppInfo(
containerId.getApplicationAttemptId());
if (app == null) {
          // this happens for the AM container: the app has already been
          // removed by the time the NM sends the release information.
continue;
}
String queue =
appQueueMap.get(containerId.getApplicationAttemptId()
.getApplicationId());
int releasedMemory = 0, releasedVCores = 0;
if (status.getExitStatus() == ContainerExitStatus.SUCCESS) {
for (RMContainer rmc : app.getLiveContainers()) {
if (rmc.getContainerId() == containerId) {
releasedMemory += rmc.getContainer().getResource().getMemory();
releasedVCores += rmc.getContainer()
.getResource().getVirtualCores();
break;
}
}
} else if (status.getExitStatus() == ContainerExitStatus.ABORTED) {
if (preemptionContainerMap.containsKey(containerId)) {
Resource preResource = preemptionContainerMap.get(containerId);
releasedMemory += preResource.getMemory();
releasedVCores += preResource.getVirtualCores();
preemptionContainerMap.remove(containerId);
}
}
// update queue counters
updateQueueMetrics(queue, releasedMemory, releasedVCores);
}
}
}
private void updateQueueWithAllocateRequest(Allocation allocation,
ApplicationAttemptId attemptId,
List<ResourceRequest> resourceRequests,
List<ContainerId> containerIds) throws IOException {
// update queue information
Resource pendingResource = Resources.createResource(0, 0);
Resource allocatedResource = Resources.createResource(0, 0);
String queueName = appQueueMap.get(attemptId.getApplicationId());
// container requested
for (ResourceRequest request : resourceRequests) {
if (request.getResourceName().equals(ResourceRequest.ANY)) {
Resources.addTo(pendingResource,
Resources.multiply(request.getCapability(),
request.getNumContainers()));
}
}
// container allocated
for (Container container : allocation.getContainers()) {
Resources.addTo(allocatedResource, container.getResource());
Resources.subtractFrom(pendingResource, container.getResource());
}
// container released from AM
SchedulerAppReport report = scheduler.getSchedulerAppInfo(attemptId);
for (ContainerId containerId : containerIds) {
Container container = null;
for (RMContainer c : report.getLiveContainers()) {
if (c.getContainerId().equals(containerId)) {
container = c.getContainer();
break;
}
}
if (container != null) {
// released allocated containers
Resources.subtractFrom(allocatedResource, container.getResource());
} else {
for (RMContainer c : report.getReservedContainers()) {
if (c.getContainerId().equals(containerId)) {
container = c.getContainer();
break;
}
}
if (container != null) {
// released reserved containers
Resources.subtractFrom(pendingResource, container.getResource());
}
}
}
// containers released/preemption from scheduler
Set<ContainerId> preemptionContainers = new HashSet<ContainerId>();
if (allocation.getContainerPreemptions() != null) {
preemptionContainers.addAll(allocation.getContainerPreemptions());
}
if (allocation.getStrictContainerPreemptions() != null) {
preemptionContainers.addAll(allocation.getStrictContainerPreemptions());
}
if (! preemptionContainers.isEmpty()) {
for (ContainerId containerId : preemptionContainers) {
if (! preemptionContainerMap.containsKey(containerId)) {
Container container = null;
for (RMContainer c : report.getLiveContainers()) {
if (c.getContainerId().equals(containerId)) {
container = c.getContainer();
break;
}
}
if (container != null) {
preemptionContainerMap.put(containerId, container.getResource());
}
}
}
}
// update metrics
SortedMap<String, Counter> counterMap = metrics.getCounters();
    String[] names = new String[]{
"counter.queue." + queueName + ".pending.memory",
"counter.queue." + queueName + ".pending.cores",
"counter.queue." + queueName + ".allocated.memory",
"counter.queue." + queueName + ".allocated.cores"};
    int[] values = new int[]{pendingResource.getMemory(),
pendingResource.getVirtualCores(),
allocatedResource.getMemory(), allocatedResource.getVirtualCores()};
for (int i = names.length - 1; i >= 0; i --) {
if (! counterMap.containsKey(names[i])) {
metrics.counter(names[i]);
counterMap = metrics.getCounters();
}
counterMap.get(names[i]).inc(values[i]);
}
queueLock.lock();
try {
if (! schedulerMetrics.isTracked(queueName)) {
schedulerMetrics.trackQueue(queueName);
}
} finally {
queueLock.unlock();
}
}
private void tearDown() throws IOException {
// close job runtime writer
if (jobRuntimeLogBW != null) {
jobRuntimeLogBW.close();
}
// shut pool
if (pool != null) pool.shutdown();
}
@SuppressWarnings("unchecked")
private void initMetrics() throws Exception {
metrics = new MetricRegistry();
// configuration
metricsOutputDir = conf.get(SLSConfiguration.METRICS_OUTPUT_DIR);
int metricsWebAddressPort = conf.getInt(
SLSConfiguration.METRICS_WEB_ADDRESS_PORT,
SLSConfiguration.METRICS_WEB_ADDRESS_PORT_DEFAULT);
// create SchedulerMetrics for current scheduler
String schedulerMetricsType = conf.get(scheduler.getClass().getName());
Class schedulerMetricsClass = schedulerMetricsType == null?
defaultSchedulerMetricsMap.get(scheduler.getClass()) :
Class.forName(schedulerMetricsType);
schedulerMetrics = (SchedulerMetrics)ReflectionUtils
.newInstance(schedulerMetricsClass, new Configuration());
schedulerMetrics.init(scheduler, metrics);
// register various metrics
registerJvmMetrics();
registerClusterResourceMetrics();
registerContainerAppNumMetrics();
registerSchedulerMetrics();
// .csv output
initMetricsCSVOutput();
// start web app to provide real-time tracking
web = new SLSWebApp(this, metricsWebAddressPort);
web.start();
// a thread to update histogram timer
pool = new ScheduledThreadPoolExecutor(2);
pool.scheduleAtFixedRate(new HistogramsRunnable(), 0, 1000,
TimeUnit.MILLISECONDS);
    // a thread to output metrics for real-time tracking
pool.scheduleAtFixedRate(new MetricsLogRunnable(), 0, 1000,
TimeUnit.MILLISECONDS);
// application running information
jobRuntimeLogBW =
new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
metricsOutputDir + "/jobruntime.csv"), "UTF-8"));
jobRuntimeLogBW.write("JobID,real_start_time,real_end_time," +
"simulate_start_time,simulate_end_time" + EOL);
jobRuntimeLogBW.flush();
}
private void registerJvmMetrics() {
// add JVM gauges
metrics.register("variable.jvm.free.memory",
new Gauge<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().freeMemory();
}
}
);
metrics.register("variable.jvm.max.memory",
new Gauge<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().maxMemory();
}
}
);
metrics.register("variable.jvm.total.memory",
new Gauge<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().totalMemory();
}
}
);
}
private void registerClusterResourceMetrics() {
metrics.register("variable.cluster.allocated.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if(scheduler == null || scheduler.getRootQueueMetrics() == null) {
return 0;
} else {
return scheduler.getRootQueueMetrics().getAllocatedMB();
}
}
}
);
metrics.register("variable.cluster.allocated.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if(scheduler == null || scheduler.getRootQueueMetrics() == null) {
return 0;
} else {
return scheduler.getRootQueueMetrics().getAllocatedVirtualCores();
}
}
}
);
metrics.register("variable.cluster.available.memory",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if(scheduler == null || scheduler.getRootQueueMetrics() == null) {
return 0;
} else {
return scheduler.getRootQueueMetrics().getAvailableMB();
}
}
}
);
metrics.register("variable.cluster.available.vcores",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if(scheduler == null || scheduler.getRootQueueMetrics() == null) {
return 0;
} else {
return scheduler.getRootQueueMetrics().getAvailableVirtualCores();
}
}
}
);
}
private void registerContainerAppNumMetrics() {
metrics.register("variable.running.application",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if (scheduler == null || scheduler.getRootQueueMetrics() == null) {
return 0;
} else {
return scheduler.getRootQueueMetrics().getAppsRunning();
}
}
}
);
metrics.register("variable.running.container",
new Gauge<Integer>() {
@Override
public Integer getValue() {
if(scheduler == null || scheduler.getRootQueueMetrics() == null) {
return 0;
} else {
return scheduler.getRootQueueMetrics().getAllocatedContainers();
}
}
}
);
}
private void registerSchedulerMetrics() {
samplerLock.lock();
try {
// counters for scheduler operations
schedulerAllocateCounter = metrics.counter(
"counter.scheduler.operation.allocate");
schedulerHandleCounter = metrics.counter(
"counter.scheduler.operation.handle");
schedulerHandleCounterMap = new HashMap<SchedulerEventType, Counter>();
for (SchedulerEventType e : SchedulerEventType.values()) {
Counter counter = metrics.counter(
"counter.scheduler.operation.handle." + e);
schedulerHandleCounterMap.put(e, counter);
}
// timers for scheduler operations
int timeWindowSize = conf.getInt(
SLSConfiguration.METRICS_TIMER_WINDOW_SIZE,
SLSConfiguration.METRICS_TIMER_WINDOW_SIZE_DEFAULT);
schedulerAllocateTimer = new Timer(
new SlidingWindowReservoir(timeWindowSize));
schedulerHandleTimer = new Timer(
new SlidingWindowReservoir(timeWindowSize));
schedulerHandleTimerMap = new HashMap<SchedulerEventType, Timer>();
for (SchedulerEventType e : SchedulerEventType.values()) {
Timer timer = new Timer(new SlidingWindowReservoir(timeWindowSize));
schedulerHandleTimerMap.put(e, timer);
}
// histogram for scheduler operations (Samplers)
schedulerHistogramList = new ArrayList<Histogram>();
histogramTimerMap = new HashMap<Histogram, Timer>();
Histogram schedulerAllocateHistogram = new Histogram(
new SlidingWindowReservoir(SAMPLING_SIZE));
metrics.register("sampler.scheduler.operation.allocate.timecost",
schedulerAllocateHistogram);
schedulerHistogramList.add(schedulerAllocateHistogram);
histogramTimerMap.put(schedulerAllocateHistogram, schedulerAllocateTimer);
Histogram schedulerHandleHistogram = new Histogram(
new SlidingWindowReservoir(SAMPLING_SIZE));
metrics.register("sampler.scheduler.operation.handle.timecost",
schedulerHandleHistogram);
schedulerHistogramList.add(schedulerHandleHistogram);
histogramTimerMap.put(schedulerHandleHistogram, schedulerHandleTimer);
for (SchedulerEventType e : SchedulerEventType.values()) {
Histogram histogram = new Histogram(
new SlidingWindowReservoir(SAMPLING_SIZE));
metrics.register(
"sampler.scheduler.operation.handle." + e + ".timecost",
histogram);
schedulerHistogramList.add(histogram);
histogramTimerMap.put(histogram, schedulerHandleTimerMap.get(e));
}
} finally {
samplerLock.unlock();
}
}
private void initMetricsCSVOutput() {
int timeIntervalMS = conf.getInt(
SLSConfiguration.METRICS_RECORD_INTERVAL_MS,
SLSConfiguration.METRICS_RECORD_INTERVAL_MS_DEFAULT);
File dir = new File(metricsOutputDir + "/metrics");
if(! dir.exists()
&& ! dir.mkdirs()) {
LOG.error("Cannot create directory " + dir.getAbsoluteFile());
}
final CsvReporter reporter = CsvReporter.forRegistry(metrics)
.formatFor(Locale.US)
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.build(new File(metricsOutputDir + "/metrics"));
reporter.start(timeIntervalMS, TimeUnit.MILLISECONDS);
}
class HistogramsRunnable implements Runnable {
@Override
public void run() {
samplerLock.lock();
try {
for (Histogram histogram : schedulerHistogramList) {
Timer timer = histogramTimerMap.get(histogram);
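        // Sample the paired timer's current mean latency (nanoseconds) so
        // each histogram accumulates a time series of means.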
histogram.update((int) timer.getSnapshot().getMean());
}
} finally {
samplerLock.unlock();
}
}
}
class MetricsLogRunnable implements Runnable {
private boolean firstLine = true;
public MetricsLogRunnable() {
try {
metricsLogBW =
new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
metricsOutputDir + "/realtimetrack.json"), "UTF-8"));
metricsLogBW.write("[");
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void run() {
if(running) {
        // ask the web app for the real-time tracking JSON
String metrics = web.generateRealTimeTrackingMetrics();
// output
try {
if(firstLine) {
metricsLogBW.write(metrics + EOL);
firstLine = false;
} else {
metricsLogBW.write("," + metrics + EOL);
}
metricsLogBW.flush();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
// the following functions are used by AMSimulator
public void addAMRuntime(ApplicationId appId,
long traceStartTimeMS, long traceEndTimeMS,
long simulateStartTimeMS, long simulateEndTimeMS) {
if (metricsON) {
try {
// write job runtime information
StringBuilder sb = new StringBuilder();
sb.append(appId).append(",").append(traceStartTimeMS).append(",")
.append(traceEndTimeMS).append(",").append(simulateStartTimeMS)
.append(",").append(simulateEndTimeMS);
jobRuntimeLogBW.write(sb.toString() + EOL);
jobRuntimeLogBW.flush();
} catch (IOException e) {
e.printStackTrace();
}
}
}
private void updateQueueMetrics(String queue,
int releasedMemory, int releasedVCores) {
// update queue counters
SortedMap<String, Counter> counterMap = metrics.getCounters();
if (releasedMemory != 0) {
String name = "counter.queue." + queue + ".allocated.memory";
if (! counterMap.containsKey(name)) {
metrics.counter(name);
counterMap = metrics.getCounters();
}
counterMap.get(name).inc(-releasedMemory);
}
if (releasedVCores != 0) {
String name = "counter.queue." + queue + ".allocated.cores";
if (! counterMap.containsKey(name)) {
metrics.counter(name);
counterMap = metrics.getCounters();
}
counterMap.get(name).inc(-releasedVCores);
}
}
public void setQueueSet(Set<String> queues) {
this.queueSet = queues;
}
public Set<String> getQueueSet() {
return this.queueSet;
}
public void setTrackedAppSet(Set<String> apps) {
this.trackedAppSet = apps;
}
public Set<String> getTrackedAppSet() {
return this.trackedAppSet;
}
public MetricRegistry getMetrics() {
return metrics;
}
public SchedulerMetrics getSchedulerMetrics() {
return schedulerMetrics;
}
  // API open to outside classes
public void addTrackedApp(ApplicationAttemptId appAttemptId,
String oldAppId) {
if (metricsON) {
schedulerMetrics.trackApp(appAttemptId, oldAppId);
}
}
public void removeTrackedApp(ApplicationAttemptId appAttemptId,
String oldAppId) {
if (metricsON) {
schedulerMetrics.untrackApp(appAttemptId, oldAppId);
}
}
@Override
public Configuration getConf() {
return conf;
}
@SuppressWarnings("unchecked")
@Override
public void serviceInit(Configuration conf) throws Exception {
((AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>)
scheduler).init(conf);
super.serviceInit(conf);
}
@SuppressWarnings("unchecked")
@Override
public void serviceStart() throws Exception {
((AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>)
scheduler).start();
super.serviceStart();
}
@SuppressWarnings("unchecked")
@Override
public void serviceStop() throws Exception {
((AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>)
scheduler).stop();
super.serviceStop();
}
@Override
public void setRMContext(RMContext rmContext) {
scheduler.setRMContext(rmContext);
}
@Override
public void reinitialize(Configuration conf, RMContext rmContext)
throws IOException {
scheduler.reinitialize(conf, rmContext);
}
@Override
public void recover(RMStateStore.RMState rmState) throws Exception {
scheduler.recover(rmState);
}
@Override
public QueueInfo getQueueInfo(String s, boolean b, boolean b2)
throws IOException {
return scheduler.getQueueInfo(s, b, b2);
}
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo() {
return scheduler.getQueueUserAclInfo();
}
@Override
public Resource getMinimumResourceCapability() {
return scheduler.getMinimumResourceCapability();
}
@Override
public Resource getMaximumResourceCapability() {
return scheduler.getMaximumResourceCapability();
}
@Override
public ResourceCalculator getResourceCalculator() {
return scheduler.getResourceCalculator();
}
@Override
public int getNumClusterNodes() {
return scheduler.getNumClusterNodes();
}
@Override
public SchedulerNodeReport getNodeReport(NodeId nodeId) {
return scheduler.getNodeReport(nodeId);
}
@Override
public SchedulerAppReport getSchedulerAppInfo(
ApplicationAttemptId attemptId) {
return scheduler.getSchedulerAppInfo(attemptId);
}
@Override
public QueueMetrics getRootQueueMetrics() {
return scheduler.getRootQueueMetrics();
}
@Override
public synchronized boolean checkAccess(UserGroupInformation callerUGI,
QueueACL acl, String queueName) {
return scheduler.checkAccess(callerUGI, acl, queueName);
}
@Override
public ApplicationResourceUsageReport getAppResourceUsageReport(
ApplicationAttemptId appAttemptId) {
return scheduler.getAppResourceUsageReport(appAttemptId);
}
@Override
public List<ApplicationAttemptId> getAppsInQueue(String queue) {
return scheduler.getAppsInQueue(queue);
}
@Override
public RMContainer getRMContainer(ContainerId containerId) {
return null;
}
@Override
public String moveApplication(ApplicationId appId, String newQueue)
throws YarnException {
return scheduler.moveApplication(appId, newQueue);
}
@Override
@LimitedPrivate("yarn")
@Unstable
public Resource getClusterResource() {
return null;
}
@Override
public synchronized List<Container> getTransferredContainers(
ApplicationAttemptId currentAttempt) {
return new ArrayList<Container>();
}
@Override
public Map<ApplicationId, SchedulerApplication<SchedulerApplicationAttempt>>
getSchedulerApplications() {
return new HashMap<ApplicationId,
SchedulerApplication<SchedulerApplicationAttempt>>();
}
@Override
protected void completedContainer(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event) {
// do nothing
}
@Override
public Priority checkAndGetApplicationPriority(Priority priority,
String user, String queueName, ApplicationId applicationId)
throws YarnException {
// TODO Dummy implementation.
return Priority.newInstance(0);
}
}
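// The queue counters above follow a get-or-create pattern against the
// registry. A condensed stand-alone sketch of that idiom (counter names are
// illustrative); note that MetricRegistry#counter is itself get-or-create,
// so the containsKey checks in the wrapper could be collapsed to one call:

import com.codahale.metrics.MetricRegistry;

public class QueueCounterSketch {
  private final MetricRegistry metrics = new MetricRegistry();

  // Mirrors updateQueueMetrics(): negative deltas model released resources.
  void incQueueCounter(String name, long delta) {
    metrics.counter(name).inc(delta);
  }

  public static void main(String[] args) {
    QueueCounterSketch s = new QueueCounterSketch();
    s.incQueueCounter("counter.queue.default.allocated.memory", 4096);
    s.incQueueCounter("counter.queue.default.allocated.memory", -1024);
    // Prints 3072: the net allocated memory for the queue.
    System.out.println(s.metrics
        .counter("counter.queue.default.allocated.memory").getCount());
  }
}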
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo
.FifoScheduler;
import com.codahale.metrics.Gauge;
@Private
@Unstable
public class FifoSchedulerMetrics extends SchedulerMetrics {
public FifoSchedulerMetrics() {
super();
}
@Override
public void trackQueue(String queueName) {
trackedQueues.add(queueName);
FifoScheduler fifo = (FifoScheduler) scheduler;
// for FifoScheduler, only DEFAULT_QUEUE
    // the three parameters don't affect the result here
final QueueInfo queue = fifo.getQueueInfo(queueName, false, false);
// track currentCapacity, maximumCapacity (always 1.0f)
metrics.register("variable.queue." + queueName + ".currentcapacity",
new Gauge<Float>() {
@Override
public Float getValue() {
return queue.getCurrentCapacity();
}
}
);
metrics.register("variable.queue." + queueName + ".",
new Gauge<Float>() {
@Override
public Float getValue() {
return queue.getCurrentCapacity();
}
}
);
}
}
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler
.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler
.SchedulerAppReport;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
@Private
@Unstable
public abstract class SchedulerMetrics {
protected ResourceScheduler scheduler;
protected Set<String> trackedQueues;
protected MetricRegistry metrics;
protected Set<String> appTrackedMetrics;
protected Set<String> queueTrackedMetrics;
public SchedulerMetrics() {
appTrackedMetrics = new HashSet<String>();
appTrackedMetrics.add("live.containers");
appTrackedMetrics.add("reserved.containers");
queueTrackedMetrics = new HashSet<String>();
}
public void init(ResourceScheduler scheduler, MetricRegistry metrics) {
this.scheduler = scheduler;
this.trackedQueues = new HashSet<String>();
this.metrics = metrics;
}
public void trackApp(final ApplicationAttemptId appAttemptId,
String oldAppId) {
metrics.register("variable.app." + oldAppId + ".live.containers",
new Gauge<Integer>() {
@Override
public Integer getValue() {
SchedulerAppReport app = scheduler.getSchedulerAppInfo(appAttemptId);
return app.getLiveContainers().size();
}
}
);
metrics.register("variable.app." + oldAppId + ".reserved.containers",
new Gauge<Integer>() {
@Override
public Integer getValue() {
SchedulerAppReport app = scheduler.getSchedulerAppInfo(appAttemptId);
return app.getReservedContainers().size();
}
}
);
}
public void untrackApp(ApplicationAttemptId appAttemptId,
String oldAppId) {
for (String m : appTrackedMetrics) {
metrics.remove("variable.app." + oldAppId + "." + m);
}
}
public abstract void trackQueue(String queueName);
public void untrackQueue(String queueName) {
for (String m : queueTrackedMetrics) {
metrics.remove("variable.queue." + queueName + "." + m);
}
}
public boolean isTracked(String queueName) {
return trackedQueues.contains(queueName);
}
public Set<String> getAppTrackedMetrics() {
return appTrackedMetrics;
}
public Set<String> getQueueTrackedMetrics() {
return queueTrackedMetrics;
}
}
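// As a sketch of the extension point this abstract class defines: a minimal
// subclass (illustrative name, assumed to live in the same package) that
// registers one per-queue gauge and records its suffix in
// queueTrackedMetrics so the base untrackQueue() can remove it again:

public class MinimalSchedulerMetrics extends SchedulerMetrics {
  public MinimalSchedulerMetrics() {
    super();
    queueTrackedMetrics.add("depth");
  }

  @Override
  public void trackQueue(final String queueName) {
    trackedQueues.add(queueName);
    // Gauge suffix must match the entry in queueTrackedMetrics above.
    metrics.register("variable.queue." + queueName + ".depth",
        new com.codahale.metrics.Gauge<Integer>() {
          @Override
          public Integer getValue() {
            // Illustrative value: nesting depth of the dotted queue name.
            return queueName.split("\\.").length;
          }
        });
  }
}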
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode
.UpdatedContainerInfo;
import java.util.Collections;
import java.util.List;
import java.util.Set;
@Private
@Unstable
public class RMNodeWrapper implements RMNode {
private RMNode node;
private List<UpdatedContainerInfo> updates;
private boolean pulled = false;
public RMNodeWrapper(RMNode node) {
this.node = node;
updates = node.pullContainerUpdates();
}
@Override
public NodeId getNodeID() {
return node.getNodeID();
}
@Override
public String getHostName() {
return node.getHostName();
}
@Override
public int getCommandPort() {
return node.getCommandPort();
}
@Override
public int getHttpPort() {
return node.getHttpPort();
}
@Override
public String getNodeAddress() {
return node.getNodeAddress();
}
@Override
public String getHttpAddress() {
return node.getHttpAddress();
}
@Override
public String getHealthReport() {
return node.getHealthReport();
}
@Override
public long getLastHealthReportTime() {
return node.getLastHealthReportTime();
}
@Override
public Resource getTotalCapability() {
return node.getTotalCapability();
}
@Override
public String getRackName() {
return node.getRackName();
}
@Override
public Node getNode() {
return node.getNode();
}
@Override
public NodeState getState() {
return node.getState();
}
@Override
public List<ContainerId> getContainersToCleanUp() {
return node.getContainersToCleanUp();
}
@Override
public List<ApplicationId> getAppsToCleanup() {
return node.getAppsToCleanup();
}
@Override
public List<ApplicationId> getRunningApps() {
return node.getRunningApps();
}
@Override
public void updateNodeHeartbeatResponseForCleanup(
NodeHeartbeatResponse nodeHeartbeatResponse) {
node.updateNodeHeartbeatResponseForCleanup(nodeHeartbeatResponse);
}
@Override
public NodeHeartbeatResponse getLastNodeHeartBeatResponse() {
return node.getLastNodeHeartBeatResponse();
}
@Override
  public List<UpdatedContainerInfo> pullContainerUpdates() {
    List<UpdatedContainerInfo> list = Collections.emptyList();
if (! pulled) {
list = updates;
pulled = true;
}
return list;
}
List<UpdatedContainerInfo> getContainerUpdates() {
return updates;
}
@Override
public String getNodeManagerVersion() {
return node.getNodeManagerVersion();
}
@Override
public Set<String> getNodeLabels() {
return RMNodeLabelsManager.EMPTY_STRING_SET;
}
}
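// The wrapper's pullContainerUpdates() is deliberately one-shot: updates are
// drained from the wrapped node in the constructor, handed out on the first
// call, and an empty list is returned afterwards, while the package-private
// getContainerUpdates() keeps them visible to the SLS metrics path. A
// stand-alone model of that hand-off (illustrative, not Hadoop API):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class OneShotPullSketch {
  private final List<String> updates = Arrays.asList("container_1 completed");
  private boolean pulled = false;

  List<String> pullUpdates() {
    if (pulled) {
      return Collections.emptyList();
    }
    pulled = true;
    return updates;
  }

  public static void main(String[] args) {
    OneShotPullSketch node = new OneShotPullSketch();
    System.out.println(node.pullUpdates()); // [container_1 completed]
    System.out.println(node.pullUpdates()); // []
  }
}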
// File: hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.web;
import java.io.File;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
import org.apache.hadoop.yarn.sls.SLSRunner;
import org.apache.hadoop.yarn.sls.scheduler.FairSchedulerMetrics;
import org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper;
import org.apache.hadoop.yarn.sls.scheduler.SchedulerMetrics;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.Request;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.AbstractHandler;
import org.mortbay.jetty.handler.ResourceHandler;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
@Private
@Unstable
public class SLSWebApp extends HttpServlet {
private static final long serialVersionUID = 1905162041950251407L;
private transient Server server;
private transient ResourceSchedulerWrapper wrapper;
private transient MetricRegistry metrics;
private transient SchedulerMetrics schedulerMetrics;
// metrics objects
private transient Gauge jvmFreeMemoryGauge;
private transient Gauge jvmMaxMemoryGauge;
private transient Gauge jvmTotalMemoryGauge;
private transient Gauge numRunningAppsGauge;
private transient Gauge numRunningContainersGauge;
private transient Gauge allocatedMemoryGauge;
private transient Gauge allocatedVCoresGauge;
private transient Gauge availableMemoryGauge;
private transient Gauge availableVCoresGauge;
private transient Histogram allocateTimecostHistogram;
private transient Histogram handleTimecostHistogram;
private transient Map<SchedulerEventType, Histogram>
handleOperTimecostHistogramMap;
private transient Map<String, Counter> queueAllocatedMemoryCounterMap;
private transient Map<String, Counter> queueAllocatedVCoresCounterMap;
private int port;
private int ajaxUpdateTimeMS = 1000;
// html page templates
private String simulateInfoTemplate;
private String simulateTemplate;
private String trackTemplate;
{
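    // Instance initializer: runs on every construction, before the
    // constructor body, and loads the HTML templates from the classpath.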
// load templates
ClassLoader cl = Thread.currentThread().getContextClassLoader();
try {
simulateInfoTemplate = FileUtils.readFileToString(new File(
cl.getResource("simulate.info.html.template").getFile()));
simulateTemplate = FileUtils.readFileToString(new File(
cl.getResource("simulate.html.template").getFile()));
trackTemplate = FileUtils.readFileToString(new File(
cl.getResource("track.html.template").getFile()));
} catch (IOException e) {
e.printStackTrace();
}
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
in.defaultReadObject();
handleOperTimecostHistogramMap = new HashMap<>();
queueAllocatedMemoryCounterMap = new HashMap<>();
queueAllocatedVCoresCounterMap = new HashMap<>();
}
public SLSWebApp(ResourceSchedulerWrapper wrapper, int metricsAddressPort) {
this.wrapper = wrapper;
metrics = wrapper.getMetrics();
handleOperTimecostHistogramMap =
new HashMap<SchedulerEventType, Histogram>();
queueAllocatedMemoryCounterMap = new HashMap<String, Counter>();
queueAllocatedVCoresCounterMap = new HashMap<String, Counter>();
schedulerMetrics = wrapper.getSchedulerMetrics();
port = metricsAddressPort;
}
public void start() throws Exception {
// static files
final ResourceHandler staticHandler = new ResourceHandler();
staticHandler.setResourceBase("html");
Handler handler = new AbstractHandler() {
@Override
public void handle(String target, HttpServletRequest request,
HttpServletResponse response, int dispatch) {
try{
// timeunit
          int timeunit = 1000; // seconds: divide milliseconds by 1000
String timeunitLabel = "second";
if (request.getParameter("u")!= null &&
request.getParameter("u").equalsIgnoreCase("m")) {
timeunit = 1000 * 60;
timeunitLabel = "minute";
}
// http request
if (target.equals("/")) {
printPageIndex(request, response);
} else if (target.equals("/simulate")) {
printPageSimulate(request, response, timeunit, timeunitLabel);
} else if (target.equals("/track")) {
printPageTrack(request, response, timeunit, timeunitLabel);
} else
// js/css request
if (target.startsWith("/js") || target.startsWith("/css")) {
response.setCharacterEncoding("utf-8");
staticHandler.handle(target, request, response, dispatch);
} else
// json request
if (target.equals("/simulateMetrics")) {
printJsonMetrics(request, response);
} else if (target.equals("/trackMetrics")) {
printJsonTrack(request, response);
}
} catch (Exception e) {
e.printStackTrace();
}
}
};
server = new Server(port);
server.setHandler(handler);
server.start();
}
public void stop() throws Exception {
if (server != null) {
server.stop();
}
}
/**
   * Index HTML page showing the simulation info.
   * Served at path "/".
* @param request http request
* @param response http response
* @throws java.io.IOException
*/
private void printPageIndex(HttpServletRequest request,
HttpServletResponse response) throws IOException {
response.setContentType("text/html");
response.setStatus(HttpServletResponse.SC_OK);
String simulateInfo;
if (SLSRunner.simulateInfoMap.isEmpty()) {
String empty = "<tr><td colspan='2' align='center'>" +
"No information available</td></tr>";
simulateInfo = MessageFormat.format(simulateInfoTemplate, empty);
} else {
StringBuilder info = new StringBuilder();
for (Map.Entry<String, Object> entry :
SLSRunner.simulateInfoMap.entrySet()) {
info.append("<tr>");
info.append("<td class='td1'>").append(entry.getKey()).append("</td>");
info.append("<td class='td2'>").append(entry.getValue())
.append("</td>");
info.append("</tr>");
}
simulateInfo =
MessageFormat.format(simulateInfoTemplate, info.toString());
}
response.getWriter().println(simulateInfo);
((Request) request).setHandled(true);
}
/**
* Simulation HTML page showing several real-time charts.
* Path: "/simulate"; rendered with d3.js.
* @param request http request
* @param response http response
* @throws java.io.IOException
*/
private void printPageSimulate(HttpServletRequest request,
HttpServletResponse response, int timeunit,
String timeunitLabel)
throws IOException {
response.setContentType("text/html");
response.setStatus(HttpServletResponse.SC_OK);
// queues {0}
Set<String> queues = wrapper.getQueueSet();
StringBuilder queueInfo = new StringBuilder();
int i = 0;
for (String queue : queues) {
queueInfo.append("legends[4][").append(i).append("] = 'queue.")
.append(queue).append(".allocated.memory';");
queueInfo.append("legends[5][").append(i).append("] = 'queue.")
.append(queue).append(".allocated.vcores';");
i ++;
}
// time unit label {1}
// time unit {2}
// ajax update time interval {3}
String simulateInfo = MessageFormat.format(simulateTemplate,
queueInfo.toString(), timeunitLabel, "" + timeunit,
"" + ajaxUpdateTimeMS);
response.getWriter().println(simulateInfo);
((Request) request).setHandled(true);
}
/**
* HTML page for tracking one queue or job.
* Path: "/track"; rendered with d3.js.
* @param request http request
* @param response http response
* @throws java.io.IOException
*/
private void printPageTrack(HttpServletRequest request,
HttpServletResponse response, int timeunit,
String timeunitLabel)
throws IOException {
response.setContentType("text/html");
response.setStatus(HttpServletResponse.SC_OK);
// tracked queues {0}
StringBuilder trackedQueueInfo = new StringBuilder();
Set<String> trackedQueues = wrapper.getQueueSet();
for(String queue : trackedQueues) {
trackedQueueInfo.append("<option value='Queue ").append(queue)
.append("'>").append(queue).append("</option>");
}
// tracked apps {1}
StringBuilder trackedAppInfo = new StringBuilder();
Set<String> trackedApps = wrapper.getTrackedAppSet();
for(String job : trackedApps) {
trackedAppInfo.append("<option value='Job ").append(job)
.append("'>").append(job).append("</option>");
}
// timeunit label {2}
// time unit {3}
// ajax update time {4}
// final html
String trackInfo = MessageFormat.format(trackTemplate,
trackedQueueInfo.toString(), trackedAppInfo.toString(),
timeunitLabel, "" + timeunit, "" + ajaxUpdateTimeMS);
response.getWriter().println(trackInfo);
((Request) request).setHandled(true);
}
/**
* Package metrics information as JSON and write it to the response.
* @param request http request
* @param response http response
* @throws java.io.IOException
*/
private void printJsonMetrics(HttpServletRequest request,
HttpServletResponse response)
throws IOException {
response.setContentType("text/json");
response.setStatus(HttpServletResponse.SC_OK);
response.getWriter().println(generateRealTimeTrackingMetrics());
((Request) request).setHandled(true);
}
public String generateRealTimeTrackingMetrics() {
// JVM
double jvmFreeMemoryGB, jvmMaxMemoryGB, jvmTotalMemoryGB;
if (jvmFreeMemoryGauge == null &&
metrics.getGauges().containsKey("variable.jvm.free.memory")) {
jvmFreeMemoryGauge = metrics.getGauges().get("variable.jvm.free.memory");
}
if (jvmMaxMemoryGauge == null &&
metrics.getGauges().containsKey("variable.jvm.max.memory")) {
jvmMaxMemoryGauge = metrics.getGauges().get("variable.jvm.max.memory");
}
if (jvmTotalMemoryGauge == null &&
metrics.getGauges().containsKey("variable.jvm.total.memory")) {
jvmTotalMemoryGauge = metrics.getGauges()
.get("variable.jvm.total.memory");
}
jvmFreeMemoryGB = jvmFreeMemoryGauge == null ? 0 :
Double.parseDouble(jvmFreeMemoryGauge.getValue().toString())
/1024/1024/1024;
jvmMaxMemoryGB = jvmMaxMemoryGauge == null ? 0 :
Double.parseDouble(jvmMaxMemoryGauge.getValue().toString())
/1024/1024/1024;
jvmTotalMemoryGB = jvmTotalMemoryGauge == null ? 0 :
Double.parseDouble(jvmTotalMemoryGauge.getValue().toString())
/1024/1024/1024;
// number of running applications/containers
String numRunningApps, numRunningContainers;
if (numRunningAppsGauge == null &&
metrics.getGauges().containsKey("variable.running.application")) {
numRunningAppsGauge =
metrics.getGauges().get("variable.running.application");
}
if (numRunningContainersGauge == null &&
metrics.getGauges().containsKey("variable.running.container")) {
numRunningContainersGauge =
metrics.getGauges().get("variable.running.container");
}
numRunningApps = numRunningAppsGauge == null ? "0" :
numRunningAppsGauge.getValue().toString();
numRunningContainers = numRunningContainersGauge == null ? "0" :
numRunningContainersGauge.getValue().toString();
// cluster available/allocate resource
double allocatedMemoryGB, allocatedVCores,
availableMemoryGB, availableVCores;
if (allocatedMemoryGauge == null &&
metrics.getGauges()
.containsKey("variable.cluster.allocated.memory")) {
allocatedMemoryGauge = metrics.getGauges()
.get("variable.cluster.allocated.memory");
}
if (allocatedVCoresGauge == null &&
metrics.getGauges()
.containsKey("variable.cluster.allocated.vcores")) {
allocatedVCoresGauge = metrics.getGauges()
.get("variable.cluster.allocated.vcores");
}
if (availableMemoryGauge == null &&
metrics.getGauges()
.containsKey("variable.cluster.available.memory")) {
availableMemoryGauge = metrics.getGauges()
.get("variable.cluster.available.memory");
}
if (availableVCoresGauge == null &&
metrics.getGauges()
.containsKey("variable.cluster.available.vcores")) {
availableVCoresGauge = metrics.getGauges()
.get("variable.cluster.available.vcores");
}
allocatedMemoryGB = allocatedMemoryGauge == null ? 0 :
Double.parseDouble(allocatedMemoryGauge.getValue().toString())/1024;
allocatedVCores = allocatedVCoresGauge == null ? 0 :
Double.parseDouble(allocatedVCoresGauge.getValue().toString());
availableMemoryGB = availableMemoryGauge == null ? 0 :
Double.parseDouble(availableMemoryGauge.getValue().toString())/1024;
availableVCores = availableVCoresGauge == null ? 0 :
Double.parseDouble(availableVCoresGauge.getValue().toString());
// scheduler operation
double allocateTimecost, handleTimecost;
if (allocateTimecostHistogram == null &&
metrics.getHistograms().containsKey(
"sampler.scheduler.operation.allocate.timecost")) {
allocateTimecostHistogram = metrics.getHistograms()
.get("sampler.scheduler.operation.allocate.timecost");
}
if (handleTimecostHistogram == null &&
metrics.getHistograms().containsKey(
"sampler.scheduler.operation.handle.timecost")) {
handleTimecostHistogram = metrics.getHistograms()
.get("sampler.scheduler.operation.handle.timecost");
}
allocateTimecost = allocateTimecostHistogram == null ? 0.0 :
allocateTimecostHistogram.getSnapshot().getMean()/1000000;
handleTimecost = handleTimecostHistogram == null ? 0.0 :
handleTimecostHistogram.getSnapshot().getMean()/1000000;
// various handle operation
Map<SchedulerEventType, Double> handleOperTimecostMap =
new HashMap<SchedulerEventType, Double>();
for (SchedulerEventType e : SchedulerEventType.values()) {
String key = "sampler.scheduler.operation.handle." + e + ".timecost";
if (! handleOperTimecostHistogramMap.containsKey(e) &&
metrics.getHistograms().containsKey(key)) {
handleOperTimecostHistogramMap.put(e, metrics.getHistograms().get(key));
}
double timecost = handleOperTimecostHistogramMap.containsKey(e) ?
handleOperTimecostHistogramMap.get(e).getSnapshot().getMean()/1000000
: 0;
handleOperTimecostMap.put(e, timecost);
}
// allocated resource for each queue
Map<String, Double> queueAllocatedMemoryMap = new HashMap<String, Double>();
Map<String, Long> queueAllocatedVCoresMap = new HashMap<String, Long>();
for (String queue : wrapper.getQueueSet()) {
// memory
String key = "counter.queue." + queue + ".allocated.memory";
if (! queueAllocatedMemoryCounterMap.containsKey(queue) &&
metrics.getCounters().containsKey(key)) {
queueAllocatedMemoryCounterMap.put(queue,
metrics.getCounters().get(key));
}
double queueAllocatedMemoryGB =
queueAllocatedMemoryCounterMap.containsKey(queue) ?
queueAllocatedMemoryCounterMap.get(queue).getCount()/1024.0
: 0;
queueAllocatedMemoryMap.put(queue, queueAllocatedMemoryGB);
// vCores
key = "counter.queue." + queue + ".allocated.cores";
if (! queueAllocatedVCoresCounterMap.containsKey(queue) &&
metrics.getCounters().containsKey(key)) {
queueAllocatedVCoresCounterMap.put(
queue, metrics.getCounters().get(key));
}
long queueAllocatedVCores =
queueAllocatedVCoresCounterMap.containsKey(queue) ?
queueAllocatedVCoresCounterMap.get(queue).getCount(): 0;
queueAllocatedVCoresMap.put(queue, queueAllocatedVCores);
}
// package results
StringBuilder sb = new StringBuilder();
sb.append("{");
sb.append("\"time\":" ).append(System.currentTimeMillis())
.append(",\"jvm.free.memory\":").append(jvmFreeMemoryGB)
.append(",\"jvm.max.memory\":").append(jvmMaxMemoryGB)
.append(",\"jvm.total.memory\":").append(jvmTotalMemoryGB)
.append(",\"running.applications\":").append(numRunningApps)
.append(",\"running.containers\":").append(numRunningContainers)
.append(",\"cluster.allocated.memory\":").append(allocatedMemoryGB)
.append(",\"cluster.allocated.vcores\":").append(allocatedVCoresGB)
.append(",\"cluster.available.memory\":").append(availableMemoryGB)
.append(",\"cluster.available.vcores\":").append(availableVCoresGB);
for (String queue : wrapper.getQueueSet()) {
sb.append(",\"queue.").append(queue).append(".allocated.memory\":")
.append(queueAllocatedMemoryMap.get(queue));
sb.append(",\"queue.").append(queue).append(".allocated.vcores\":")
.append(queueAllocatedVCoresMap.get(queue));
}
// scheduler allocate & handle
sb.append(",\"scheduler.allocate.timecost\":").append(allocateTimecost);
sb.append(",\"scheduler.handle.timecost\":").append(handleTimecost);
for (SchedulerEventType e : SchedulerEventType.values()) {
sb.append(",\"scheduler.handle-").append(e).append(".timecost\":")
.append(handleOperTimecostMap.get(e));
}
sb.append("}");
return sb.toString();
}
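// Illustrative payload produced above (values are made up; the key set
// follows the appends in generateRealTimeTrackingMetrics):
//   {"time":1400000000000,"jvm.free.memory":1.2,"jvm.max.memory":3.5,
//    "jvm.total.memory":2.0,"running.applications":4,
//    "running.containers":20,"cluster.allocated.memory":100.0,
//    "cluster.allocated.vcores":50.0,"cluster.available.memory":60.0,
//    "cluster.available.vcores":30.0,...,
//    "scheduler.allocate.timecost":0.2,"scheduler.handle.timecost":0.3,...}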
/**
* Package metrics information for one tracked queue/app as JSON.
* Only the FairScheduler is currently supported.
* @param request http request
* @param response http response
* @throws java.io.IOException
*/
private void printJsonTrack(HttpServletRequest request,
HttpServletResponse response) throws IOException {
response.setContentType("text/json");
response.setStatus(HttpServletResponse.SC_OK);
StringBuilder sb = new StringBuilder();
if(schedulerMetrics instanceof FairSchedulerMetrics) {
String para = request.getParameter("t");
if (para.startsWith("Job ")) {
String appId = para.substring("Job ".length());
sb.append("{");
sb.append("\"time\": ").append(System.currentTimeMillis()).append(",");
sb.append("\"appId\": \"").append(appId).append("\"");
for(String metric : this.schedulerMetrics.getAppTrackedMetrics()) {
String key = "variable.app." + appId + "." + metric;
sb.append(",\"").append(metric).append("\": ");
if (metrics.getGauges().containsKey(key)) {
double memoryGB =
Double.parseDouble(
metrics.getGauges().get(key).getValue().toString())
/ 1024;
sb.append(memoryGB);
} else {
sb.append(-1);
}
}
sb.append("}");
} else if(para.startsWith("Queue ")) {
String queueName = para.substring("Queue ".length());
sb.append("{");
sb.append("\"time\": ").append(System.currentTimeMillis()).append(",");
sb.append("\"queueName\": \"").append(queueName).append("\"");
for(String metric : this.schedulerMetrics.getQueueTrackedMetrics()) {
String key = "variable.queue." + queueName + "." + metric;
sb.append(",\"").append(metric).append("\": ");
if (metrics.getGauges().containsKey(key)) {
double memoryGB =
Double.parseDouble(
metrics.getGauges().get(key).getValue().toString())
/ 1024;
sb.append(memoryGB);
} else {
sb.append(-1);
}
}
sb.append("}");
}
}
// package result
String output = sb.toString();
if (output.isEmpty()) {
output = "[]";
}
response.getWriter().println(output);
((Request) request).setHandled(true);
}
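// The "t" request parameter selects the tracked entity; example URLs
// (queue/app names are placeholders):
//   /trackMetrics?t=Queue%20root.default
//   /trackMetrics?t=Job%20job_1_0001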
}
| 21,940 | 39.556377 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.nodemanager;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode
.UpdatedContainerInfo;
@Private
@Unstable
public class NodeInfo {
private static int NODE_ID = 0;
public static NodeId newNodeID(String host, int port) {
return NodeId.newInstance(host, port);
}
@Private
@Unstable
private static class FakeRMNodeImpl implements RMNode {
private NodeId nodeId;
private String hostName;
private String nodeAddr;
private String httpAddress;
private int cmdPort;
private volatile Resource perNode;
private String rackName;
private String healthReport;
private NodeState state;
private List<ContainerId> toCleanUpContainers;
private List<ApplicationId> toCleanUpApplications;
private List<ApplicationId> runningApplications;
public FakeRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
Resource perNode, String rackName, String healthReport,
int cmdPort, String hostName, NodeState state) {
this.nodeId = nodeId;
this.nodeAddr = nodeAddr;
this.httpAddress = httpAddress;
this.perNode = perNode;
this.rackName = rackName;
this.healthReport = healthReport;
this.cmdPort = cmdPort;
this.hostName = hostName;
this.state = state;
toCleanUpApplications = new ArrayList<ApplicationId>();
toCleanUpContainers = new ArrayList<ContainerId>();
runningApplications = new ArrayList<ApplicationId>();
}
public NodeId getNodeID() {
return nodeId;
}
public String getHostName() {
return hostName;
}
public int getCommandPort() {
return cmdPort;
}
public int getHttpPort() {
return 0;
}
public String getNodeAddress() {
return nodeAddr;
}
public String getHttpAddress() {
return httpAddress;
}
public String getHealthReport() {
return healthReport;
}
public long getLastHealthReportTime() {
return 0;
}
public Resource getTotalCapability() {
return perNode;
}
public String getRackName() {
return rackName;
}
public Node getNode() {
throw new UnsupportedOperationException("Not supported yet.");
}
public NodeState getState() {
return state;
}
public List<ContainerId> getContainersToCleanUp() {
return toCleanUpContainers;
}
public List<ApplicationId> getAppsToCleanup() {
return toCleanUpApplications;
}
public List<ApplicationId> getRunningApps() {
return runningApplications;
}
public void updateNodeHeartbeatResponseForCleanup(
NodeHeartbeatResponse response) {
}
public NodeHeartbeatResponse getLastNodeHeartBeatResponse() {
return null;
}
public List<UpdatedContainerInfo> pullContainerUpdates() {
ArrayList<UpdatedContainerInfo> list = new ArrayList<UpdatedContainerInfo>();
ArrayList<ContainerStatus> list2 = new ArrayList<ContainerStatus>();
for(ContainerId cId : this.toCleanUpContainers) {
list2.add(ContainerStatus.newInstance(cId, ContainerState.RUNNING, "",
ContainerExitStatus.SUCCESS));
}
list.add(new UpdatedContainerInfo(new ArrayList<ContainerStatus>(),
list2));
return list;
}
@Override
public String getNodeManagerVersion() {
return null;
}
@Override
public Set<String> getNodeLabels() {
return RMNodeLabelsManager.EMPTY_STRING_SET;
}
}
public static RMNode newNodeInfo(String rackName, String hostName,
final Resource resource, int port) {
final NodeId nodeId = newNodeID(hostName, port);
final String nodeAddr = hostName + ":" + port;
final String httpAddress = hostName;
return new FakeRMNodeImpl(nodeId, nodeAddr, httpAddress,
resource, rackName, "Me good",
port, hostName, null);
}
public static RMNode newNodeInfo(String rackName, String hostName,
final Resource resource) {
return newNodeInfo(rackName, hostName, resource, NODE_ID++);
}
}
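// Usage sketch (rack/host/resource values are illustrative; BuilderUtils
// comes from the NM simulator's own usage of this factory):
//   RMNode node = NodeInfo.newNodeInfo("default-rack", "node1",
//       BuilderUtils.newResource(10240, 10));
// Each call without an explicit port draws a fresh port from NODE_ID++,
// keeping node identities unique.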
| 5,913 | 29.802083 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.nodemanager;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.DelayQueue;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords
.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords
.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Logger;
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
import org.apache.hadoop.yarn.sls.utils.SLSUtils;
@Private
@Unstable
public class NMSimulator extends TaskRunner.Task {
// node resource
private RMNode node;
// master key
private MasterKey masterKey;
// containers with various STATE
private List<ContainerId> completedContainerList;
private List<ContainerId> releasedContainerList;
private DelayQueue<ContainerSimulator> containerQueue;
private Map<ContainerId, ContainerSimulator> runningContainers;
private List<ContainerId> amContainerList;
// resource manager
private ResourceManager rm;
// heart beat response id
private int RESPONSE_ID = 1;
private final static Logger LOG = Logger.getLogger(NMSimulator.class);
public void init(String nodeIdStr, int memory, int cores,
int dispatchTime, int heartBeatInterval, ResourceManager rm)
throws IOException, YarnException {
super.init(dispatchTime, dispatchTime + 1000000L * heartBeatInterval,
heartBeatInterval);
// create resource
String rackHostName[] = SLSUtils.getRackHostName(nodeIdStr);
this.node = NodeInfo.newNodeInfo(rackHostName[0], rackHostName[1],
BuilderUtils.newResource(memory, cores));
this.rm = rm;
// init data structures
completedContainerList =
Collections.synchronizedList(new ArrayList<ContainerId>());
releasedContainerList =
Collections.synchronizedList(new ArrayList<ContainerId>());
containerQueue = new DelayQueue<ContainerSimulator>();
amContainerList =
Collections.synchronizedList(new ArrayList<ContainerId>());
runningContainers =
new ConcurrentHashMap<ContainerId, ContainerSimulator>();
// register NM with RM
RegisterNodeManagerRequest req =
Records.newRecord(RegisterNodeManagerRequest.class);
req.setNodeId(node.getNodeID());
req.setResource(node.getTotalCapability());
req.setHttpPort(80);
RegisterNodeManagerResponse response = rm.getResourceTrackerService()
.registerNodeManager(req);
masterKey = response.getNMTokenMasterKey();
}
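// Usage sketch (values illustrative): register a 10GB / 10-vcore node
// that dispatches immediately and heartbeats every second:
//   NMSimulator nm = new NMSimulator();
//   nm.init("/default-rack/node1", 10240, 10, 0, 1000, rm);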
@Override
public void firstStep() {
// do nothing
}
@Override
public void middleStep() throws Exception {
// check the lifetime of each running container
ContainerSimulator cs = null;
synchronized(completedContainerList) {
while ((cs = containerQueue.poll()) != null) {
runningContainers.remove(cs.getId());
completedContainerList.add(cs.getId());
LOG.debug(MessageFormat.format("Container {0} has completed",
cs.getId()));
}
}
// send heart beat
NodeHeartbeatRequest beatRequest =
Records.newRecord(NodeHeartbeatRequest.class);
beatRequest.setLastKnownNMTokenMasterKey(masterKey);
NodeStatus ns = Records.newRecord(NodeStatus.class);
ns.setContainersStatuses(generateContainerStatusList());
ns.setNodeId(node.getNodeID());
ns.setKeepAliveApplications(new ArrayList<ApplicationId>());
ns.setResponseId(RESPONSE_ID ++);
ns.setNodeHealthStatus(NodeHealthStatus.newInstance(true, "", 0));
beatRequest.setNodeStatus(ns);
NodeHeartbeatResponse beatResponse =
rm.getResourceTrackerService().nodeHeartbeat(beatRequest);
if (! beatResponse.getContainersToCleanup().isEmpty()) {
// remove from queue
synchronized(releasedContainerList) {
for (ContainerId containerId : beatResponse.getContainersToCleanup()){
if (amContainerList.contains(containerId)) {
// AM container: not killed here, only released
synchronized(amContainerList) {
amContainerList.remove(containerId);
}
LOG.debug(MessageFormat.format("NodeManager {0} releases " +
"an AM ({1}).", node.getNodeID(), containerId));
} else {
cs = runningContainers.remove(containerId);
containerQueue.remove(cs);
releasedContainerList.add(containerId);
LOG.debug(MessageFormat.format("NodeManager {0} releases a " +
"container ({1}).", node.getNodeID(), containerId));
}
}
}
}
if (beatResponse.getNodeAction() == NodeAction.SHUTDOWN) {
lastStep();
}
}
@Override
public void lastStep() {
// do nothing
}
/**
* Collect the status of all containers located on the current node.
*/
private ArrayList<ContainerStatus> generateContainerStatusList() {
ArrayList<ContainerStatus> csList = new ArrayList<ContainerStatus>();
// add running containers
for (ContainerSimulator container : runningContainers.values()) {
csList.add(newContainerStatus(container.getId(),
ContainerState.RUNNING, ContainerExitStatus.SUCCESS));
}
synchronized(amContainerList) {
for (ContainerId cId : amContainerList) {
csList.add(newContainerStatus(cId,
ContainerState.RUNNING, ContainerExitStatus.SUCCESS));
}
}
// add complete containers
synchronized(completedContainerList) {
for (ContainerId cId : completedContainerList) {
LOG.debug(MessageFormat.format("NodeManager {0} completed" +
" container ({1}).", node.getNodeID(), cId));
csList.add(newContainerStatus(
cId, ContainerState.COMPLETE, ContainerExitStatus.SUCCESS));
}
completedContainerList.clear();
}
// released containers
synchronized(releasedContainerList) {
for (ContainerId cId : releasedContainerList) {
LOG.debug(MessageFormat.format("NodeManager {0} released container" +
" ({1}).", node.getNodeID(), cId));
csList.add(newContainerStatus(
cId, ContainerState.COMPLETE, ContainerExitStatus.ABORTED));
}
releasedContainerList.clear();
}
return csList;
}
private ContainerStatus newContainerStatus(ContainerId cId,
ContainerState state,
int exitState) {
ContainerStatus cs = Records.newRecord(ContainerStatus.class);
cs.setContainerId(cId);
cs.setState(state);
cs.setExitStatus(exitState);
return cs;
}
public RMNode getNode() {
return node;
}
/**
* Launch a new container with the given lifetime.
*/
public void addNewContainer(Container container, long lifeTimeMS) {
LOG.debug(MessageFormat.format("NodeManager {0} launches a new " +
"container ({1}).", node.getNodeID(), container.getId()));
if (lifeTimeMS != -1) {
// normal container
ContainerSimulator cs = new ContainerSimulator(container.getId(),
container.getResource(), lifeTimeMS + System.currentTimeMillis(),
lifeTimeMS);
containerQueue.add(cs);
runningContainers.put(cs.getId(), cs);
} else {
// AM container: a lifetime of -1 marks an AM container
synchronized(amContainerList) {
amContainerList.add(container.getId());
}
}
}
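// Callers distinguish task containers from AM containers via the
// lifetime argument, e.g. (values illustrative):
//   nm.addNewContainer(taskContainer, 5000L); // finishes after 5s
//   nm.addNewContainer(amContainer, -1L);     // tracked as an AM container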
/**
* Clean up an AM container and add it to the completed list.
* @param containerId id of the container to be cleaned
*/
public void cleanupContainer(ContainerId containerId) {
synchronized(amContainerList) {
amContainerList.remove(containerId);
}
synchronized(completedContainerList) {
completedContainerList.add(containerId);
}
}
@VisibleForTesting
Map<ContainerId, ContainerSimulator> getRunningContainers() {
return runningContainers;
}
@VisibleForTesting
List<ContainerId> getAMContainers() {
return amContainerList;
}
@VisibleForTesting
List<ContainerId> getCompletedContainers() {
return completedContainerList;
}
}
| 10,420 | 36.757246 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.conf;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@Private
@Unstable
public class SLSConfiguration {
// sls
public static final String PREFIX = "yarn.sls.";
// runner
public static final String RUNNER_PREFIX = PREFIX + "runner.";
public static final String RUNNER_POOL_SIZE = RUNNER_PREFIX + "pool.size";
public static final int RUNNER_POOL_SIZE_DEFAULT = 10;
// scheduler
public static final String SCHEDULER_PREFIX = PREFIX + "scheduler.";
public static final String RM_SCHEDULER = SCHEDULER_PREFIX + "class";
// metrics
public static final String METRICS_PREFIX = PREFIX + "metrics.";
public static final String METRICS_SWITCH = METRICS_PREFIX + "switch";
public static final String METRICS_WEB_ADDRESS_PORT = METRICS_PREFIX
+ "web.address.port";
public static final String METRICS_OUTPUT_DIR = METRICS_PREFIX + "output";
public static final int METRICS_WEB_ADDRESS_PORT_DEFAULT = 10001;
public static final String METRICS_TIMER_WINDOW_SIZE = METRICS_PREFIX
+ "timer.window.size";
public static final int METRICS_TIMER_WINDOW_SIZE_DEFAULT = 100;
public static final String METRICS_RECORD_INTERVAL_MS = METRICS_PREFIX
+ "record.interval.ms";
public static final int METRICS_RECORD_INTERVAL_MS_DEFAULT = 1000;
// nm
public static final String NM_PREFIX = PREFIX + "nm.";
public static final String NM_MEMORY_MB = NM_PREFIX + "memory.mb";
public static final int NM_MEMORY_MB_DEFAULT = 10240;
public static final String NM_VCORES = NM_PREFIX + "vcores";
public static final int NM_VCORES_DEFAULT = 10;
public static final String NM_HEARTBEAT_INTERVAL_MS = NM_PREFIX
+ "heartbeat.interval.ms";
public static final int NM_HEARTBEAT_INTERVAL_MS_DEFAULT = 1000;
// am
public static final String AM_PREFIX = PREFIX + "am.";
public static final String AM_HEARTBEAT_INTERVAL_MS = AM_PREFIX
+ "heartbeat.interval.ms";
public static final int AM_HEARTBEAT_INTERVAL_MS_DEFAULT = 1000;
public static final String AM_TYPE = AM_PREFIX + "type.";
// container
public static final String CONTAINER_PREFIX = PREFIX + "container.";
public static final String CONTAINER_MEMORY_MB = CONTAINER_PREFIX
+ "memory.mb";
public static final int CONTAINER_MEMORY_MB_DEFAULT = 1024;
public static final String CONTAINER_VCORES = CONTAINER_PREFIX + "vcores";
public static final int CONTAINER_VCORES_DEFAULT = 1;
}
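// A minimal sketch of how these keys might appear in an SLS configuration
// file such as sls-runner.xml (file name is an assumption; the values
// shown are just the defaults defined above):
//
//   <property>
//     <name>yarn.sls.runner.pool.size</name>
//     <value>10</value>
//   </property>
//   <property>
//     <name>yarn.sls.nm.memory.mb</name>
//     <value>10240</value>
//   </property>
//   <property>
//     <name>yarn.sls.container.vcores</name>
//     <value>1</value>
//   </property>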
| 3,563 | 47.162162 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.appmaster;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
import org.apache.hadoop.yarn.sls.SLSRunner;
import org.apache.log4j.Logger;
@Private
@Unstable
public class MRAMSimulator extends AMSimulator {
/*
Vocabulary Used:
pending -> requests which are NOT yet sent to RM
scheduled -> requests which are sent to RM but not yet assigned
assigned -> requests which are assigned to a container
completed -> request corresponding to which container has completed
Maps are scheduled as soon as their requests are received. Reduces are
scheduled once all maps have finished (slow-start is not currently
supported).
*/
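// Flow sketch derived from the vocabulary above (map side shown; the
// reduce side is symmetric):
//   pendingMaps --sendContainerRequest()--> scheduledMaps
//   scheduledMaps --container allocated--> assignedMaps
//   assignedMaps --SUCCESS--> finished
//   assignedMaps --killed--> pendingFailedMaps (re-requested later)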
private static final int PRIORITY_REDUCE = 10;
private static final int PRIORITY_MAP = 20;
// pending maps
private LinkedList<ContainerSimulator> pendingMaps =
new LinkedList<ContainerSimulator>();
// pending failed maps
private LinkedList<ContainerSimulator> pendingFailedMaps =
new LinkedList<ContainerSimulator>();
// scheduled maps
private LinkedList<ContainerSimulator> scheduledMaps =
new LinkedList<ContainerSimulator>();
// assigned maps
private Map<ContainerId, ContainerSimulator> assignedMaps =
new HashMap<ContainerId, ContainerSimulator>();
// reduces which are not yet scheduled
private LinkedList<ContainerSimulator> pendingReduces =
new LinkedList<ContainerSimulator>();
// pending failed reduces
private LinkedList<ContainerSimulator> pendingFailedReduces =
new LinkedList<ContainerSimulator>();
// scheduled reduces
private LinkedList<ContainerSimulator> scheduledReduces =
new LinkedList<ContainerSimulator>();
// assigned reduces
private Map<ContainerId, ContainerSimulator> assignedReduces =
new HashMap<ContainerId, ContainerSimulator>();
// all maps & reduces
private LinkedList<ContainerSimulator> allMaps =
new LinkedList<ContainerSimulator>();
private LinkedList<ContainerSimulator> allReduces =
new LinkedList<ContainerSimulator>();
// counters
private int mapFinished = 0;
private int mapTotal = 0;
private int reduceFinished = 0;
private int reduceTotal = 0;
// waiting for AM container
private boolean isAMContainerRunning = false;
private Container amContainer;
// finished
private boolean isFinished = false;
// resource for AM container
private final static int MR_AM_CONTAINER_RESOURCE_MEMORY_MB = 1024;
private final static int MR_AM_CONTAINER_RESOURCE_VCORES = 1;
public final Logger LOG = Logger.getLogger(MRAMSimulator.class);
public void init(int id, int heartbeatInterval,
List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
long traceStartTime, long traceFinishTime, String user, String queue,
boolean isTracked, String oldAppId) {
super.init(id, heartbeatInterval, containerList, rm, se,
traceStartTime, traceFinishTime, user, queue,
isTracked, oldAppId);
amtype = "mapreduce";
// get map/reduce tasks
for (ContainerSimulator cs : containerList) {
if (cs.getType().equals("map")) {
cs.setPriority(PRIORITY_MAP);
pendingMaps.add(cs);
} else if (cs.getType().equals("reduce")) {
cs.setPriority(PRIORITY_REDUCE);
pendingReduces.add(cs);
}
}
allMaps.addAll(pendingMaps);
allReduces.addAll(pendingReduces);
mapTotal = pendingMaps.size();
reduceTotal = pendingReduces.size();
totalContainers = mapTotal + reduceTotal;
}
@Override
public void firstStep() throws Exception {
super.firstStep();
requestAMContainer();
}
/**
* Send out the request for the AM container.
*/
protected void requestAMContainer()
throws YarnException, IOException, InterruptedException {
List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
ResourceRequest amRequest = createResourceRequest(
BuilderUtils.newResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB,
MR_AM_CONTAINER_RESOURCE_VCORES),
ResourceRequest.ANY, 1, 1);
ask.add(amRequest);
LOG.debug(MessageFormat.format("Application {0} sends out allocate " +
"request for its AM", appId));
final AllocateRequest request = this.createAllocateRequest(ask);
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(appAttemptId.toString());
Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps()
.get(appAttemptId.getApplicationId())
.getRMAppAttempt(appAttemptId).getAMRMToken();
ugi.addTokenIdentifier(token.decodeIdentifier());
AllocateResponse response = ugi.doAs(
new PrivilegedExceptionAction<AllocateResponse>() {
@Override
public AllocateResponse run() throws Exception {
return rm.getApplicationMasterService().allocate(request);
}
});
if (response != null) {
responseQueue.put(response);
}
}
@Override
@SuppressWarnings("unchecked")
protected void processResponseQueue()
throws InterruptedException, YarnException, IOException {
// Check whether the AM container has been allocated yet
if (!isAMContainerRunning) {
if (!responseQueue.isEmpty()) {
AllocateResponse response = responseQueue.take();
if (response != null
&& !response.getAllocatedContainers().isEmpty()) {
// Get AM container
Container container = response.getAllocatedContainers().get(0);
se.getNmMap().get(container.getNodeId())
.addNewContainer(container, -1L);
// Start AM container
amContainer = container;
LOG.debug(MessageFormat.format("Application {0} starts its " +
"AM container ({1}).", appId, amContainer.getId()));
isAMContainerRunning = true;
}
}
return;
}
while (! responseQueue.isEmpty()) {
AllocateResponse response = responseQueue.take();
// check completed containers
if (! response.getCompletedContainersStatuses().isEmpty()) {
for (ContainerStatus cs : response.getCompletedContainersStatuses()) {
ContainerId containerId = cs.getContainerId();
if (cs.getExitStatus() == ContainerExitStatus.SUCCESS) {
if (assignedMaps.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one" +
"mapper finished ({1}).", appId, containerId));
assignedMaps.remove(containerId);
mapFinished ++;
finishedContainers ++;
} else if (assignedReduces.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one" +
"reducer finished ({1}).", appId, containerId));
assignedReduces.remove(containerId);
reduceFinished ++;
finishedContainers ++;
} else {
// am container released event
isFinished = true;
LOG.info(MessageFormat.format("Application {0} goes to " +
"finish.", appId));
}
} else {
// container to be killed
if (assignedMaps.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one " +
"mapper killed ({1}).", appId, containerId));
pendingFailedMaps.add(assignedMaps.remove(containerId));
} else if (assignedReduces.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one " +
"reducer killed ({1}).", appId, containerId));
pendingFailedReduces.add(assignedReduces.remove(containerId));
} else {
LOG.info(MessageFormat.format("Application {0}'s AM is " +
"going to be killed. Restarting...", appId));
restart();
}
}
}
}
// check finished
if (isAMContainerRunning &&
(mapFinished == mapTotal) &&
(reduceFinished == reduceTotal)) {
// to release the AM container
se.getNmMap().get(amContainer.getNodeId())
.cleanupContainer(amContainer.getId());
isAMContainerRunning = false;
LOG.debug(MessageFormat.format("Application {0} sends out event " +
"to clean up its AM container.", appId));
isFinished = true;
break;
}
// check allocated containers
for (Container container : response.getAllocatedContainers()) {
if (! scheduledMaps.isEmpty()) {
ContainerSimulator cs = scheduledMaps.remove();
LOG.debug(MessageFormat.format("Application {0} starts a " +
"launch a mapper ({1}).", appId, container.getId()));
assignedMaps.put(container.getId(), cs);
se.getNmMap().get(container.getNodeId())
.addNewContainer(container, cs.getLifeTime());
} else if (! this.scheduledReduces.isEmpty()) {
ContainerSimulator cs = scheduledReduces.remove();
LOG.debug(MessageFormat.format("Application {0} starts a " +
"launch a reducer ({1}).", appId, container.getId()));
assignedReduces.put(container.getId(), cs);
se.getNmMap().get(container.getNodeId())
.addNewContainer(container, cs.getLifeTime());
}
}
}
}
/**
* Restart the application because its AM container was killed.
*/
private void restart()
throws YarnException, IOException, InterruptedException {
// clear
finishedContainers = 0;
isFinished = false;
mapFinished = 0;
reduceFinished = 0;
pendingFailedMaps.clear();
pendingMaps.clear();
pendingReduces.clear();
pendingFailedReduces.clear();
pendingMaps.addAll(allMaps);
pendingReduces.addAll(allReduces);
isAMContainerRunning = false;
amContainer = null;
// resend the AM container request
requestAMContainer();
}
@Override
protected void sendContainerRequest()
throws YarnException, IOException, InterruptedException {
if (isFinished) {
return;
}
// send out request
List<ResourceRequest> ask = null;
if (isAMContainerRunning) {
if (mapFinished != mapTotal) {
// map phase
if (! pendingMaps.isEmpty()) {
ask = packageRequests(pendingMaps, PRIORITY_MAP);
LOG.debug(MessageFormat.format("Application {0} sends out " +
"request for {1} mappers.", appId, pendingMaps.size()));
scheduledMaps.addAll(pendingMaps);
pendingMaps.clear();
} else if (! pendingFailedMaps.isEmpty() && scheduledMaps.isEmpty()) {
ask = packageRequests(pendingFailedMaps, PRIORITY_MAP);
LOG.debug(MessageFormat.format("Application {0} sends out " +
"requests for {1} failed mappers.", appId,
pendingFailedMaps.size()));
scheduledMaps.addAll(pendingFailedMaps);
pendingFailedMaps.clear();
}
} else if (reduceFinished != reduceTotal) {
// reduce phase
if (! pendingReduces.isEmpty()) {
ask = packageRequests(pendingReduces, PRIORITY_REDUCE);
LOG.debug(MessageFormat.format("Application {0} sends out " +
"requests for {1} reducers.", appId, pendingReduces.size()));
scheduledReduces.addAll(pendingReduces);
pendingReduces.clear();
} else if (! pendingFailedReduces.isEmpty()
&& scheduledReduces.isEmpty()) {
ask = packageRequests(pendingFailedReduces, PRIORITY_REDUCE);
LOG.debug(MessageFormat.format("Application {0} sends out " +
"request for {1} failed reducers.", appId,
pendingFailedReduces.size()));
scheduledReduces.addAll(pendingFailedReduces);
pendingFailedReduces.clear();
}
}
}
if (ask == null) {
ask = new ArrayList<ResourceRequest>();
}
final AllocateRequest request = createAllocateRequest(ask);
if (totalContainers == 0) {
request.setProgress(1.0f);
} else {
request.setProgress((float) finishedContainers / totalContainers);
}
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(appAttemptId.toString());
Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps()
.get(appAttemptId.getApplicationId())
.getRMAppAttempt(appAttemptId).getAMRMToken();
ugi.addTokenIdentifier(token.decodeIdentifier());
AllocateResponse response = ugi.doAs(
new PrivilegedExceptionAction<AllocateResponse>() {
@Override
public AllocateResponse run() throws Exception {
return rm.getApplicationMasterService().allocate(request);
}
});
if (response != null) {
responseQueue.put(response);
}
}
@Override
protected void checkStop() {
if (isFinished) {
super.setEndTime(System.currentTimeMillis());
}
}
@Override
public void lastStep() throws Exception {
super.lastStep();
// clear data structures
allMaps.clear();
allReduces.clear();
assignedMaps.clear();
assignedReduces.clear();
pendingFailedMaps.clear();
pendingFailedReduces.clear();
pendingMaps.clear();
pendingReduces.clear();
scheduledMaps.clear();
scheduledReduces.clear();
responseQueue.clear();
}
}
| 15,607 | 36.883495 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.appmaster;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords
.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords
.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords
.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Logger;
import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
import org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper;
import org.apache.hadoop.yarn.sls.SLSRunner;
import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
import org.apache.hadoop.yarn.sls.utils.SLSUtils;
@Private
@Unstable
public abstract class AMSimulator extends TaskRunner.Task {
// resource manager
protected ResourceManager rm;
// main
protected SLSRunner se;
// application
protected ApplicationId appId;
protected ApplicationAttemptId appAttemptId;
protected String oldAppId; // jobId from the jobhistory file
// record factory
protected final static RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
// response queue
protected final BlockingQueue<AllocateResponse> responseQueue;
protected int RESPONSE_ID = 1;
// user name
protected String user;
// queue name
protected String queue;
// am type
protected String amtype;
// job start/end time
protected long traceStartTimeMS;
protected long traceFinishTimeMS;
protected long simulateStartTimeMS;
protected long simulateFinishTimeMS;
// whether tracked in Metrics
protected boolean isTracked;
// progress
protected int totalContainers;
protected int finishedContainers;
protected final Logger LOG = Logger.getLogger(AMSimulator.class);
public AMSimulator() {
this.responseQueue = new LinkedBlockingQueue<AllocateResponse>();
}
public void init(int id, int heartbeatInterval,
List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
long traceStartTime, long traceFinishTime, String user, String queue,
boolean isTracked, String oldAppId) {
super.init(traceStartTime, traceStartTime + 1000000L * heartbeatInterval,
heartbeatInterval);
this.user = user;
this.rm = rm;
this.se = se;
this.queue = queue;
this.oldAppId = oldAppId;
this.isTracked = isTracked;
this.traceStartTimeMS = traceStartTime;
this.traceFinishTimeMS = traceFinishTime;
}
/**
* Register with the RM: submit the application, register the AM, and
* start tracking the app's metrics.
*/
@Override
public void firstStep() throws Exception {
simulateStartTimeMS = System.currentTimeMillis() -
SLSRunner.getRunner().getStartTimeMS();
// submit application, waiting until ACCEPTED
submitApp();
// register application master
registerAM();
// track app metrics
trackApp();
}
@Override
public void middleStep() throws Exception {
// process responses in the queue
processResponseQueue();
// send out request
sendContainerRequest();
// check whether finish
checkStop();
}
@Override
public void lastStep() throws Exception {
LOG.info(MessageFormat.format("Application {0} is shutting down.", appId));
// unregister tracking
if (isTracked) {
untrackApp();
}
// unregister application master
final FinishApplicationMasterRequest finishAMRequest = recordFactory
.newRecordInstance(FinishApplicationMasterRequest.class);
finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(appAttemptId.toString());
Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps().get(appId)
.getRMAppAttempt(appAttemptId).getAMRMToken();
ugi.addTokenIdentifier(token.decodeIdentifier());
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
rm.getApplicationMasterService()
.finishApplicationMaster(finishAMRequest);
return null;
}
});
simulateFinishTimeMS = System.currentTimeMillis() -
SLSRunner.getRunner().getStartTimeMS();
// record job running information
((ResourceSchedulerWrapper)rm.getResourceScheduler())
.addAMRuntime(appId,
traceStartTimeMS, traceFinishTimeMS,
simulateStartTimeMS, simulateFinishTimeMS);
}
protected ResourceRequest createResourceRequest(
Resource resource, String host, int priority, int numContainers) {
ResourceRequest request = recordFactory
.newRecordInstance(ResourceRequest.class);
request.setCapability(resource);
request.setResourceName(host);
request.setNumContainers(numContainers);
Priority prio = recordFactory.newRecordInstance(Priority.class);
prio.setPriority(priority);
request.setPriority(prio);
return request;
}
protected AllocateRequest createAllocateRequest(List<ResourceRequest> ask,
List<ContainerId> toRelease) {
AllocateRequest allocateRequest =
recordFactory.newRecordInstance(AllocateRequest.class);
allocateRequest.setResponseId(RESPONSE_ID ++);
allocateRequest.setAskList(ask);
allocateRequest.setReleaseList(toRelease);
return allocateRequest;
}
protected AllocateRequest createAllocateRequest(List<ResourceRequest> ask) {
return createAllocateRequest(ask, new ArrayList<ContainerId>());
}
protected abstract void processResponseQueue() throws Exception;
protected abstract void sendContainerRequest() throws Exception;
protected abstract void checkStop();
private void submitApp()
throws YarnException, InterruptedException, IOException {
// ask for new application
GetNewApplicationRequest newAppRequest =
Records.newRecord(GetNewApplicationRequest.class);
GetNewApplicationResponse newAppResponse =
rm.getClientRMService().getNewApplication(newAppRequest);
appId = newAppResponse.getApplicationId();
// submit the application
final SubmitApplicationRequest subAppRequest =
Records.newRecord(SubmitApplicationRequest.class);
ApplicationSubmissionContext appSubContext =
Records.newRecord(ApplicationSubmissionContext.class);
appSubContext.setApplicationId(appId);
appSubContext.setMaxAppAttempts(1);
appSubContext.setQueue(queue);
appSubContext.setPriority(Priority.newInstance(0));
ContainerLaunchContext conLauContext =
Records.newRecord(ContainerLaunchContext.class);
conLauContext.setApplicationACLs(
new HashMap<ApplicationAccessType, String>());
conLauContext.setCommands(new ArrayList<String>());
conLauContext.setEnvironment(new HashMap<String, String>());
conLauContext.setLocalResources(new HashMap<String, LocalResource>());
conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
appSubContext.setAMContainerSpec(conLauContext);
appSubContext.setUnmanagedAM(true);
subAppRequest.setApplicationSubmissionContext(appSubContext);
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws YarnException {
rm.getClientRMService().submitApplication(subAppRequest);
return null;
}
});
LOG.info(MessageFormat.format("Submit a new application {0}", appId));
// waiting until application ACCEPTED
RMApp app = rm.getRMContext().getRMApps().get(appId);
while(app.getState() != RMAppState.ACCEPTED) {
Thread.sleep(10);
}
// Waiting until application attempt reach LAUNCHED
// "Unmanaged AM must register after AM attempt reaches LAUNCHED state"
this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
.getCurrentAppAttempt().getAppAttemptId();
RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
.getCurrentAppAttempt();
while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
Thread.sleep(10);
}
}
private void registerAM()
throws YarnException, IOException, InterruptedException {
// register application master
final RegisterApplicationMasterRequest amRegisterRequest =
Records.newRecord(RegisterApplicationMasterRequest.class);
amRegisterRequest.setHost("localhost");
amRegisterRequest.setRpcPort(1000);
amRegisterRequest.setTrackingUrl("localhost:1000");
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(appAttemptId.toString());
Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps().get(appId)
.getRMAppAttempt(appAttemptId).getAMRMToken();
ugi.addTokenIdentifier(token.decodeIdentifier());
ugi.doAs(
new PrivilegedExceptionAction<RegisterApplicationMasterResponse>() {
@Override
public RegisterApplicationMasterResponse run() throws Exception {
return rm.getApplicationMasterService()
.registerApplicationMaster(amRegisterRequest);
}
});
LOG.info(MessageFormat.format(
"Register the application master for application {0}", appId));
}
private void trackApp() {
if (isTracked) {
((ResourceSchedulerWrapper) rm.getResourceScheduler())
.addTrackedApp(appAttemptId, oldAppId);
}
}
public void untrackApp() {
if (isTracked) {
((ResourceSchedulerWrapper) rm.getResourceScheduler())
.removeTrackedApp(appAttemptId, oldAppId);
}
}
protected List<ResourceRequest> packageRequests(
List<ContainerSimulator> csList, int priority) {
// create requests
Map<String, ResourceRequest> rackLocalRequestMap = new HashMap<String, ResourceRequest>();
Map<String, ResourceRequest> nodeLocalRequestMap = new HashMap<String, ResourceRequest>();
ResourceRequest anyRequest = null;
for (ContainerSimulator cs : csList) {
String rackHostNames[] = SLSUtils.getRackHostName(cs.getHostname());
// check rack local
String rackname = rackHostNames[0];
if (rackLocalRequestMap.containsKey(rackname)) {
rackLocalRequestMap.get(rackname).setNumContainers(
rackLocalRequestMap.get(rackname).getNumContainers() + 1);
} else {
ResourceRequest request = createResourceRequest(
cs.getResource(), rackname, priority, 1);
rackLocalRequestMap.put(rackname, request);
}
// check node local
String hostname = rackHostNames[1];
if (nodeLocalRequestMap.containsKey(hostname)) {
nodeLocalRequestMap.get(hostname).setNumContainers(
nodeLocalRequestMap.get(hostname).getNumContainers() + 1);
} else {
ResourceRequest request = createResourceRequest(
cs.getResource(), hostname, priority, 1);
nodeLocalRequestMap.put(hostname, request);
}
// any
if (anyRequest == null) {
anyRequest = createResourceRequest(
cs.getResource(), ResourceRequest.ANY, priority, 1);
} else {
anyRequest.setNumContainers(anyRequest.getNumContainers() + 1);
}
}
List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
ask.addAll(nodeLocalRequestMap.values());
ask.addAll(rackLocalRequestMap.values());
if (anyRequest != null) {
ask.add(anyRequest);
}
return ask;
}
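// Example of the locality aggregation above (hypothetical hosts): tasks on
// "/rack1/node1", "/rack1/node1" and "/rack1/node2" collapse into
//   node1 -> numContainers=2, node2 -> 1, rack1 -> 3, ANY -> 3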
public String getQueue() {
return queue;
}
public String getAMType() {
return amtype;
}
public long getDuration() {
return simulateFinishTimeMS - simulateStartTimeMS;
}
public int getNumTasks() {
return totalContainers;
}
}
| 14,830 | 37.322997 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sls.utils;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.JobTraceReader;
import org.apache.hadoop.tools.rumen.LoggedJob;
import org.apache.hadoop.tools.rumen.LoggedTask;
import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
@Private
@Unstable
public class SLSUtils {
  public static String[] getRackHostName(String hostname) {
    // the input is of the form "/rackName/hostName"; strip the leading '/'
    // and split it into {rackName, hostName}
    hostname = hostname.substring(1);
    return hostname.split("/");
  }
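  // Example (hypothetical names): getRackHostName("/rack1/node1")
  // returns {"rack1", "node1"}.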
  /**
   * Parse the Rumen trace file and return the set of host names it mentions.
   */
public static Set<String> parseNodesFromRumenTrace(String jobTrace)
throws IOException {
Set<String> nodeSet = new HashSet<String>();
File fin = new File(jobTrace);
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "file:///");
JobTraceReader reader = new JobTraceReader(
new Path(fin.getAbsolutePath()), conf);
try {
LoggedJob job = null;
while ((job = reader.getNext()) != null) {
for(LoggedTask mapTask : job.getMapTasks()) {
// select the last attempt
LoggedTaskAttempt taskAttempt = mapTask.getAttempts()
.get(mapTask.getAttempts().size() - 1);
nodeSet.add(taskAttempt.getHostName().getValue());
}
for(LoggedTask reduceTask : job.getReduceTasks()) {
LoggedTaskAttempt taskAttempt = reduceTask.getAttempts()
.get(reduceTask.getAttempts().size() - 1);
nodeSet.add(taskAttempt.getHostName().getValue());
}
}
} finally {
reader.close();
}
return nodeSet;
}
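  // Illustrative usage (hypothetical path):
  //   Set<String> nodes = parseNodesFromRumenTrace("/tmp/job-trace.json");
  // Note that only the host of each task's last attempt is collected.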
  /**
   * Parse the SLS trace file and return the set of host names it mentions.
   */
public static Set<String> parseNodesFromSLSTrace(String jobTrace)
throws IOException {
Set<String> nodeSet = new HashSet<String>();
JsonFactory jsonF = new JsonFactory();
ObjectMapper mapper = new ObjectMapper();
Reader input =
new InputStreamReader(new FileInputStream(jobTrace), "UTF-8");
try {
Iterator<Map> i = mapper.readValues(
jsonF.createJsonParser(input), Map.class);
while (i.hasNext()) {
Map jsonE = i.next();
List tasks = (List) jsonE.get("job.tasks");
for (Object o : tasks) {
Map jsonTask = (Map) o;
String hostname = jsonTask.get("container.host").toString();
nodeSet.add(hostname);
}
}
} finally {
input.close();
}
return nodeSet;
}
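  // Illustrative sketch of the SLS trace entries consumed above (hypothetical
  // values); only the "job.tasks" list and each task's "container.host" are
  // read:
  //   {"job.tasks": [{"container.host": "/rack1/node1"},
  //                  {"container.host": "/rack1/node2"}]}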
  /**
   * Parse the input node file and return the set of host names it defines.
   */
public static Set<String> parseNodesFromNodeFile(String nodeFile)
throws IOException {
Set<String> nodeSet = new HashSet<String>();
JsonFactory jsonF = new JsonFactory();
ObjectMapper mapper = new ObjectMapper();
Reader input =
new InputStreamReader(new FileInputStream(nodeFile), "UTF-8");
try {
Iterator<Map> i = mapper.readValues(
jsonF.createJsonParser(input), Map.class);
while (i.hasNext()) {
Map jsonE = i.next();
String rack = "/" + jsonE.get("rack");
List tasks = (List) jsonE.get("nodes");
for (Object o : tasks) {
Map jsonNode = (Map) o;
nodeSet.add(rack + "/" + jsonNode.get("node"));
}
}
} finally {
input.close();
}
return nodeSet;
}
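  // Illustrative sketch of a node-file entry consumed above (hypothetical
  // names); each node is returned as "/<rack>/<node>":
  //   {"rack": "rack1", "nodes": [{"node": "node1"}, {"node": "node2"}]}
  // yields "/rack1/node1" and "/rack1/node2".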
}
| 4,683 | 32.219858 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.junit.Assert;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
import org.junit.Test;
public class TestRecordFactory {
@Test
public void testPbRecordFactory() {
RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
try {
AllocateResponse response =
pbRecordFactory.newRecordInstance(AllocateResponse.class);
Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete record");
}
try {
AllocateRequest response =
pbRecordFactory.newRecordInstance(AllocateRequest.class);
Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete record");
}
}
}
| 2,151 | 36.103448 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLogAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.junit.Test;
public class TestContainerLogAppender {
@Test
public void testAppendInClose() throws Exception {
final ContainerLogAppender claAppender = new ContainerLogAppender();
claAppender.setName("testCLA");
claAppender.setLayout(new PatternLayout("%-5p [%t]: %m%n"));
claAppender.setContainerLogDir("target/testAppendInClose/logDir");
claAppender.setContainerLogFile("syslog");
claAppender.setTotalLogFileSize(1000);
claAppender.activateOptions();
    final Logger claLog = Logger.getLogger("testAppendInClose-category");
claLog.setAdditivity(false);
claLog.addAppender(claAppender);
claLog.info(new Object() {
public String toString() {
claLog.info("message1");
return "return message1";
}
});
claAppender.close();
}
}
| 1,731 | 35.083333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRpcFactoryProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RpcClientFactory;
import org.apache.hadoop.yarn.factories.RpcServerFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
import org.junit.Test;
public class TestRpcFactoryProvider {
@Test
public void testFactoryProvider() {
Configuration conf = new Configuration();
RpcClientFactory clientFactory = null;
RpcServerFactory serverFactory = null;
clientFactory = RpcFactoryProvider.getClientFactory(conf);
serverFactory = RpcFactoryProvider.getServerFactory(conf);
Assert.assertEquals(RpcClientFactoryPBImpl.class, clientFactory.getClass());
Assert.assertEquals(RpcServerFactoryPBImpl.class, serverFactory.getClass());
conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS, "unknown");
conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS, "unknown");
conf.set(YarnConfiguration.IPC_RECORD_FACTORY_CLASS, "unknown");
try {
clientFactory = RpcFactoryProvider.getClientFactory(conf);
Assert.fail("Expected an exception - unknown serializer");
    } catch (YarnRuntimeException e) {
      // expected: the configured client factory class is unknown
    }
try {
serverFactory = RpcFactoryProvider.getServerFactory(conf);
Assert.fail("Expected an exception - unknown serializer");
    } catch (YarnRuntimeException e) {
      // expected: the configured server factory class is unknown
    }
conf = new Configuration();
    conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS, "NonExistentClass");
conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS, RpcServerFactoryPBImpl.class.getName());
try {
clientFactory = RpcFactoryProvider.getClientFactory(conf);
Assert.fail("Expected an exception - unknown class");
    } catch (YarnRuntimeException e) {
      // expected: the configured class cannot be loaded
    }
try {
serverFactory = RpcFactoryProvider.getServerFactory(conf);
} catch (YarnRuntimeException e) {
Assert.fail("Error while loading factory using reflection: [" + RpcServerFactoryPBImpl.class.getName() + "]");
}
}
}
| 3,116 | 38.961538 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPCFactories.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
import org.junit.Test;
public class TestRPCFactories {
@Test
public void test() {
testPbServerFactory();
testPbClientFactory();
}
private void testPbServerFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
Configuration conf = new Configuration();
ApplicationMasterProtocol instance = new AMRMProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
ApplicationMasterProtocol.class, instance, addr, conf, null, 1);
server.start();
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create server");
} finally {
if (server != null) {
server.stop();
}
}
}
private void testPbClientFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
    System.err.println(addr.getHostName() + ":" + addr.getPort());
Configuration conf = new Configuration();
ApplicationMasterProtocol instance = new AMRMProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
ApplicationMasterProtocol.class, instance, addr, conf, null, 1);
server.start();
System.err.println(server.getListenerAddress());
System.err.println(NetUtils.getConnectAddress(server));
ApplicationMasterProtocol amrmClient = null;
try {
amrmClient = (ApplicationMasterProtocol) RpcClientFactoryPBImpl.get().getClient(ApplicationMasterProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create client");
}
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create server");
} finally {
if (server != null) {
server.stop();
}
}
}
public class AMRMProtocolTestImpl implements ApplicationMasterProtocol {
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request) throws YarnException,
IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request) throws YarnException,
IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
}
}
| 4,531 | 32.820896 | 166 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.util.Iterator;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import com.google.common.collect.Iterators;
/**
* Utilities to generate fake test apps
*/
public class MockApps {
static final Iterator<String> NAMES = Iterators.cycle("SleepJob",
"RandomWriter", "TeraSort", "TeraGen", "PigLatin", "WordCount",
"I18nApp<☯>");
static final Iterator<String> USERS = Iterators.cycle("dorothy", "tinman",
"scarecrow", "glinda", "nikko", "toto", "winkie", "zeke", "gulch");
static final Iterator<YarnApplicationState> STATES = Iterators.cycle(
YarnApplicationState.values());
static final Iterator<String> QUEUES = Iterators.cycle("a.a1", "a.a2",
"b.b1", "b.b2", "b.b3", "c.c1.c11", "c.c1.c12", "c.c1.c13",
"c.c2", "c.c3", "c.c4");
static final long TS = System.currentTimeMillis();
public static String newAppName() {
synchronized(NAMES) {
return NAMES.next();
}
}
public static String newUserName() {
synchronized(USERS) {
return USERS.next();
}
}
public static String newQueue() {
synchronized(QUEUES) {
return QUEUES.next();
}
}
public static ApplicationId newAppID(int i) {
return ApplicationId.newInstance(TS, i);
}
public static YarnApplicationState newAppState() {
synchronized(STATES) {
return STATES.next();
}
}
}
| 2,262 | 30 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
/*
 * Test that the container launcher RPC times out properly. This path is used
 * both by the RM to launch an AM and by an AM to launch containers.
 */
public class TestContainerLaunchRPC {
static final Log LOG = LogFactory.getLog(TestContainerLaunchRPC.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@Test
public void testHadoopProtoRPCTimeout() throws Exception {
testRPCTimeout(HadoopYarnProtoRPC.class.getName());
}
private void testRPCTimeout(String rpcClass) throws Exception {
Configuration conf = new Configuration();
// set timeout low for the test
conf.setInt("yarn.rpc.nm-command-timeout", 3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
try {
ContainerManagementProtocol proxy = (ContainerManagementProtocol) rpc.getProxy(
ContainerManagementProtocol.class,
server.getListenerAddress(), conf);
ContainerLaunchContext containerLaunchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken =
TestRPC.newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
try {
proxy.startContainers(allRequests);
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
Assert.assertEquals("Error, exception is not: "
+ SocketTimeoutException.class.getName(),
SocketTimeoutException.class.getName(), e.getClass().getName());
return;
}
} finally {
server.stop();
}
Assert.fail("timeout exception should have occurred!");
}
public class DummyContainerManager implements ContainerManagementProtocol {
private ContainerStatus status = null;
@Override
public StartContainersResponse startContainers(
StartContainersRequest requests) throws YarnException, IOException {
try {
        // make the thread sleep to look like it's not going to respond
Thread.sleep(10000);
} catch (Exception e) {
LOG.error(e);
throw new YarnException(e);
}
throw new YarnException("Shouldn't happen!!");
}
@Override
public StopContainersResponse
stopContainers(StopContainersRequest requests) throws YarnException,
IOException {
Exception e = new Exception("Dummy function", new Exception(
"Dummy function cause"));
throw new YarnException(e);
}
@Override
public GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request) throws YarnException, IOException {
List<ContainerStatus> list = new ArrayList<ContainerStatus>();
list.add(status);
      GetContainerStatusesResponse response =
          GetContainerStatusesResponse.newInstance(list, null);
      return response;
}
}
}
| 6,943 | 39.608187 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import static org.junit.Assert.assertSame;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.Test;
public class TestYarnUncaughtExceptionHandler {
private static final YarnUncaughtExceptionHandler exHandler =
new YarnUncaughtExceptionHandler();
/**
   * Throw a {@code YarnRuntimeException} inside a thread and verify that
   * the {@code YarnUncaughtExceptionHandler} instance handles it.
*
* @throws InterruptedException
*/
@Test
public void testUncaughtExceptionHandlerWithRuntimeException()
throws InterruptedException {
final YarnUncaughtExceptionHandler spyYarnHandler = spy(exHandler);
final YarnRuntimeException yarnException = new YarnRuntimeException(
"test-yarn-runtime-exception");
final Thread yarnThread = new Thread(new Runnable() {
@Override
public void run() {
throw yarnException;
}
});
yarnThread.setUncaughtExceptionHandler(spyYarnHandler);
assertSame(spyYarnHandler, yarnThread.getUncaughtExceptionHandler());
yarnThread.start();
yarnThread.join();
verify(spyYarnHandler).uncaughtException(yarnThread, yarnException);
}
/**
* <p>
   * Throw an {@code Error} inside a thread and verify that the
   * {@code YarnUncaughtExceptionHandler} instance handles it.
   * <p>
   * Uses the {@code ExitUtil} class to avoid JVM exit through
   * {@code System.exit(-1)}.
*
* @throws InterruptedException
*/
@Test
public void testUncaughtExceptionHandlerWithError()
throws InterruptedException {
ExitUtil.disableSystemExit();
final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler);
final java.lang.Error error = new java.lang.Error("test-error");
final Thread errorThread = new Thread(new Runnable() {
@Override
public void run() {
throw error;
}
});
errorThread.setUncaughtExceptionHandler(spyErrorHandler);
assertSame(spyErrorHandler, errorThread.getUncaughtExceptionHandler());
errorThread.start();
errorThread.join();
verify(spyErrorHandler).uncaughtException(errorThread, error);
}
/**
* <p>
   * Throw an {@code OutOfMemoryError} inside a thread and verify that the
   * {@code YarnUncaughtExceptionHandler} instance handles it.
   * <p>
   * Uses the {@code ExitUtil} class to avoid JVM halt through
   * {@code Runtime.getRuntime().halt(-1)}.
*
* @throws InterruptedException
*/
@Test
public void testUncaughtExceptionHandlerWithOutOfMemoryError()
throws InterruptedException {
ExitUtil.disableSystemHalt();
final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler);
final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error");
final Thread oomThread = new Thread(new Runnable() {
@Override
public void run() {
throw oomError;
}
});
oomThread.setUncaughtExceptionHandler(spyOomHandler);
assertSame(spyOomHandler, oomThread.getUncaughtExceptionHandler());
oomThread.start();
oomThread.join();
verify(spyOomHandler).uncaughtException(oomThread, oomError);
}
}
| 4,009 | 33.273504 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
public class TestRPC {
private static final String EXCEPTION_MSG = "test error";
private static final String EXCEPTION_CAUSE = "exception cause";
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
@Test
public void testUnknownCall() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class
.getName());
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
// Any unrelated protocol would do
ApplicationClientProtocol proxy = (ApplicationClientProtocol) rpc.getProxy(
ApplicationClientProtocol.class, NetUtils.getConnectAddress(server), conf);
try {
proxy.getNewApplication(Records
.newRecord(GetNewApplicationRequest.class));
Assert.fail("Excepted RPC call to fail with unknown method.");
} catch (YarnException e) {
Assert.assertTrue(e.getMessage().matches(
"Unknown method getNewApplication called on.*"
+ "org.apache.hadoop.yarn.proto.ApplicationClientProtocol"
+ "\\$ApplicationClientProtocolService\\$BlockingInterface protocol."));
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void testHadoopProtoRPC() throws Exception {
test(HadoopYarnProtoRPC.class.getName());
}
private void test(String rpcClass) throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, ProtobufRpcEngine.class);
ContainerManagementProtocol proxy = (ContainerManagementProtocol)
rpc.getProxy(ContainerManagementProtocol.class,
NetUtils.getConnectAddress(server), conf);
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken = newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
proxy.startContainers(allRequests);
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(containerId);
GetContainerStatusesRequest gcsRequest =
GetContainerStatusesRequest.newInstance(containerIds);
GetContainerStatusesResponse response =
proxy.getContainerStatuses(gcsRequest);
List<ContainerStatus> statuses = response.getContainerStatuses();
//test remote exception
boolean exception = false;
try {
StopContainersRequest stopRequest =
recordFactory.newRecordInstance(StopContainersRequest.class);
stopRequest.setContainerIds(containerIds);
proxy.stopContainers(stopRequest);
} catch (YarnException e) {
exception = true;
Assert.assertTrue(e.getMessage().contains(EXCEPTION_MSG));
Assert.assertTrue(e.getMessage().contains(EXCEPTION_CAUSE));
System.out.println("Test Exception is " + e.getMessage());
} catch (Exception ex) {
ex.printStackTrace();
}
Assert.assertTrue(exception);
server.stop();
Assert.assertNotNull(statuses.get(0));
Assert.assertEquals(ContainerState.RUNNING, statuses.get(0).getState());
}
public class DummyContainerManager implements ContainerManagementProtocol {
private List<ContainerStatus> statuses = new ArrayList<ContainerStatus>();
@Override
public GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request)
throws YarnException {
GetContainerStatusesResponse response =
recordFactory.newRecordInstance(GetContainerStatusesResponse.class);
response.setContainerStatuses(statuses);
return response;
}
@Override
public StartContainersResponse startContainers(
StartContainersRequest requests) throws YarnException {
StartContainersResponse response =
recordFactory.newRecordInstance(StartContainersResponse.class);
for (StartContainerRequest request : requests.getStartContainerRequests()) {
Token containerToken = request.getContainerToken();
ContainerTokenIdentifier tokenId = null;
try {
tokenId = newContainerTokenIdentifier(containerToken);
} catch (IOException e) {
throw RPCUtil.getRemoteException(e);
}
ContainerStatus status =
recordFactory.newRecordInstance(ContainerStatus.class);
status.setState(ContainerState.RUNNING);
status.setContainerId(tokenId.getContainerID());
status.setExitStatus(0);
statuses.add(status);
}
return response;
}
@Override
public StopContainersResponse stopContainers(StopContainersRequest request)
throws YarnException {
Exception e = new Exception(EXCEPTION_MSG,
new Exception(EXCEPTION_CAUSE));
throw new YarnException(e);
}
}
public static ContainerTokenIdentifier newContainerTokenIdentifier(
Token containerToken) throws IOException {
org.apache.hadoop.security.token.Token<ContainerTokenIdentifier> token =
new org.apache.hadoop.security.token.Token<ContainerTokenIdentifier>(
containerToken.getIdentifier()
.array(), containerToken.getPassword().array(), new Text(
containerToken.getKind()),
new Text(containerToken.getService()));
return token.decodeIdentifier();
}
public static Token newContainerToken(NodeId nodeId, byte[] password,
ContainerTokenIdentifier tokenIdentifier) {
// RPC layer client expects ip:port as service for tokens
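    // (illustrative, hypothetical values: a NodeId of "node1:45454" produces
    // a service string such as "127.0.0.1:45454" once the host is resolved)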
InetSocketAddress addr =
NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
// NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
Token containerToken =
Token.newInstance(tokenIdentifier.getBytes(),
ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
.buildTokenService(addr).toString());
return containerToken;
}
}
| 10,611 | 41.790323 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.security;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.HadoopKerberosName;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.YARNDelegationTokenIdentifierProto;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.api.ContainerType;
import org.junit.Assert;
import org.junit.Test;
public class TestYARNTokenIdentifier {
@Test
public void testNMTokenIdentifier() throws IOException {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1, 1), 1);
NodeId nodeId = NodeId.newInstance("host0", 0);
String applicationSubmitter = "usr0";
int masterKeyId = 1;
NMTokenIdentifier token = new NMTokenIdentifier(
appAttemptId, nodeId, applicationSubmitter, masterKeyId);
NMTokenIdentifier anotherToken = new NMTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
    // verify the whole record equals the original record
    Assert.assertEquals("Token is not the same after serialization " +
        "and deserialization.", token, anotherToken);
    // verify all properties are the same as the original
    Assert.assertEquals(
        "appAttemptId from proto is not the same as the original token",
        anotherToken.getApplicationAttemptId(), appAttemptId);
    Assert.assertEquals(
        "NodeId from proto is not the same as the original token",
        anotherToken.getNodeId(), nodeId);
    Assert.assertEquals(
        "applicationSubmitter from proto is not the same as the original token",
        anotherToken.getApplicationSubmitter(), applicationSubmitter);
    Assert.assertEquals(
        "masterKeyId from proto is not the same as the original token",
        anotherToken.getKeyId(), masterKeyId);
}
@Test
public void testAMRMTokenIdentifier() throws IOException {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1, 1), 1);
int masterKeyId = 1;
AMRMTokenIdentifier token = new AMRMTokenIdentifier(appAttemptId, masterKeyId);
AMRMTokenIdentifier anotherToken = new AMRMTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
    // verify the whole record equals the original record
    Assert.assertEquals("Token is not the same after serialization " +
        "and deserialization.", token, anotherToken);
    Assert.assertEquals("ApplicationAttemptId from proto is not the same as the original token",
        anotherToken.getApplicationAttemptId(), appAttemptId);
    Assert.assertEquals("masterKeyId from proto is not the same as the original token",
        anotherToken.getKeyId(), masterKeyId);
}
@Test
public void testClientToAMTokenIdentifier() throws IOException {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1, 1), 1);
String clientName = "user";
ClientToAMTokenIdentifier token = new ClientToAMTokenIdentifier(
appAttemptId, clientName);
ClientToAMTokenIdentifier anotherToken = new ClientToAMTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
    // verify the whole record equals the original record
    Assert.assertEquals("Token is not the same after serialization " +
        "and deserialization.", token, anotherToken);
    Assert.assertEquals("ApplicationAttemptId from proto is not the same as the original token",
        anotherToken.getApplicationAttemptID(), appAttemptId);
    Assert.assertEquals("clientName from proto is not the same as the original token",
        anotherToken.getClientName(), clientName);
}
@Test
public void testContainerTokenIdentifier() throws IOException {
ContainerId containerID = ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(
1, 1), 1), 1);
String hostName = "host0";
String appSubmitter = "usr0";
Resource r = Resource.newInstance(1024, 1);
long expiryTimeStamp = 1000;
int masterKeyId = 1;
long rmIdentifier = 1;
Priority priority = Priority.newInstance(1);
long creationTime = 1000;
ContainerTokenIdentifier token = new ContainerTokenIdentifier(
containerID, hostName, appSubmitter, r, expiryTimeStamp,
masterKeyId, rmIdentifier, priority, creationTime);
ContainerTokenIdentifier anotherToken = new ContainerTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
    // verify the whole record equals the original record
    Assert.assertEquals("Token is not the same after serialization " +
        "and deserialization.", token, anotherToken);
    Assert.assertEquals(
        "ContainerID from proto is not the same as the original token",
        anotherToken.getContainerID(), containerID);
    Assert.assertEquals(
        "Hostname from proto is not the same as the original token",
        anotherToken.getNmHostAddress(), hostName);
    Assert.assertEquals(
        "ApplicationSubmitter from proto is not the same as the original token",
        anotherToken.getApplicationSubmitter(), appSubmitter);
    Assert.assertEquals(
        "Resource from proto is not the same as the original token",
        anotherToken.getResource(), r);
    Assert.assertEquals(
        "expiryTimeStamp from proto is not the same as the original token",
        anotherToken.getExpiryTimeStamp(), expiryTimeStamp);
    Assert.assertEquals(
        "KeyId from proto is not the same as the original token",
        anotherToken.getMasterKeyId(), masterKeyId);
    Assert.assertEquals(
        "RMIdentifier from proto is not the same as the original token",
        anotherToken.getRMIdentifier(), rmIdentifier);
    Assert.assertEquals(
        "Priority from proto is not the same as the original token",
        anotherToken.getPriority(), priority);
    Assert.assertEquals(
        "CreationTime from proto is not the same as the original token",
        anotherToken.getCreationTime(), creationTime);
Assert.assertNull(anotherToken.getLogAggregationContext());
Assert.assertEquals(CommonNodeLabelsManager.NO_LABEL,
anotherToken.getNodeLabelExpression());
Assert.assertEquals(ContainerType.TASK,
anotherToken.getContainerType());
}
@Test
public void testRMDelegationTokenIdentifier() throws IOException {
Text owner = new Text("user1");
Text renewer = new Text("user2");
Text realUser = new Text("user3");
long issueDate = 1;
long maxDate = 2;
int sequenceNumber = 3;
int masterKeyId = 4;
RMDelegationTokenIdentifier token =
new RMDelegationTokenIdentifier(owner, renewer, realUser);
token.setIssueDate(issueDate);
token.setMaxDate(maxDate);
token.setSequenceNumber(sequenceNumber);
token.setMasterKeyId(masterKeyId);
RMDelegationTokenIdentifier anotherToken = new RMDelegationTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
dib.close();
    // verify the whole record equals the original record
    Assert.assertEquals("Token is not the same after serialization " +
        "and deserialization.", token, anotherToken);
    Assert.assertEquals("owner from proto is not the same as the original token",
        anotherToken.getOwner(), owner);
    Assert.assertEquals("renewer from proto is not the same as the original token",
        anotherToken.getRenewer(), renewer);
    Assert.assertEquals("realUser from proto is not the same as the original token",
        anotherToken.getRealUser(), realUser);
    Assert.assertEquals("issueDate from proto is not the same as the original token",
        anotherToken.getIssueDate(), issueDate);
    Assert.assertEquals("maxDate from proto is not the same as the original token",
        anotherToken.getMaxDate(), maxDate);
    Assert.assertEquals("sequenceNumber from proto is not the same as the original token",
        anotherToken.getSequenceNumber(), sequenceNumber);
    Assert.assertEquals("masterKeyId from proto is not the same as the original token",
        anotherToken.getMasterKeyId(), masterKeyId);
// Test getProto
RMDelegationTokenIdentifier token1 =
new RMDelegationTokenIdentifier(owner, renewer, realUser);
token1.setIssueDate(issueDate);
token1.setMaxDate(maxDate);
token1.setSequenceNumber(sequenceNumber);
token1.setMasterKeyId(masterKeyId);
YARNDelegationTokenIdentifierProto tokenProto = token1.getProto();
// Write token proto to stream
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos);
tokenProto.writeTo(out);
// Read token
byte[] tokenData = baos.toByteArray();
RMDelegationTokenIdentifier readToken = new RMDelegationTokenIdentifier();
DataInputBuffer db = new DataInputBuffer();
db.reset(tokenData, tokenData.length);
readToken.readFields(db);
    // Verify that the read token equals the original token
Assert.assertEquals("Token from getProto is not the same after " +
"serialization and deserialization.", token1, readToken);
db.close();
out.close();
}
@Test
public void testTimelineDelegationTokenIdentifier() throws IOException {
Text owner = new Text("user1");
Text renewer = new Text("user2");
Text realUser = new Text("user3");
long issueDate = 1;
long maxDate = 2;
int sequenceNumber = 3;
int masterKeyId = 4;
TimelineDelegationTokenIdentifier token =
new TimelineDelegationTokenIdentifier(owner, renewer, realUser);
token.setIssueDate(issueDate);
token.setMaxDate(maxDate);
token.setSequenceNumber(sequenceNumber);
token.setMasterKeyId(masterKeyId);
TimelineDelegationTokenIdentifier anotherToken =
new TimelineDelegationTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
    // verify the whole record equals the original record
    Assert.assertEquals("Token is not the same after serialization " +
        "and deserialization.", token, anotherToken);
    Assert.assertEquals("owner from proto is not the same as the original token",
        anotherToken.getOwner(), owner);
    Assert.assertEquals("renewer from proto is not the same as the original token",
        anotherToken.getRenewer(), renewer);
    Assert.assertEquals("realUser from proto is not the same as the original token",
        anotherToken.getRealUser(), realUser);
    Assert.assertEquals("issueDate from proto is not the same as the original token",
        anotherToken.getIssueDate(), issueDate);
    Assert.assertEquals("maxDate from proto is not the same as the original token",
        anotherToken.getMaxDate(), maxDate);
    Assert.assertEquals("sequenceNumber from proto is not the same as the original token",
        anotherToken.getSequenceNumber(), sequenceNumber);
    Assert.assertEquals("masterKeyId from proto is not the same as the original token",
        anotherToken.getMasterKeyId(), masterKeyId);
}
@Test
public void testParseTimelineDelegationTokenIdentifierRenewer() throws IOException {
    // Server side: when generating a timeline DT
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[2:$1@$0]([nr]m@.*EXAMPLE.COM)s/.*/yarn/");
HadoopKerberosName.setConfiguration(conf);
Text owner = new Text("owner");
Text renewer = new Text("rm/[email protected]");
Text realUser = new Text("realUser");
TimelineDelegationTokenIdentifier token =
new TimelineDelegationTokenIdentifier(owner, renewer, realUser);
Assert.assertEquals(new Text("yarn"), token.getRenewer());
}
@Test
public void testAMContainerTokenIdentifier() throws IOException {
ContainerId containerID = ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(
1, 1), 1), 1);
String hostName = "host0";
String appSubmitter = "usr0";
Resource r = Resource.newInstance(1024, 1);
long expiryTimeStamp = 1000;
int masterKeyId = 1;
long rmIdentifier = 1;
Priority priority = Priority.newInstance(1);
long creationTime = 1000;
ContainerTokenIdentifier token =
new ContainerTokenIdentifier(containerID, hostName, appSubmitter, r,
expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
null, CommonNodeLabelsManager.NO_LABEL, ContainerType.APPLICATION_MASTER);
ContainerTokenIdentifier anotherToken = new ContainerTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
Assert.assertEquals(ContainerType.APPLICATION_MASTER,
anotherToken.getContainerType());
token =
new ContainerTokenIdentifier(containerID, hostName, appSubmitter, r,
expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
null, CommonNodeLabelsManager.NO_LABEL, ContainerType.TASK);
anotherToken = new ContainerTokenIdentifier();
tokenContent = token.getBytes();
dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
Assert.assertEquals(ContainerType.TASK,
anotherToken.getContainerType());
}
}
| 16,061 | 38.757426 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestParseRoute.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import java.util.Arrays;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebAppException;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestParseRoute {
@Test public void testNormalAction() {
assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a1", ":a2"),
WebApp.parseRoute("/foo/action/:a1/:a2"));
}
@Test public void testDefaultController() {
assertEquals(Arrays.asList("/", "default", "index"),
WebApp.parseRoute("/"));
}
@Test public void testDefaultAction() {
assertEquals(Arrays.asList("/foo", "foo", "index"),
WebApp.parseRoute("/foo"));
assertEquals(Arrays.asList("/foo", "foo", "index"),
WebApp.parseRoute("/foo/"));
}
@Test public void testMissingAction() {
assertEquals(Arrays.asList("/foo", "foo", "index", ":a1"),
WebApp.parseRoute("/foo/:a1"));
}
@Test public void testDefaultCapture() {
assertEquals(Arrays.asList("/", "default", "index", ":a"),
WebApp.parseRoute("/:a"));
}
@Test public void testPartialCapture1() {
assertEquals(Arrays.asList("/foo/action/bar", "foo", "action", "bar", ":a"),
WebApp.parseRoute("/foo/action/bar/:a"));
}
@Test public void testPartialCapture2() {
assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a1", "bar",
":a2", ":a3"),
WebApp.parseRoute("/foo/action/:a1/bar/:a2/:a3"));
}
@Test public void testLeadingPaddings() {
assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a"),
WebApp.parseRoute(" /foo/action/ :a"));
}
@Test public void testTrailingPaddings() {
assertEquals(Arrays.asList("/foo/action", "foo", "action", ":a"),
WebApp.parseRoute("/foo/action//:a / "));
assertEquals(Arrays.asList("/foo/action", "foo", "action"),
WebApp.parseRoute("/foo/action / "));
}
@Test(expected=WebAppException.class) public void testMissingLeadingSlash() {
WebApp.parseRoute("foo/bar");
}
}
| 2,967 | 34.333333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_TABLE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.webapp.view.HtmlPage;
import org.apache.hadoop.yarn.webapp.view.JQueryUI;
import org.apache.hadoop.yarn.webapp.view.TextPage;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
public class TestWebApp {
static final Logger LOG = LoggerFactory.getLogger(TestWebApp.class);
static class FooController extends Controller {
final TestWebApp test;
@Inject FooController(TestWebApp test) {
this.test = test;
}
@Override public void index() {
set("key", test.echo("foo"));
}
public void bar() {
set("key", "bar");
}
public void names() {
for (int i = 0; i < 20; ++i) {
renderText(MockApps.newAppName() + "\n");
}
}
public void ex() {
boolean err = $("clear").isEmpty();
renderText(err ? "Should redirect to an error page." : "No error!");
if (err) {
throw new RuntimeException("exception test");
}
}
public void tables() {
render(TablesView.class);
}
}
static class FooView extends TextPage {
@Override public void render() {
puts($("key"), $("foo"));
}
}
static class DefaultController extends Controller {
@Override public void index() {
set("key", "default");
render(FooView.class);
}
}
static class TablesView extends HtmlPage {
@Override
public void render(Page.HTML<_> html) {
set(DATATABLES_ID, "t1 t2 t3 t4");
set(initID(DATATABLES, "t1"), tableInit().append("}").toString());
set(initID(DATATABLES, "t2"), join("{bJQueryUI:true, sDom:'t',",
"aoColumns:[null, {bSortable:false, bSearchable:false}]}"));
set(initID(DATATABLES, "t3"), "{bJQueryUI:true, sDom:'t'}");
set(initID(DATATABLES, "t4"), "{bJQueryUI:true, sDom:'t'}");
html.
title("Test DataTables").
link("/static/yarn.css").
_(JQueryUI.class).
style(".wrapper { padding: 1em }",
".wrapper h2 { margin: 0.5em 0 }",
".dataTables_wrapper { min-height: 1em }").
div(".wrapper").
h2("Default table init").
table("#t1").
thead().
tr().th("Column1").th("Column2")._()._().
tbody().
tr().td("c1r1").td("c2r1")._().
tr().td("c1r2").td("c2r2")._()._()._().
h2("Nested tables").
div(_INFO_WRAP).
table("#t2").
thead().
tr().th(_TH, "Column1").th(_TH, "Column2")._()._().
tbody().
tr().td("r1"). // th wouldn't work as of dt 1.7.5
td().$class(C_TABLE).
table("#t3").
thead().
tr().th("SubColumn1").th("SubColumn2")._()._().
tbody().
tr().td("subc1r1").td("subc2r1")._().
tr().td("subc1r2").td("subc2r2")._()._()._()._()._().
tr().td("r2"). // ditto
td().$class(C_TABLE).
table("#t4").
thead().
tr().th("SubColumn1").th("SubColumn2")._()._().
tbody().
tr().td("subc1r1").td("subc2r1")._().
tr().td("subc1r2").td("subc2r2")._().
_()._()._()._()._()._()._()._()._();
}
}
String echo(String s) { return s; }
@Test public void testCreate() {
WebApp app = WebApps.$for(this).start();
app.stop();
}
@Test public void testCreateWithPort() {
// see if the ephemeral port is updated
WebApp app = WebApps.$for(this).at(0).start();
int port = app.getListenerAddress().getPort();
assertTrue(port > 0);
app.stop();
// try to reuse the port
app = WebApps.$for(this).at(port).start();
assertEquals(port, app.getListenerAddress().getPort());
app.stop();
}
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class)
public void testCreateWithBindAddressNonZeroPort() {
WebApp app = WebApps.$for(this).at("0.0.0.0:50000").start();
int port = app.getListenerAddress().getPort();
assertEquals(50000, port);
    // starting another WebApp on the same non-zero port should throw,
    // since findPort is disabled
    WebApp app2 = WebApps.$for(this).at("0.0.0.0:50000").start();
app.stop();
app2.stop();
}
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class)
public void testCreateWithNonZeroPort() {
WebApp app = WebApps.$for(this).at(50000).start();
int port = app.getListenerAddress().getPort();
assertEquals(50000, port);
    // starting another WebApp on the same non-zero port should throw,
    // since findPort is disabled
    WebApp app2 = WebApps.$for(this).at(50000).start();
app.stop();
app2.stop();
}
@Test public void testServePaths() {
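    // a named app should register both its exact context path and a wildcard spec under it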
WebApp app = WebApps.$for("test", this).start();
assertEquals("/test", app.getRedirectPath());
String[] expectedPaths = { "/test", "/test/*" };
String[] pathSpecs = app.getServePathSpecs();
assertEquals(2, pathSpecs.length);
for(int i = 0; i < expectedPaths.length; i++) {
assertTrue(ArrayUtils.contains(pathSpecs, expectedPaths[i]));
}
app.stop();
}
@Test public void testServePathsNoName() {
WebApp app = WebApps.$for("", this).start();
assertEquals("/", app.getRedirectPath());
String[] expectedPaths = { "/*" };
String[] pathSpecs = app.getServePathSpecs();
assertEquals(1, pathSpecs.length);
for(int i = 0; i < expectedPaths.length; i++) {
assertTrue(ArrayUtils.contains(pathSpecs, expectedPaths[i]));
}
app.stop();
}
@Test public void testDefaultRoutes() throws Exception {
WebApp app = WebApps.$for("test", this).start();
String baseUrl = baseUrl(app);
try {
assertEquals("foo", getContent(baseUrl +"test/foo").trim());
assertEquals("foo", getContent(baseUrl +"test/foo/index").trim());
assertEquals("bar", getContent(baseUrl +"test/foo/bar").trim());
assertEquals("default", getContent(baseUrl +"test").trim());
assertEquals("default", getContent(baseUrl +"test/").trim());
assertEquals("default", getContent(baseUrl).trim());
} finally {
app.stop();
}
}
@Test public void testCustomRoutes() throws Exception {
WebApp app =
WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() {
@Override
public void setup() {
bind(MyTestJAXBContextResolver.class);
bind(MyTestWebService.class);
route("/:foo", FooController.class);
route("/bar/foo", FooController.class, "bar");
route("/foo/:foo", DefaultController.class);
route("/foo/bar/:foo", DefaultController.class, "index");
}
});
String baseUrl = baseUrl(app);
try {
assertEquals("foo", getContent(baseUrl).trim());
assertEquals("foo", getContent(baseUrl +"test").trim());
assertEquals("foo1", getContent(baseUrl +"test/1").trim());
assertEquals("bar", getContent(baseUrl +"test/bar/foo").trim());
assertEquals("default", getContent(baseUrl +"test/foo/bar").trim());
assertEquals("default1", getContent(baseUrl +"test/foo/1").trim());
assertEquals("default2", getContent(baseUrl +"test/foo/bar/2").trim());
assertEquals(404, getResponseCode(baseUrl +"test/goo"));
assertEquals(200, getResponseCode(baseUrl +"ws/v1/test"));
assertTrue(getContent(baseUrl +"ws/v1/test").contains("myInfo"));
} finally {
app.stop();
}
}
  // This tests that the GuiceFilter is applied only to the webAppContext,
  // not to the staticContext or the logContext.
@Test public void testYARNWebAppContext() throws Exception {
// setting up the log context
System.setProperty("hadoop.log.dir", "/Not/Existing/dir");
WebApp app = WebApps.$for("test", this).start(new WebApp() {
@Override public void setup() {
route("/", FooController.class);
}
});
String baseUrl = baseUrl(app);
try {
// should not redirect to foo
assertFalse("foo".equals(getContent(baseUrl +"static").trim()));
// Not able to access a non-existing dir, should not redirect to foo.
assertEquals(404, getResponseCode(baseUrl +"logs"));
// should be able to redirect to foo.
assertEquals("foo", getContent(baseUrl).trim());
} finally {
app.stop();
}
}
static String baseUrl(WebApp app) {
return "http://localhost:"+ app.port() +"/";
}
static String getContent(String url) {
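    // fetch the URL and return the entire response body as a string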
try {
StringBuilder out = new StringBuilder();
InputStream in = new URL(url).openConnection().getInputStream();
byte[] buffer = new byte[64 * 1024];
int len = in.read(buffer);
while (len > 0) {
out.append(new String(buffer, 0, len));
len = in.read(buffer);
}
return out.toString();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
static int getResponseCode(String url) {
try {
HttpURLConnection c = (HttpURLConnection)new URL(url).openConnection();
return c.getResponseCode();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static void main(String[] args) throws Exception {
// For manual controller/view testing.
WebApps.$for("test", new TestWebApp()).at(8888).inDevMode().start().
joinThread();
// start(new WebApp() {
// @Override public void setup() {
// route("/:foo", FooController.class);
// route("/foo/:foo", FooController.class);
// route("/bar", FooController.class);
// }
// }).join();
}
}
| 11,502 | 34.070122 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestJAXBContextResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.yarn.webapp.MyTestWebService.MyInfo;
import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
@Singleton
@Provider
public class MyTestJAXBContextResolver implements ContextResolver<JAXBContext> {
private JAXBContext context;
private final Set<Class> types;
// you have to specify all the dao classes here
private final Class[] cTypes = { MyInfo.class };
public MyTestJAXBContextResolver() throws Exception {
this.types = new HashSet<Class>(Arrays.asList(cTypes));
this.context =
new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false)
.build(), cTypes);
}
@Override
public JAXBContext getContext(Class<?> objectType) {
return (types.contains(objectType)) ? context : null;
}
}
| 1,877 | 32.535714 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestWebService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import com.google.inject.Singleton;
@Singleton
@Path("/ws/v1/test")
public class MyTestWebService {
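  // Minimal JAX-RS resource backing the /ws/v1/test endpoint exercised by
  // TestWebApp#testCustomRoutes.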
@GET
@Produces({ MediaType.APPLICATION_XML })
public MyInfo get() {
return new MyInfo();
}
@XmlRootElement(name = "myInfo")
@XmlAccessorType(XmlAccessType.FIELD)
static class MyInfo {
public MyInfo() {
}
}
}
| 1,447 | 29.808511 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.HtmlPage;
import java.io.PrintWriter;
import javax.servlet.http.HttpServletResponse;
import com.google.inject.Injector;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestSubViews {
  public static class MainView extends HtmlPage {
@Override
public void render(Page.HTML<_> html) {
html.
body().
div().
_(Sub1.class)._().
div().
i("inline text").
_(Sub2.class)._()._()._();
}
}
  public static class Sub1 extends HtmlBlock {
@Override
public void render(Block html) {
html.
div("#sub1").
_("sub1 text")._();
}
}
  public static class Sub2 extends HtmlBlock {
@Override
public void render(Block html) {
html.
pre().
_("sub2 text")._();
}
}
@Test public void testSubView() throws Exception {
Injector injector = WebAppTests.createMockInjector(this);
injector.getInstance(MainView.class).render();
PrintWriter out =
injector.getInstance(HttpServletResponse.class).getWriter();
out.flush();
verify(out).print("sub1 text");
verify(out).print("sub2 text");
verify(out, times(16)).println(); // test inline transition across views
}
}
| 2,243 | 28.526316 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import org.junit.Before;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
public abstract class JerseyTestBase extends JerseyTest {
public JerseyTestBase(WebAppDescriptor appDescriptor) {
super(appDescriptor);
}
@Before
public void initializeJerseyPort() {
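    // Shift the port by 10 for each test class so consecutive runs don't collide,
    // falling back to the default when the offset would leave the valid port range.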
int jerseyPort = 9998;
String port = System.getProperty("jersey.test.port");
if(null != port) {
jerseyPort = Integer.parseInt(port) + 10;
if(jerseyPort > 65535) {
jerseyPort = 9998;
}
}
System.setProperty("jersey.test.port", Integer.toString(jerseyPort));
}
}
| 1,482 | 33.488372 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import static org.junit.Assert.assertTrue;
import org.w3c.dom.Attr;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
public class WebServicesTestUtils {
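  // Static helpers for reading typed values out of XML responses and for
  // string-based assertions shared by the web services tests.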
public static long getXmlLong(Element element, String name) {
String val = getXmlString(element, name);
return Long.parseLong(val);
}
public static int getXmlInt(Element element, String name) {
String val = getXmlString(element, name);
return Integer.parseInt(val);
}
public static Boolean getXmlBoolean(Element element, String name) {
String val = getXmlString(element, name);
return Boolean.parseBoolean(val);
}
public static float getXmlFloat(Element element, String name) {
String val = getXmlString(element, name);
return Float.parseFloat(val);
}
public static String getXmlString(Element element, String name) {
NodeList id = element.getElementsByTagName(name);
Element line = (Element) id.item(0);
if (line == null) {
return null;
}
Node first = line.getFirstChild();
// handle empty <key></key>
if (first == null) {
return "";
}
String val = first.getNodeValue();
if (val == null) {
return "";
}
return val;
}
public static String getXmlAttrString(Element element, String name) {
Attr at = element.getAttributeNode(name);
if (at != null) {
return at.getValue();
}
return null;
}
public static void checkStringMatch(String print, String expected, String got) {
assertTrue(
print + " doesn't match, got: " + got + " expected: " + expected,
got.matches(expected));
}
public static void checkStringContains(String print, String expected, String got) {
assertTrue(
print + " doesn't contain expected string, got: " + got + " expected: " + expected,
got.contains(expected));
}
public static void checkStringEqual(String print, String expected, String got) {
assertTrue(
print + " is not equal, got: " + got + " expected: " + expected,
got.equals(expected));
}
}
| 2,926 | 29.810526 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.hamlet;
import java.util.EnumSet;
import java.io.PrintWriter;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import static org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.*;
public class TestHamlet {
@Test public void testHamlet() {
Hamlet h = newHamlet().
title("test").
h1("heading 1").
p("#id.class").
b("hello").
em("world!")._().
div("#footer").
_("Brought to you by").
a("http://hostname/", "Somebody")._();
PrintWriter out = h.getWriter();
out.flush();
assertEquals(0, h.nestLevel);
verify(out).print("<title");
verify(out).print("test");
verify(out).print("</title>");
verify(out).print("<h1");
verify(out).print("heading 1");
verify(out).print("</h1>");
verify(out).print("<p");
verify(out).print(" id=\"id\"");
verify(out).print(" class=\"class\"");
verify(out).print("<b");
verify(out).print("hello");
verify(out).print("</b>");
verify(out).print("<em");
verify(out).print("world!");
verify(out).print("</em>");
verify(out).print("<div");
verify(out).print(" id=\"footer\"");
verify(out).print("Brought to you by");
verify(out).print("<a");
verify(out).print(" href=\"http://hostname/\"");
verify(out).print("Somebody");
verify(out).print("</a>");
verify(out).print("</div>");
verify(out, never()).print("</p>");
}
@Test public void testTable() {
Hamlet h = newHamlet().
title("test table").
link("style.css");
TABLE t = h.table("#id");
for (int i = 0; i < 3; ++i) {
t.tr().td("1").td("2")._();
}
t._();
PrintWriter out = h.getWriter();
out.flush();
assertEquals(0, h.nestLevel);
verify(out).print("<table");
verify(out).print("</table>");
verify(out, atLeast(1)).print("</td>");
verify(out, atLeast(1)).print("</tr>");
}
@Test public void testEnumAttrs() {
Hamlet h = newHamlet().
meta_http("Content-type", "text/html; charset=utf-8").
title("test enum attrs").
link().$rel("stylesheet").
$media(EnumSet.of(Media.screen, Media.print)).
$type("text/css").$href("style.css")._().
link().$rel(EnumSet.of(LinkType.index, LinkType.start)).
$href("index.html")._();
h.div("#content")._("content")._();
PrintWriter out = h.getWriter();
out.flush();
assertEquals(0, h.nestLevel);
verify(out).print(" media=\"screen, print\"");
verify(out).print(" rel=\"start index\"");
}
@Test public void testScriptStyle() {
Hamlet h = newHamlet().
script("a.js").script("b.js").
style("h1 { font-size: 1.2em }");
PrintWriter out = h.getWriter();
out.flush();
assertEquals(0, h.nestLevel);
verify(out, times(2)).print(" type=\"text/javascript\"");
verify(out).print(" type=\"text/css\"");
}
@Test public void testPreformatted() {
Hamlet h = newHamlet().
div().
i("inline before pre").
pre().
_("pre text1\npre text2").
i("inline in pre").
_("pre text after inline")._().
i("inline after pre")._();
PrintWriter out = h.getWriter();
out.flush();
assertEquals(5, h.indents);
}
static class TestView1 implements SubView {
@Override public void renderPartial() {}
}
static class TestView2 implements SubView {
@Override public void renderPartial() {}
}
@Test public void testSubViews() {
Hamlet h = newHamlet().
title("test sub-views").
div("#view1")._(TestView1.class)._().
div("#view2")._(TestView2.class)._();
PrintWriter out = h.getWriter();
out.flush();
assertEquals(0, h.nestLevel);
verify(out).print("["+ TestView1.class.getName() +"]");
verify(out).print("["+ TestView2.class.getName() +"]");
}
static Hamlet newHamlet() {
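    // spy on a real writer so tests can verify the exact markup fragments printed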
PrintWriter out = spy(new PrintWriter(System.out));
return new Hamlet(out, 0, false);
}
}
| 4,976 | 28.802395 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.hamlet;
import java.io.PrintWriter;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.yarn.webapp.hamlet.HamletImpl;
import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.*;
public class TestHamletImpl {
/**
* Test the generic implementation methods
* @see TestHamlet for Hamlet syntax
*/
@Test public void testGeneric() {
PrintWriter out = spy(new PrintWriter(System.out));
HamletImpl hi = new HamletImpl(out, 0, false);
hi.
root("start")._attr("name", "value").
_("start text").
elem("sub")._attr("name", "value").
_("sub text")._().
elem("sub1")._noEndTag()._attr("boolean", null).
_("sub1text")._().
_("start text2").
elem("pre")._pre().
_("pre text").
elem("i")._inline()._("inline")._()._().
elem("i")._inline()._("inline after pre")._().
_("start text3").
elem("sub2").
_("sub2text")._().
elem("sub3")._noEndTag().
_("sub3text")._().
elem("sub4")._noEndTag().
elem("i")._inline()._("inline")._().
_("sub4text")._()._();
out.flush();
assertEquals(0, hi.nestLevel);
assertEquals(20, hi.indents);
verify(out).print("<start");
verify(out, times(2)).print(" name=\"value\"");
verify(out).print(" boolean");
verify(out).print("</start>");
verify(out, never()).print("</sub1>");
verify(out, never()).print("</sub3>");
verify(out, never()).print("</sub4>");
}
@Test public void testSetSelector() {
CoreAttrs e = mock(CoreAttrs.class);
HamletImpl.setSelector(e, "#id.class");
verify(e).$id("id");
verify(e).$class("class");
H1 t = mock(H1.class);
HamletImpl.setSelector(t, "#id.class")._("heading");
verify(t).$id("id");
verify(t).$class("class");
verify(t)._("heading");
}
@Test public void testSetLinkHref() {
LINK link = mock(LINK.class);
HamletImpl.setLinkHref(link, "uri");
HamletImpl.setLinkHref(link, "style.css");
verify(link).$href("uri");
verify(link).$rel("stylesheet");
verify(link).$href("style.css");
verifyNoMoreInteractions(link);
}
@Test public void testSetScriptSrc() {
SCRIPT script = mock(SCRIPT.class);
HamletImpl.setScriptSrc(script, "uri");
HamletImpl.setScriptSrc(script, "script.js");
verify(script).$src("uri");
verify(script).$type("text/javascript");
verify(script).$src("script.js");
verifyNoMoreInteractions(script);
}
}
| 3,400 | 30.201835 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.hamlet;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.yarn.webapp.WebAppException;
import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.*;
public class TestParseSelector {
@Test public void testNormal() {
String[] res = parseSelector("#id.class");
assertEquals("id", res[S_ID]);
assertEquals("class", res[S_CLASS]);
}
@Test public void testMultiClass() {
String[] res = parseSelector("#id.class1.class2");
assertEquals("id", res[S_ID]);
assertEquals("class1 class2", res[S_CLASS]);
}
@Test public void testMissingId() {
String[] res = parseSelector(".class");
assertNull(res[S_ID]);
assertEquals("class", res[S_CLASS]);
}
@Test public void testMissingClass() {
String[] res = parseSelector("#id");
assertEquals("id", res[S_ID]);
assertNull(res[S_CLASS]);
}
@Test(expected=WebAppException.class) public void testMissingAll() {
parseSelector("");
}
}
| 1,805 | 30.137931 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.util;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.http.HttpServer2.Builder;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Assert;
import org.junit.Test;
public class TestWebAppUtils {
private static final String RM1_NODE_ID = "rm1";
private static final String RM2_NODE_ID = "rm2";
  // Because WebAppUtils#getResolvedAddress tries to resolve the hostname, we add
  // static mappings for dummy hostnames so this test can run anywhere without
  // requiring resolvable hostnames.
  private static String[] dummyHostNames = {"host1", "host2", "host3"};
private static final String anyIpAddress = "1.2.3.4";
private static Map<String, String> savedStaticResolution = new HashMap<>();
@BeforeClass
public static void initializeDummyHostnameResolution() throws Exception {
String previousIpAddress;
for (String hostName : dummyHostNames) {
if (null != (previousIpAddress = NetUtils.getStaticResolution(hostName))) {
savedStaticResolution.put(hostName, previousIpAddress);
}
NetUtils.addStaticResolution(hostName, anyIpAddress);
}
}
@AfterClass
public static void restoreDummyHostnameResolution() throws Exception {
for (Map.Entry<String, String> hostnameToIpEntry : savedStaticResolution.entrySet()) {
NetUtils.addStaticResolution(hostnameToIpEntry.getKey(), hostnameToIpEntry.getValue());
}
}
@Test
  public void testRMWebAppURLRemoteAndLocal() throws UnknownHostException {
Configuration configuration = new Configuration();
final String rmAddress = "host1:8088";
configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, rmAddress);
final String rm1Address = "host2:8088";
final String rm2Address = "host3:8088";
configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS + "." + RM1_NODE_ID, rm1Address);
configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS + "." + RM2_NODE_ID, rm2Address);
configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
String rmRemoteUrl = WebAppUtils.getResolvedRemoteRMWebAppURLWithoutScheme(configuration);
Assert.assertEquals("ResolvedRemoteRMWebAppUrl should resolve to the first HA RM address", rm1Address, rmRemoteUrl);
String rmLocalUrl = WebAppUtils.getResolvedRMWebAppURLWithoutScheme(configuration);
Assert.assertEquals("ResolvedRMWebAppUrl should resolve to the default RM webapp address", rmAddress, rmLocalUrl);
}
@Test
public void testGetPassword() throws Exception {
Configuration conf = provisionCredentialsForSSL();
// use WebAppUtils as would be used by loadSslConfiguration
Assert.assertEquals("keypass",
WebAppUtils.getPassword(conf, WebAppUtils.WEB_APP_KEY_PASSWORD_KEY));
Assert.assertEquals("storepass",
WebAppUtils.getPassword(conf, WebAppUtils.WEB_APP_KEYSTORE_PASSWORD_KEY));
Assert.assertEquals("trustpass",
WebAppUtils.getPassword(conf, WebAppUtils.WEB_APP_TRUSTSTORE_PASSWORD_KEY));
// let's make sure that a password that doesn't exist returns null
    Assert.assertNull(WebAppUtils.getPassword(conf, "invalid-alias"));
}
@Test
public void testLoadSslConfiguration() throws Exception {
Configuration conf = provisionCredentialsForSSL();
    TestBuilder builder = new TestBuilder();
    builder = (TestBuilder) WebAppUtils.loadSslConfiguration(builder, conf);
String keypass = "keypass";
String storepass = "storepass";
String trustpass = "trustpass";
// make sure we get the right passwords in the builder
    assertEquals(keypass, builder.keypass);
    assertEquals(storepass, builder.keystorePassword);
    assertEquals(trustpass, builder.truststorePassword);
}
  protected Configuration provisionCredentialsForSSL() throws Exception {
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testDir, "test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
CredentialProvider provider =
CredentialProviderFactory.getProviders(conf).get(0);
char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
char[] trustpass = {'t', 'r', 'u', 's', 't', 'p', 'a', 's', 's'};
// ensure that we get nulls when the key isn't there
assertEquals(null, provider.getCredentialEntry(
WebAppUtils.WEB_APP_KEY_PASSWORD_KEY));
assertEquals(null, provider.getCredentialEntry(
WebAppUtils.WEB_APP_KEYSTORE_PASSWORD_KEY));
assertEquals(null, provider.getCredentialEntry(
WebAppUtils.WEB_APP_TRUSTSTORE_PASSWORD_KEY));
// create new aliases
try {
provider.createCredentialEntry(
WebAppUtils.WEB_APP_KEY_PASSWORD_KEY, keypass);
provider.createCredentialEntry(
WebAppUtils.WEB_APP_KEYSTORE_PASSWORD_KEY, storepass);
provider.createCredentialEntry(
WebAppUtils.WEB_APP_TRUSTSTORE_PASSWORD_KEY, trustpass);
// write out so that it can be found in checks
provider.flush();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
// make sure we get back the right key directly from api
assertArrayEquals(keypass, provider.getCredentialEntry(
WebAppUtils.WEB_APP_KEY_PASSWORD_KEY).getCredential());
assertArrayEquals(storepass, provider.getCredentialEntry(
WebAppUtils.WEB_APP_KEYSTORE_PASSWORD_KEY).getCredential());
assertArrayEquals(trustpass, provider.getCredentialEntry(
WebAppUtils.WEB_APP_TRUSTSTORE_PASSWORD_KEY).getCredential());
return conf;
}
public class TestBuilder extends HttpServer2.Builder {
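    // Captures the passwords handed to the HttpServer2 builder so the SSL
    // configuration tests can assert on them.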
public String keypass;
public String keystorePassword;
public String truststorePassword;
@Override
public Builder trustStore(String location, String password, String type) {
truststorePassword = password;
return super.trustStore(location, password, type);
}
@Override
public Builder keyStore(String location, String password, String type) {
keystorePassword = password;
return super.keyStore(location, password, type);
}
@Override
public Builder keyPassword(String password) {
keypass = password;
return super.keyPassword(password);
}
}
}
| 8,146 | 39.133005 | 120 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/TestWebAppTests.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.test;
import com.google.inject.AbstractModule;
import com.google.inject.Injector;
import com.google.inject.servlet.RequestScoped;
import java.io.PrintWriter;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.slf4j.LoggerFactory;
import org.slf4j.Logger;
public class TestWebAppTests {
static final Logger LOG = LoggerFactory.getLogger(TestWebAppTests.class);
@Test public void testInstances() throws Exception {
Injector injector = WebAppTests.createMockInjector(this);
HttpServletRequest req = injector.getInstance(HttpServletRequest.class);
HttpServletResponse res = injector.getInstance(HttpServletResponse.class);
String val = req.getParameter("foo");
PrintWriter out = res.getWriter();
out.println("Hello world!");
logInstances(req, res, out);
assertSame(req, injector.getInstance(HttpServletRequest.class));
assertSame(res, injector.getInstance(HttpServletResponse.class));
assertSame(this, injector.getInstance(TestWebAppTests.class));
verify(req).getParameter("foo");
verify(res).getWriter();
verify(out).println("Hello world!");
}
interface Foo {
}
static class Bar implements Foo {
}
static class FooBar extends Bar {
}
@Test public void testCreateInjector() throws Exception {
Bar bar = new Bar();
Injector injector = WebAppTests.createMockInjector(Foo.class, bar);
logInstances(injector.getInstance(HttpServletRequest.class),
injector.getInstance(HttpServletResponse.class),
injector.getInstance(HttpServletResponse.class).getWriter());
assertSame(bar, injector.getInstance(Foo.class));
}
@Test public void testCreateInjector2() {
final FooBar foobar = new FooBar();
Bar bar = new Bar();
Injector injector = WebAppTests.createMockInjector(Foo.class, bar,
new AbstractModule() {
@Override protected void configure() {
bind(Bar.class).toInstance(foobar);
}
});
assertNotSame(bar, injector.getInstance(Bar.class));
assertSame(foobar, injector.getInstance(Bar.class));
}
@RequestScoped
static class ScopeTest {
}
@Test public void testRequestScope() {
Injector injector = WebAppTests.createMockInjector(this);
assertSame(injector.getInstance(ScopeTest.class),
injector.getInstance(ScopeTest.class));
}
private void logInstances(HttpServletRequest req, HttpServletResponse res,
PrintWriter out) {
LOG.info("request: {}", req);
LOG.info("response: {}", res);
LOG.info("writer: {}", out);
}
}
| 3,548 | 33.125 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/test/WebAppTests.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.test;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.View;
import org.apache.hadoop.yarn.webapp.WebAppException;
import java.lang.reflect.Method;
import java.util.Map;
import com.google.inject.Module;
import com.google.inject.Scopes;
import com.google.inject.servlet.RequestScoped;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Provides;
import java.io.IOException;
import java.io.PrintWriter;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletRequest;
import static org.mockito.Mockito.*;
public class WebAppTests {
/**
* Create a mock injector for tests
* @param <T> type of class/interface
* @param api the interface class of the object to inject
* @param impl the implementation object to inject
* @param modules additional guice modules
* @return an injector
*/
public static <T> Injector createMockInjector(final Class<T> api,
final T impl,
final Module... modules) {
return Guice.createInjector(new AbstractModule() {
final PrintWriter writer = spy(new PrintWriter(System.out));
final HttpServletRequest request = createRequest();
final HttpServletResponse response = createResponse();
@Override
protected void configure() {
if (api != null) {
bind(api).toInstance(impl);
}
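        // collapse request scope to singleton so request-scoped bindings
        // resolve to stable instances within a single test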
bindScope(RequestScoped.class, Scopes.SINGLETON);
if (modules != null) {
for (Module module : modules) {
install(module);
}
}
}
@Provides HttpServletRequest request() {
return request;
}
@Provides HttpServletResponse response() {
return response;
}
@Provides PrintWriter writer() {
return writer;
}
HttpServletRequest createRequest() {
// the default suffices for now
return mock(HttpServletRequest.class);
}
HttpServletResponse createResponse() {
try {
HttpServletResponse res = mock(HttpServletResponse.class);
when(res.getWriter()).thenReturn(writer);
return res;
} catch (Exception e) {
throw new WebAppException(e);
}
}
});
}
// convenience
@SuppressWarnings("unchecked")
public static <T> Injector createMockInjector(T impl) {
return createMockInjector((Class<T>)impl.getClass(), impl);
}
public static void flushOutput(Injector injector) {
HttpServletResponse res = injector.getInstance(HttpServletResponse.class);
try {
res.getWriter().flush();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static <T> Injector testController(Class<? extends Controller> ctrlr,
String methodName, Class<T> api, T impl, Module... modules) {
try {
Injector injector = createMockInjector(api, impl, modules);
Method method = ctrlr.getMethod(methodName, (Class<?>[])null);
method.invoke(injector.getInstance(ctrlr), (Object[])null);
return injector;
} catch (Exception e) {
throw new WebAppException(e);
}
}
public static <T> Injector testController(Class<? extends Controller> ctrlr,
String methodName) {
return testController(ctrlr, methodName, null, null);
}
public static <T> Injector testPage(Class<? extends View> page, Class<T> api,
T impl, Map<String,String> params, Module... modules) {
Injector injector = createMockInjector(api, impl, modules);
View view = injector.getInstance(page);
if(params != null) {
for(Map.Entry<String, String> entry: params.entrySet()) {
view.set(entry.getKey(), entry.getValue());
}
}
view.render();
flushOutput(injector);
return injector;
}
public static <T> Injector testPage(Class<? extends View> page, Class<T> api,
T impl, Module... modules) {
return testPage(page, api, impl, null, modules);
}
// convenience
public static <T> Injector testPage(Class<? extends View> page) {
return testPage(page, null, null);
}
public static <T> Injector testBlock(Class<? extends SubView> block,
Class<T> api, T impl, Module... modules) {
Injector injector = createMockInjector(api, impl, modules);
injector.getInstance(block).renderPartial();
flushOutput(injector);
return injector;
}
// convenience
public static <T> Injector testBlock(Class<? extends SubView> block) {
return testBlock(block, null, null);
}
/**
* Convenience method to get the spy writer.
* @param injector the injector used for the test.
* @return The Spy writer.
* @throws IOException
*/
public static PrintWriter getPrintWriter(Injector injector)
throws IOException {
HttpServletResponse res = injector.getInstance(HttpServletResponse.class);
return res.getWriter();
}
}
| 5,967 | 30.914439 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlockForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.log;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.conf.Configuration;
public class AggregatedLogsBlockForTest extends AggregatedLogsBlock {
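  // Test double that exposes extra request parameters and lets tests inject
  // a mock HttpServletRequest.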
  private final Map<String, String> params = new HashMap<String, String>();
private HttpServletRequest request;
public AggregatedLogsBlockForTest(Configuration conf) {
super(conf);
}
@Override
public void render(Block html) {
super.render(html);
}
public Map<String, String> moreParams() {
return params;
}
public HttpServletRequest request() {
return request;
}
public void setRequest(HttpServletRequest request) {
this.request = request;
}
}
| 1,543 | 28.132075 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import com.google.inject.Injector;
import java.io.PrintWriter;
import org.apache.hadoop.yarn.webapp.WebAppException;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.HtmlPage;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestHtmlBlock {
public static class TestBlock extends HtmlBlock {
@Override
public void render(Block html) {
html.
p("#testid")._("test note")._();
}
}
public static class ShortBlock extends HtmlBlock {
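    // leaves the <p> element unclosed, so rendering is expected to fail
    // with a WebAppException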
@Override
public void render(Block html) {
html.
p()._("should throw");
}
}
public static class ShortPage extends HtmlPage {
@Override
public void render(Page.HTML<_> html) {
html.
title("short test").
_(ShortBlock.class);
}
}
@Test public void testUsual() {
Injector injector = WebAppTests.testBlock(TestBlock.class);
PrintWriter out = injector.getInstance(PrintWriter.class);
verify(out).print(" id=\"testid\"");
verify(out).print("test note");
}
@Test(expected=WebAppException.class) public void testShortBlock() {
WebAppTests.testBlock(ShortBlock.class);
}
@Test(expected=WebAppException.class) public void testShortPage() {
WebAppTests.testPage(ShortPage.class);
}
}
| 2,206 | 28.426667 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import com.google.inject.Injector;
import java.io.PrintWriter;
import org.apache.hadoop.yarn.webapp.MimeType;
import org.apache.hadoop.yarn.webapp.WebAppException;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.apache.hadoop.yarn.webapp.view.HtmlPage;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestHtmlPage {
public static class TestView extends HtmlPage {
@Override
public void render(Page.HTML<_> html) {
html.
title("test").
p("#testid")._("test note")._()._();
}
}
public static class ShortView extends HtmlPage {
@Override
public void render(Page.HTML<_> html) {
html.
title("short test").
p()._("should throw");
}
}
@Test public void testUsual() {
Injector injector = WebAppTests.testPage(TestView.class);
PrintWriter out = injector.getInstance(PrintWriter.class);
// Verify the HTML page has correct meta tags in the header
verify(out).print(" http-equiv=\"X-UA-Compatible\"");
verify(out).print(" content=\"IE=8\"");
verify(out).print(" http-equiv=\"Content-type\"");
verify(out).print(String.format(" content=\"%s\"", MimeType.HTML));
verify(out).print("test");
verify(out).print(" id=\"testid\"");
verify(out).print("test note");
}
@Test(expected=WebAppException.class) public void testShort() {
WebAppTests.testPage(ShortView.class);
}
}
| 2,276 | 30.625 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.WebApps;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.apache.hadoop.yarn.webapp.view.HtmlPage;
import org.apache.hadoop.yarn.webapp.view.TwoColumnCssLayout;
import org.junit.Test;
public class TestTwoColumnCssPage {
public static class TestController extends Controller {
@Override
public void index() {
set("title", "Testing a Two Column Layout");
set("ui.accordion.id", "nav");
render(TwoColumnCssLayout.class);
}
public void names() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 8; ++i) {
sb.append(MockApps.newAppName()).append(' ');
}
setTitle(sb.toString());
}
public void textnames() {
names();
renderText($("title"));
}
}
public static class TestView extends HtmlPage {
@Override
public void render(Page.HTML<_> html) {
html.
title($("title")).
h1($("title"))._();
}
}
@Test public void shouldNotThrow() {
WebAppTests.testPage(TwoColumnCssLayout.class);
}
public static void main(String[] args) {
WebApps.$for("test").at(8888).inDevMode().start().joinThread();
}
}
| 2,128 | 29.414286 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import java.io.PrintWriter;
import java.io.StringWriter;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Before;
import org.junit.Test;
public class TestInfoBlock {
public static StringWriter sw;
public static PrintWriter pw;
static final String JAVASCRIPT = "<script>alert('text')</script>";
static final String JAVASCRIPT_ESCAPED =
"<script>alert('text')</script>";
public static class JavaScriptInfoBlock extends InfoBlock{
static ResponseInfo resInfo;
static {
resInfo = new ResponseInfo();
resInfo._("User_Name", JAVASCRIPT);
}
@Override
public PrintWriter writer() {
return TestInfoBlock.pw;
}
JavaScriptInfoBlock(ResponseInfo info) {
super(resInfo);
}
public JavaScriptInfoBlock() {
super(resInfo);
}
}
public static class MultilineInfoBlock extends InfoBlock{
static ResponseInfo resInfo;
static {
resInfo = new ResponseInfo();
resInfo._("Multiple_line_value", "This is one line.");
resInfo._("Multiple_line_value", "This is first line.\nThis is second line.");
}
@Override
public PrintWriter writer() {
return TestInfoBlock.pw;
}
MultilineInfoBlock(ResponseInfo info) {
super(resInfo);
}
public MultilineInfoBlock() {
super(resInfo);
}
}
@Before
public void setup() {
sw = new StringWriter();
pw = new PrintWriter(sw);
}
@Test(timeout=60000L)
public void testMultilineInfoBlock() throws Exception{
WebAppTests.testBlock(MultilineInfoBlock.class);
TestInfoBlock.pw.flush();
String output = TestInfoBlock.sw.toString().replaceAll(" +", " ");
String expectedMultilineData1 = String.format("<tr class=\"odd\">%n"
+ " <th>%n Multiple_line_value%n </th>%n"
+ " <td>%n This is one line.%n </td>%n");
String expectedMultilineData2 = String.format("<tr class=\"even\">%n"
+ " <th>%n Multiple_line_value%n </th>%n <td>%n <div>%n"
+ " This is first line.%n </div>%n <div>%n"
+ " This is second line.%n </div>%n");
assertTrue(output.contains(expectedMultilineData1) && output.contains(expectedMultilineData2));
}
@Test(timeout=60000L)
public void testJavaScriptInfoBlock() throws Exception{
WebAppTests.testBlock(JavaScriptInfoBlock.class);
TestInfoBlock.pw.flush();
String output = TestInfoBlock.sw.toString();
assertFalse(output.contains("<script>"));
assertTrue(output.contains(JAVASCRIPT_ESCAPED));
}
}
| 3,512 | 28.275 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/HtmlBlockForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
public class HtmlBlockForTest extends HtmlBlock {
@Override
protected void render(Block html) {
info("test!");
}
}
| 963 | 33.428571 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnLayout.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.WebApps;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
public class TestTwoColumnLayout {
public static class TestController extends Controller {
@Override
public void index() {
setTitle("Test the two column table layout");
set("ui.accordion.id", "nav");
render(TwoColumnLayout.class);
}
}
@Test public void shouldNotThrow() {
WebAppTests.testPage(TwoColumnLayout.class);
}
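  // Manual entry point: serves the page on port 8888 in dev mode so the
  // layout can be inspected in a browser.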
public static void main(String[] args) {
WebApps.$for("test").at(8888).inDevMode().start().joinThread();
}
}
| 1,492 | 32.177778 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/BlockForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import java.io.PrintWriter;
public class BlockForTest extends HtmlBlock.Block {
public BlockForTest(HtmlBlock htmlBlock, PrintWriter out, int level,
boolean wasInline) {
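    // Qualified superclass constructor call: Block is an inner class of
    // HtmlBlock, so the enclosing htmlBlock instance must be supplied.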
htmlBlock.super(out, level, wasInline);
}
}
| 1,069 | 33.516129 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestCommonViews.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp.view;
import com.google.inject.Injector;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.apache.hadoop.yarn.webapp.view.ErrorPage;
import org.apache.hadoop.yarn.webapp.view.FooterBlock;
import org.apache.hadoop.yarn.webapp.view.HeaderBlock;
import org.apache.hadoop.yarn.webapp.view.JQueryUI;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestCommonViews {
@Test public void testErrorPage() {
Injector injector = WebAppTests.testPage(ErrorPage.class);
}
@Test public void testHeaderBlock() {
WebAppTests.testBlock(HeaderBlock.class);
}
@Test public void testFooterBlock() {
WebAppTests.testBlock(FooterBlock.class);
}
@Test public void testJQueryUI() {
WebAppTests.testBlock(JQueryUI.class);
}
@Test public void testInfoBlock() {
Injector injector = WebAppTests.createMockInjector(this);
ResponseInfo info = injector.getInstance(ResponseInfo.class);
}
}
| 1,834 | 31.192982 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceDecreasePBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceDecreaseProto;
import org.junit.Test;
public class TestContainerResourceDecrease {
@Test
public void testResourceDecreaseContext() {
ContainerId containerId = ContainerId
.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234, 3), 3), 7);
Resource resource = Resource.newInstance(1023, 3);
ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance(
containerId, resource);
// get proto and recover to ctx
ContainerResourceDecreaseProto proto =
((ContainerResourceDecreasePBImpl) ctx).getProto();
ctx = new ContainerResourceDecreasePBImpl(proto);
// check values
Assert.assertEquals(ctx.getCapability(), resource);
Assert.assertEquals(ctx.getContainerId(), containerId);
}
@Test
public void testResourceDecreaseContextWithNull() {
ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance(null,
null);
// get proto and recover to ctx;
ContainerResourceDecreaseProto proto =
((ContainerResourceDecreasePBImpl) ctx).getProto();
ctx = new ContainerResourceDecreasePBImpl(proto);
// check values
Assert.assertNull(ctx.getCapability());
Assert.assertNull(ctx.getContainerId());
}
}
| 2,583 | 37.567164 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestAllocateResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
import org.junit.Assert;
import org.junit.Test;
public class TestAllocateResponse {
@SuppressWarnings("deprecation")
@Test
public void testAllocateResponseWithIncDecContainers() {
List<ContainerResourceIncrease> incContainers =
new ArrayList<ContainerResourceIncrease>();
List<ContainerResourceDecrease> decContainers =
new ArrayList<ContainerResourceDecrease>();
for (int i = 0; i < 3; i++) {
incContainers.add(ContainerResourceIncrease.newInstance(null,
Resource.newInstance(1024, i), null));
}
for (int i = 0; i < 5; i++) {
decContainers.add(ContainerResourceDecrease.newInstance(null,
Resource.newInstance(1024, i)));
}
AllocateResponse r =
AllocateResponse.newInstance(3, new ArrayList<ContainerStatus>(),
new ArrayList<Container>(), new ArrayList<NodeReport>(), null,
AMCommand.AM_RESYNC, 3, null, new ArrayList<NMToken>(),
incContainers, decContainers);
// serde
AllocateResponseProto p = ((AllocateResponsePBImpl) r).getProto();
r = new AllocateResponsePBImpl(p);
// check value
Assert
.assertEquals(incContainers.size(), r.getIncreasedContainers().size());
Assert
.assertEquals(decContainers.size(), r.getDecreasedContainers().size());
for (int i = 0; i < incContainers.size(); i++) {
Assert.assertEquals(i, r.getIncreasedContainers().get(i).getCapability()
.getVirtualCores());
}
for (int i = 0; i < decContainers.size(); i++) {
Assert.assertEquals(i, r.getDecreasedContainers().get(i).getCapability()
.getVirtualCores());
}
}
@SuppressWarnings("deprecation")
@Test
public void testAllocateResponseWithoutIncDecContainers() {
AllocateResponse r =
AllocateResponse.newInstance(3, new ArrayList<ContainerStatus>(),
new ArrayList<Container>(), new ArrayList<NodeReport>(), null,
AMCommand.AM_RESYNC, 3, null, new ArrayList<NMToken>(), null, null);
// serde
AllocateResponseProto p = ((AllocateResponsePBImpl) r).getProto();
r = new AllocateResponsePBImpl(p);
// check value
Assert.assertEquals(0, r.getIncreasedContainers().size());
Assert.assertEquals(0, r.getDecreasedContainers().size());
}
}
| 4,679 | 39.695652 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationAttemptId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.junit.Test;
public class TestApplicationAttemptId {
@Test
public void testApplicationAttemptId() {
    ApplicationAttemptId a1 = createAppAttemptId(10L, 1, 1);
    ApplicationAttemptId a2 = createAppAttemptId(10L, 1, 2);
    ApplicationAttemptId a3 = createAppAttemptId(10L, 2, 1);
    ApplicationAttemptId a4 = createAppAttemptId(8L, 1, 4);
    ApplicationAttemptId a5 = createAppAttemptId(10L, 1, 1);
Assert.assertTrue(a1.equals(a5));
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a3));
Assert.assertFalse(a1.equals(a4));
Assert.assertTrue(a1.compareTo(a5) == 0);
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) < 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
Assert.assertTrue(a1.hashCode() == a5.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a4.hashCode());
long ts = System.currentTimeMillis();
ApplicationAttemptId a6 = createAppAttemptId(ts, 543627, 33492611);
Assert.assertEquals("appattempt_10_0001_000001", a1.toString());
Assert.assertEquals("appattempt_" + ts + "_543627_33492611", a6.toString());
}
private ApplicationAttemptId createAppAttemptId(
long clusterTimeStamp, int id, int attemptId) {
ApplicationId appId = ApplicationId.newInstance(clusterTimeStamp, id);
return ApplicationAttemptId.newInstance(appId, attemptId);
}
public static void main(String[] args) throws Exception {
TestApplicationAttemptId t = new TestApplicationAttemptId();
t.testApplicationAttemptId();
}
}
| 2,674 | 37.214286 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.lang.math.LongRange;
import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.junit.Assert;
import org.junit.Test;
public class TestGetApplicationsRequest {
@Test
  public void testGetApplicationsRequest() {
GetApplicationsRequest request = GetApplicationsRequest.newInstance();
EnumSet<YarnApplicationState> appStates =
EnumSet.of(YarnApplicationState.ACCEPTED);
request.setApplicationStates(appStates);
Set<String> tags = new HashSet<String>();
tags.add("tag1");
request.setApplicationTags(tags);
Set<String> types = new HashSet<String>();
types.add("type1");
request.setApplicationTypes(types);
long startBegin = System.currentTimeMillis();
long startEnd = System.currentTimeMillis() + 1;
request.setStartRange(startBegin, startEnd);
long finishBegin = System.currentTimeMillis() + 2;
long finishEnd = System.currentTimeMillis() + 3;
request.setFinishRange(finishBegin, finishEnd);
long limit = 100L;
request.setLimit(limit);
Set<String> queues = new HashSet<String>();
queues.add("queue1");
request.setQueues(queues);
Set<String> users = new HashSet<String>();
users.add("user1");
request.setUsers(users);
ApplicationsRequestScope scope = ApplicationsRequestScope.ALL;
request.setScope(scope);
GetApplicationsRequest requestFromProto = new GetApplicationsRequestPBImpl(
((GetApplicationsRequestPBImpl)request).getProto());
    // verify the whole record equals the original record
    Assert.assertEquals(requestFromProto, request);
    // verify all properties are the same as in the original request
    Assert.assertEquals(
        "ApplicationStates from proto are not the same as in the original request",
        requestFromProto.getApplicationStates(), appStates);
    Assert.assertEquals(
        "ApplicationTags from proto are not the same as in the original request",
        requestFromProto.getApplicationTags(), tags);
    Assert.assertEquals(
        "ApplicationTypes from proto are not the same as in the original request",
        requestFromProto.getApplicationTypes(), types);
    Assert.assertEquals(
        "StartRange from proto is not the same as in the original request",
        requestFromProto.getStartRange(), new LongRange(startBegin, startEnd));
    Assert.assertEquals(
        "FinishRange from proto is not the same as in the original request",
        requestFromProto.getFinishRange(), new LongRange(finishBegin, finishEnd));
    Assert.assertEquals(
        "Limit from proto is not the same as in the original request",
        requestFromProto.getLimit(), limit);
    Assert.assertEquals(
        "Queues from proto are not the same as in the original request",
        requestFromProto.getQueues(), queues);
    Assert.assertEquals(
        "Users from proto are not the same as in the original request",
        requestFromProto.getUsers(), users);
}
}
| 4,165 | 35.867257 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
import org.junit.Test;
public class TestApplicatonReport {
@Test
public void testApplicationReport() {
long timestamp = System.currentTimeMillis();
ApplicationReport appReport1 =
createApplicationReport(1, 1, timestamp);
ApplicationReport appReport2 =
createApplicationReport(1, 1, timestamp);
ApplicationReport appReport3 =
createApplicationReport(1, 1, timestamp);
Assert.assertEquals(appReport1, appReport2);
Assert.assertEquals(appReport2, appReport3);
appReport1.setApplicationId(null);
Assert.assertNull(appReport1.getApplicationId());
Assert.assertNotSame(appReport1, appReport2);
appReport2.setCurrentApplicationAttemptId(null);
Assert.assertNull(appReport2.getCurrentApplicationAttemptId());
Assert.assertNotSame(appReport2, appReport3);
Assert.assertNull(appReport1.getAMRMToken());
}
protected static ApplicationReport createApplicationReport(
int appIdInt, int appAttemptIdInt, long timestamp) {
ApplicationId appId = ApplicationId.newInstance(timestamp, appIdInt);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, appAttemptIdInt);
ApplicationReport appReport =
ApplicationReport.newInstance(appId, appAttemptId, "user", "queue",
"appname", "host", 124, null, YarnApplicationState.FINISHED,
"diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
"N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
          null, false);
return appReport;
}
}
| 2,806 | 40.279412 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestAllocateRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncreaseRequest;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
import org.junit.Test;
public class TestAllocateRequest {
@Test
  public void testAllocateRequestWithIncrease() {
List<ContainerResourceIncreaseRequest> incRequests =
new ArrayList<ContainerResourceIncreaseRequest>();
for (int i = 0; i < 3; i++) {
incRequests.add(ContainerResourceIncreaseRequest.newInstance(null,
Resource.newInstance(0, i)));
}
AllocateRequest r =
AllocateRequest.newInstance(123, 0f, null, null, null, incRequests);
// serde
AllocateRequestProto p = ((AllocateRequestPBImpl) r).getProto();
r = new AllocateRequestPBImpl(p);
// check value
Assert.assertEquals(123, r.getResponseId());
Assert.assertEquals(incRequests.size(), r.getIncreaseRequests().size());
for (int i = 0; i < incRequests.size(); i++) {
Assert.assertEquals(r.getIncreaseRequests().get(i).getCapability()
.getVirtualCores(), incRequests.get(i).getCapability()
.getVirtualCores());
}
}
@Test
  public void testAllocateRequestWithoutIncrease() {
AllocateRequest r =
AllocateRequest.newInstance(123, 0f, null, null, null, null);
// serde
AllocateRequestProto p = ((AllocateRequestPBImpl) r).getProto();
r = new AllocateRequestPBImpl(p);
// check value
Assert.assertEquals(123, r.getResponseId());
Assert.assertEquals(0, r.getIncreaseRequests().size());
}
}
| 2,672 | 35.121622 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.junit.Test;
public class TestContainerId {
@Test
public void testContainerId() {
    ContainerId c1 = newContainerId(1, 1, 10L, 1);
    ContainerId c2 = newContainerId(1, 1, 10L, 2);
    ContainerId c3 = newContainerId(1, 1, 10L, 1);
    ContainerId c4 = newContainerId(1, 3, 10L, 1);
    ContainerId c5 = newContainerId(1, 3, 8L, 1);
Assert.assertTrue(c1.equals(c3));
Assert.assertFalse(c1.equals(c2));
Assert.assertFalse(c1.equals(c4));
Assert.assertFalse(c1.equals(c5));
Assert.assertTrue(c1.compareTo(c3) == 0);
Assert.assertTrue(c1.compareTo(c2) < 0);
Assert.assertTrue(c1.compareTo(c4) < 0);
Assert.assertTrue(c1.compareTo(c5) > 0);
Assert.assertTrue(c1.hashCode() == c3.hashCode());
Assert.assertFalse(c1.hashCode() == c2.hashCode());
Assert.assertFalse(c1.hashCode() == c4.hashCode());
Assert.assertFalse(c1.hashCode() == c5.hashCode());
long ts = System.currentTimeMillis();
ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
Assert.assertEquals("container_10_0001_01_000001", c1.toString());
Assert.assertEquals(25645811, 0xffffffffffL & c6.getContainerId());
Assert.assertEquals(0, c6.getContainerId() >> 40);
Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",
c6.toString());
ContainerId c7 = newContainerId(36473, 4365472, ts, 4298334883325L);
Assert.assertEquals(999799999997L, 0xffffffffffL & c7.getContainerId());
Assert.assertEquals(3, c7.getContainerId() >> 40);
Assert.assertEquals(
"container_e03_" + ts + "_36473_4365472_999799999997",
c7.toString());
ContainerId c8 = newContainerId(36473, 4365472, ts, 844424930131965L);
Assert.assertEquals(1099511627773L, 0xffffffffffL & c8.getContainerId());
Assert.assertEquals(767, c8.getContainerId() >> 40);
Assert.assertEquals(
"container_e767_" + ts + "_36473_4365472_1099511627773",
c8.toString());
}
public static ContainerId newContainerId(int appId, int appAttemptId,
long timestamp, long containerId) {
ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, appAttemptId);
return ContainerId.newContainerId(applicationAttemptId, containerId);
}
}
| 3,409 | 39.117647 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncreaseRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreaseRequestPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseRequestProto;
import org.junit.Test;
public class TestContainerResourceIncreaseRequest {
@Test
  public void testContainerResourceIncreaseRequest() {
ContainerId containerId = ContainerId
.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234, 3), 3), 7);
Resource resource = Resource.newInstance(1023, 3);
ContainerResourceIncreaseRequest context = ContainerResourceIncreaseRequest
.newInstance(containerId, resource);
// to proto and get it back
ContainerResourceIncreaseRequestProto proto =
((ContainerResourceIncreaseRequestPBImpl) context).getProto();
ContainerResourceIncreaseRequest contextRecover =
new ContainerResourceIncreaseRequestPBImpl(proto);
// check value
Assert.assertEquals(contextRecover.getContainerId(), containerId);
Assert.assertEquals(contextRecover.getCapability(), resource);
}
@Test
public void testResourceChangeContextWithNullField() {
ContainerResourceIncreaseRequest context = ContainerResourceIncreaseRequest
.newInstance(null, null);
// to proto and get it back
ContainerResourceIncreaseRequestProto proto =
((ContainerResourceIncreaseRequestPBImpl) context).getProto();
ContainerResourceIncreaseRequest contextRecover =
new ContainerResourceIncreaseRequestPBImpl(proto);
// check value
Assert.assertNull(contextRecover.getContainerId());
Assert.assertNull(contextRecover.getCapability());
}
}
| 2,839 | 40.15942 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestNodeId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.junit.Test;
public class TestNodeId {
@Test
public void testNodeId() {
NodeId nodeId1 = NodeId.newInstance("10.18.52.124", 8041);
NodeId nodeId2 = NodeId.newInstance("10.18.52.125", 8038);
NodeId nodeId3 = NodeId.newInstance("10.18.52.124", 8041);
NodeId nodeId4 = NodeId.newInstance("10.18.52.124", 8039);
Assert.assertTrue(nodeId1.equals(nodeId3));
Assert.assertFalse(nodeId1.equals(nodeId2));
Assert.assertFalse(nodeId3.equals(nodeId4));
Assert.assertTrue(nodeId1.compareTo(nodeId3) == 0);
Assert.assertTrue(nodeId1.compareTo(nodeId2) < 0);
Assert.assertTrue(nodeId3.compareTo(nodeId4) > 0);
Assert.assertTrue(nodeId1.hashCode() == nodeId3.hashCode());
Assert.assertFalse(nodeId1.hashCode() == nodeId2.hashCode());
Assert.assertFalse(nodeId3.hashCode() == nodeId4.hashCode());
Assert.assertEquals("10.18.52.124:8041", nodeId1.toString());
}
}
| 1,846 | 35.94 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.junit.Test;
public class TestApplicationId {
@Test
public void testApplicationId() {
    ApplicationId a1 = ApplicationId.newInstance(10L, 1);
    ApplicationId a2 = ApplicationId.newInstance(10L, 2);
    ApplicationId a3 = ApplicationId.newInstance(10L, 1);
    ApplicationId a4 = ApplicationId.newInstance(8L, 3);
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a4));
Assert.assertTrue(a1.equals(a3));
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) == 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
Assert.assertTrue(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a2.hashCode() == a4.hashCode());
long ts = System.currentTimeMillis();
ApplicationId a5 = ApplicationId.newInstance(ts, 45436343);
Assert.assertEquals("application_10_0001", a1.toString());
Assert.assertEquals("application_" + ts + "_45436343", a5.toString());
}
}
| 1,939 | 34.925926 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.io.IOException;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import org.apache.commons.lang.math.LongRange;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncreaseRequest;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.PreemptionContainer;
import org.apache.hadoop.yarn.api.records.PreemptionContract;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueStatistics;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.ReservationRequests;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceDecreasePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreasePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreaseRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContainerPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContractPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionResourceRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueUserACLInfoPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceDecreaseProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto;
import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto;
import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto;
import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
public class TestPBImplRecords {
  private static final Log LOG = LogFactory.getLog(TestPBImplRecords.class);
private static HashMap<Type, Object> typeValueCache = new HashMap<Type, Object>();
private static Random rand = new Random();
private static byte [] bytes = new byte[] {'1', '2', '3', '4'};
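  // Generates a test value for the given type. Primitive, String, ByteBuffer
  // and EnumSet values are created fresh on each call; array, enum, List,
  // Set and Map values are cached per type and reused on later calls.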
@SuppressWarnings({"rawtypes", "unchecked"})
private static Object genTypeValue(Type type) {
Object ret = typeValueCache.get(type);
if (ret != null) {
return ret;
}
// only use positive primitive values
if (type.equals(boolean.class)) {
return rand.nextBoolean();
} else if (type.equals(byte.class)) {
return bytes[rand.nextInt(4)];
} else if (type.equals(int.class)) {
return rand.nextInt(1000000);
} else if (type.equals(long.class)) {
return Long.valueOf(rand.nextInt(1000000));
} else if (type.equals(float.class)) {
return rand.nextFloat();
} else if (type.equals(double.class)) {
return rand.nextDouble();
} else if (type.equals(String.class)) {
return String.format("%c%c%c",
'a' + rand.nextInt(26),
'a' + rand.nextInt(26),
'a' + rand.nextInt(26));
} else if (type instanceof Class) {
Class clazz = (Class)type;
if (clazz.isArray()) {
Class compClass = clazz.getComponentType();
if (compClass != null) {
ret = Array.newInstance(compClass, 2);
Array.set(ret, 0, genTypeValue(compClass));
Array.set(ret, 1, genTypeValue(compClass));
}
} else if (clazz.isEnum()) {
Object [] values = clazz.getEnumConstants();
ret = values[rand.nextInt(values.length)];
} else if (clazz.equals(ByteBuffer.class)) {
// return new ByteBuffer every time
// to prevent potential side effects
ByteBuffer buff = ByteBuffer.allocate(4);
rand.nextBytes(buff.array());
return buff;
}
} else if (type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType)type;
Type rawType = pt.getRawType();
Type [] params = pt.getActualTypeArguments();
// only support EnumSet<T>, List<T>, Set<T>, Map<K,V>
if (rawType.equals(EnumSet.class)) {
if (params[0] instanceof Class) {
Class c = (Class)(params[0]);
return EnumSet.allOf(c);
}
      } else if (rawType.equals(List.class)) {
ret = Lists.newArrayList(genTypeValue(params[0]));
} else if (rawType.equals(Set.class)) {
ret = Sets.newHashSet(genTypeValue(params[0]));
} else if (rawType.equals(Map.class)) {
Map<Object, Object> map = Maps.newHashMap();
map.put(genTypeValue(params[0]), genTypeValue(params[1]));
ret = map;
}
}
if (ret == null) {
throw new IllegalArgumentException("type " + type + " is not supported");
}
typeValueCache.put(type, ret);
return ret;
}
/**
   * This method generates a record instance by calling newInstance
   * via reflection, and registers the generated value in typeValueCache.
*/
@SuppressWarnings("rawtypes")
private static Object generateByNewInstance(Class clazz) throws Exception {
Object ret = typeValueCache.get(clazz);
if (ret != null) {
return ret;
}
Method newInstance = null;
Type [] paramTypes = new Type[0];
// get newInstance method with most parameters
for (Method m : clazz.getMethods()) {
int mod = m.getModifiers();
if (m.getDeclaringClass().equals(clazz) &&
Modifier.isPublic(mod) &&
Modifier.isStatic(mod) &&
m.getName().equals("newInstance")) {
Type [] pts = m.getGenericParameterTypes();
if (newInstance == null
|| (pts.length > paramTypes.length)) {
newInstance = m;
paramTypes = pts;
}
}
}
if (newInstance == null) {
throw new IllegalArgumentException("type " + clazz.getName() +
" does not have newInstance method");
}
Object [] args = new Object[paramTypes.length];
    for (int i = 0; i < args.length; i++) {
args[i] = genTypeValue(paramTypes[i]);
}
ret = newInstance.invoke(null, args);
typeValueCache.put(clazz, ret);
return ret;
}
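  // Seeds typeValueCache with a few hand-built records, then pre-generates
  // the records that other record types depend on through their
  // newInstance factories.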
@BeforeClass
public static void setup() throws Exception {
typeValueCache.put(LongRange.class, new LongRange(1000, 2000));
typeValueCache.put(URL.class, URL.newInstance(
"http", "localhost", 8080, "file0"));
typeValueCache.put(SerializedException.class,
SerializedException.newInstance(new IOException("exception for test")));
generateByNewInstance(LogAggregationContext.class);
generateByNewInstance(ApplicationId.class);
generateByNewInstance(ApplicationAttemptId.class);
generateByNewInstance(ContainerId.class);
generateByNewInstance(Resource.class);
generateByNewInstance(ResourceBlacklistRequest.class);
generateByNewInstance(ResourceOption.class);
generateByNewInstance(LocalResource.class);
generateByNewInstance(Priority.class);
generateByNewInstance(NodeId.class);
generateByNewInstance(NodeReport.class);
generateByNewInstance(Token.class);
generateByNewInstance(NMToken.class);
generateByNewInstance(ResourceRequest.class);
generateByNewInstance(ApplicationAttemptReport.class);
generateByNewInstance(ApplicationResourceUsageReport.class);
generateByNewInstance(ApplicationReport.class);
generateByNewInstance(Container.class);
generateByNewInstance(ContainerLaunchContext.class);
generateByNewInstance(ApplicationSubmissionContext.class);
generateByNewInstance(ContainerReport.class);
generateByNewInstance(ContainerResourceDecrease.class);
generateByNewInstance(ContainerResourceIncrease.class);
generateByNewInstance(ContainerResourceIncreaseRequest.class);
generateByNewInstance(ContainerStatus.class);
generateByNewInstance(PreemptionContainer.class);
generateByNewInstance(PreemptionResourceRequest.class);
generateByNewInstance(PreemptionContainer.class);
generateByNewInstance(PreemptionContract.class);
generateByNewInstance(StrictPreemptionContract.class);
generateByNewInstance(PreemptionMessage.class);
generateByNewInstance(StartContainerRequest.class);
generateByNewInstance(NodeLabel.class);
    // generateByNewInstance does not apply to QueueInfo because
    // it is recursive (it has sub-queues)
typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f,
1.0f, 0.1f, null, null, QueueState.RUNNING, ImmutableSet.of("x", "y"),
"x && y", null));
generateByNewInstance(QueueStatistics.class);
generateByNewInstance(QueueUserACLInfo.class);
generateByNewInstance(YarnClusterMetrics.class);
// for reservation system
generateByNewInstance(ReservationId.class);
generateByNewInstance(ReservationRequest.class);
generateByNewInstance(ReservationRequests.class);
generateByNewInstance(ReservationDefinition.class);
}
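  // Holds one record property: the matched getter/setter pair, its type,
  // and the generated value used to exercise it.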
private class GetSetPair {
public String propertyName;
public Method getMethod;
public Method setMethod;
public Type type;
public Object testValue;
@Override
public String toString() {
return String.format("{ name=%s, class=%s, value=%s }", propertyName,
type, testValue);
}
}
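  // Uses reflection to pair every public getXxx() on the record class with
  // a setXxx(value) of the same type, dropping properties that lack
  // either half of the pair.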
private <R> Map<String, GetSetPair> getGetSetPairs(Class<R> recordClass)
throws Exception {
Map<String, GetSetPair> ret = new HashMap<String, GetSetPair>();
Method [] methods = recordClass.getDeclaredMethods();
// get all get methods
for (int i = 0; i < methods.length; i++) {
Method m = methods[i];
int mod = m.getModifiers();
if (m.getDeclaringClass().equals(recordClass) &&
Modifier.isPublic(mod) &&
(!Modifier.isStatic(mod))) {
String name = m.getName();
if (name.equals("getProto")) {
continue;
}
if ((name.length() > 3) && name.startsWith("get") &&
(m.getParameterTypes().length == 0)) {
String propertyName = name.substring(3);
Type valueType = m.getGenericReturnType();
GetSetPair p = ret.get(propertyName);
if (p == null) {
p = new GetSetPair();
p.propertyName = propertyName;
p.type = valueType;
p.getMethod = m;
ret.put(propertyName, p);
} else {
Assert.fail("Multiple get method with same name: " + recordClass
+ p.propertyName);
}
}
}
}
// match get methods with set methods
for (int i = 0; i < methods.length; i++) {
Method m = methods[i];
int mod = m.getModifiers();
if (m.getDeclaringClass().equals(recordClass) &&
Modifier.isPublic(mod) &&
(!Modifier.isStatic(mod))) {
String name = m.getName();
if (name.startsWith("set") && (m.getParameterTypes().length == 1)) {
String propertyName = name.substring(3);
Type valueType = m.getGenericParameterTypes()[0];
GetSetPair p = ret.get(propertyName);
if (p != null && p.type.equals(valueType)) {
p.setMethod = m;
}
}
}
}
// exclude incomplete get/set pair, and generate test value
Iterator<Entry<String, GetSetPair>> itr = ret.entrySet().iterator();
while (itr.hasNext()) {
Entry<String, GetSetPair> cur = itr.next();
GetSetPair gsp = cur.getValue();
if ((gsp.getMethod == null) ||
(gsp.setMethod == null)) {
LOG.info(String.format("Exclude protential property: %s\n", gsp.propertyName));
itr.remove();
} else {
LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type));
gsp.testValue = genTypeValue(gsp.type);
LOG.info(String.format(" testValue: %s\n", gsp.testValue));
}
}
return ret;
}
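  // Round-trip check: populate a record through its setters, convert it to
  // its protobuf form, rebuild a record from that proto, and verify that
  // the whole record and each individual property survive unchanged.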
private <R, P> void validatePBImplRecord(Class<R> recordClass,
Class<P> protoClass)
throws Exception {
LOG.info(String.format("Validate %s %s\n", recordClass.getName(),
protoClass.getName()));
Constructor<R> emptyConstructor = recordClass.getConstructor();
Constructor<R> pbConstructor = recordClass.getConstructor(protoClass);
Method getProto = recordClass.getDeclaredMethod("getProto");
Map<String, GetSetPair> getSetPairs = getGetSetPairs(recordClass);
R origRecord = emptyConstructor.newInstance();
for (GetSetPair gsp : getSetPairs.values()) {
gsp.setMethod.invoke(origRecord, gsp.testValue);
}
Object ret = getProto.invoke(origRecord);
Assert.assertNotNull(recordClass.getName() + "#getProto returns null", ret);
if (!(protoClass.isAssignableFrom(ret.getClass()))) {
Assert.fail("Illegal getProto method return type: " + ret.getClass());
}
R deserRecord = pbConstructor.newInstance(ret);
Assert.assertEquals("whole " + recordClass + " records should be equal",
origRecord, deserRecord);
for (GetSetPair gsp : getSetPairs.values()) {
Object origValue = gsp.getMethod.invoke(origRecord);
Object deserValue = gsp.getMethod.invoke(deserRecord);
Assert.assertEquals("property " + recordClass.getName() + "#"
+ gsp.propertyName + " should be equal", origValue, deserValue);
}
}
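  // The tests below apply the same round-trip validation to each PBImpl
  // record class and its corresponding proto class.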
@Test
public void testAllocateRequestPBImpl() throws Exception {
validatePBImplRecord(AllocateRequestPBImpl.class, AllocateRequestProto.class);
}
@Test
public void testAllocateResponsePBImpl() throws Exception {
validatePBImplRecord(AllocateResponsePBImpl.class, AllocateResponseProto.class);
}
@Test
public void testCancelDelegationTokenRequestPBImpl() throws Exception {
validatePBImplRecord(CancelDelegationTokenRequestPBImpl.class,
CancelDelegationTokenRequestProto.class);
}
@Test
public void testCancelDelegationTokenResponsePBImpl() throws Exception {
validatePBImplRecord(CancelDelegationTokenResponsePBImpl.class,
CancelDelegationTokenResponseProto.class);
}
@Test
public void testFinishApplicationMasterRequestPBImpl() throws Exception {
validatePBImplRecord(FinishApplicationMasterRequestPBImpl.class,
FinishApplicationMasterRequestProto.class);
}
@Test
public void testFinishApplicationMasterResponsePBImpl() throws Exception {
validatePBImplRecord(FinishApplicationMasterResponsePBImpl.class,
FinishApplicationMasterResponseProto.class);
}
@Test
public void testGetApplicationAttemptReportRequestPBImpl() throws Exception {
validatePBImplRecord(GetApplicationAttemptReportRequestPBImpl.class,
GetApplicationAttemptReportRequestProto.class);
}
@Test
public void testGetApplicationAttemptReportResponsePBImpl() throws Exception {
validatePBImplRecord(GetApplicationAttemptReportResponsePBImpl.class,
GetApplicationAttemptReportResponseProto.class);
}
@Test
public void testGetApplicationAttemptsRequestPBImpl() throws Exception {
validatePBImplRecord(GetApplicationAttemptsRequestPBImpl.class,
GetApplicationAttemptsRequestProto.class);
}
@Test
public void testGetApplicationAttemptsResponsePBImpl() throws Exception {
validatePBImplRecord(GetApplicationAttemptsResponsePBImpl.class,
GetApplicationAttemptsResponseProto.class);
}
@Test
public void testGetApplicationReportRequestPBImpl() throws Exception {
validatePBImplRecord(GetApplicationReportRequestPBImpl.class,
GetApplicationReportRequestProto.class);
}
@Test
public void testGetApplicationReportResponsePBImpl() throws Exception {
validatePBImplRecord(GetApplicationReportResponsePBImpl.class,
GetApplicationReportResponseProto.class);
}
@Test
public void testGetApplicationsRequestPBImpl() throws Exception {
validatePBImplRecord(GetApplicationsRequestPBImpl.class,
GetApplicationsRequestProto.class);
}
@Test
public void testGetApplicationsResponsePBImpl() throws Exception {
validatePBImplRecord(GetApplicationsResponsePBImpl.class,
GetApplicationsResponseProto.class);
}
@Test
public void testGetClusterMetricsRequestPBImpl() throws Exception {
validatePBImplRecord(GetClusterMetricsRequestPBImpl.class,
GetClusterMetricsRequestProto.class);
}
@Test
public void testGetClusterMetricsResponsePBImpl() throws Exception {
validatePBImplRecord(GetClusterMetricsResponsePBImpl.class,
GetClusterMetricsResponseProto.class);
}
@Test
public void testGetClusterNodesRequestPBImpl() throws Exception {
validatePBImplRecord(GetClusterNodesRequestPBImpl.class,
GetClusterNodesRequestProto.class);
}
@Test
public void testGetClusterNodesResponsePBImpl() throws Exception {
validatePBImplRecord(GetClusterNodesResponsePBImpl.class,
GetClusterNodesResponseProto.class);
}
@Test
public void testGetContainerReportRequestPBImpl() throws Exception {
validatePBImplRecord(GetContainerReportRequestPBImpl.class,
GetContainerReportRequestProto.class);
}
@Test
public void testGetContainerReportResponsePBImpl() throws Exception {
validatePBImplRecord(GetContainerReportResponsePBImpl.class,
GetContainerReportResponseProto.class);
}
@Test
public void testGetContainersRequestPBImpl() throws Exception {
validatePBImplRecord(GetContainersRequestPBImpl.class,
GetContainersRequestProto.class);
}
@Test
public void testGetContainersResponsePBImpl() throws Exception {
validatePBImplRecord(GetContainersResponsePBImpl.class,
GetContainersResponseProto.class);
}
@Test
public void testGetContainerStatusesRequestPBImpl() throws Exception {
validatePBImplRecord(GetContainerStatusesRequestPBImpl.class,
GetContainerStatusesRequestProto.class);
}
@Test
public void testGetContainerStatusesResponsePBImpl() throws Exception {
validatePBImplRecord(GetContainerStatusesResponsePBImpl.class,
GetContainerStatusesResponseProto.class);
}
@Test
public void testGetDelegationTokenRequestPBImpl() throws Exception {
validatePBImplRecord(GetDelegationTokenRequestPBImpl.class,
GetDelegationTokenRequestProto.class);
}
@Test
public void testGetDelegationTokenResponsePBImpl() throws Exception {
validatePBImplRecord(GetDelegationTokenResponsePBImpl.class,
GetDelegationTokenResponseProto.class);
}
@Test
public void testGetNewApplicationRequestPBImpl() throws Exception {
validatePBImplRecord(GetNewApplicationRequestPBImpl.class,
GetNewApplicationRequestProto.class);
}
@Test
public void testGetNewApplicationResponsePBImpl() throws Exception {
validatePBImplRecord(GetNewApplicationResponsePBImpl.class,
GetNewApplicationResponseProto.class);
}
@Test
public void testGetQueueInfoRequestPBImpl() throws Exception {
validatePBImplRecord(GetQueueInfoRequestPBImpl.class,
GetQueueInfoRequestProto.class);
}
@Test
public void testGetQueueInfoResponsePBImpl() throws Exception {
validatePBImplRecord(GetQueueInfoResponsePBImpl.class,
GetQueueInfoResponseProto.class);
}
@Test
public void testGetQueueUserAclsInfoRequestPBImpl() throws Exception {
validatePBImplRecord(GetQueueUserAclsInfoRequestPBImpl.class,
GetQueueUserAclsInfoRequestProto.class);
}
@Test
public void testGetQueueUserAclsInfoResponsePBImpl() throws Exception {
validatePBImplRecord(GetQueueUserAclsInfoResponsePBImpl.class,
GetQueueUserAclsInfoResponseProto.class);
}
@Test
public void testKillApplicationRequestPBImpl() throws Exception {
validatePBImplRecord(KillApplicationRequestPBImpl.class,
KillApplicationRequestProto.class);
}
@Test
public void testKillApplicationResponsePBImpl() throws Exception {
validatePBImplRecord(KillApplicationResponsePBImpl.class,
KillApplicationResponseProto.class);
}
@Test
public void testMoveApplicationAcrossQueuesRequestPBImpl() throws Exception {
validatePBImplRecord(MoveApplicationAcrossQueuesRequestPBImpl.class,
MoveApplicationAcrossQueuesRequestProto.class);
}
@Test
public void testMoveApplicationAcrossQueuesResponsePBImpl() throws Exception {
validatePBImplRecord(MoveApplicationAcrossQueuesResponsePBImpl.class,
MoveApplicationAcrossQueuesResponseProto.class);
}
@Test
public void testRegisterApplicationMasterRequestPBImpl() throws Exception {
validatePBImplRecord(RegisterApplicationMasterRequestPBImpl.class,
RegisterApplicationMasterRequestProto.class);
}
@Test
public void testRegisterApplicationMasterResponsePBImpl() throws Exception {
validatePBImplRecord(RegisterApplicationMasterResponsePBImpl.class,
RegisterApplicationMasterResponseProto.class);
}
@Test
public void testRenewDelegationTokenRequestPBImpl() throws Exception {
validatePBImplRecord(RenewDelegationTokenRequestPBImpl.class,
RenewDelegationTokenRequestProto.class);
}
@Test
public void testRenewDelegationTokenResponsePBImpl() throws Exception {
validatePBImplRecord(RenewDelegationTokenResponsePBImpl.class,
RenewDelegationTokenResponseProto.class);
}
@Test
public void testStartContainerRequestPBImpl() throws Exception {
validatePBImplRecord(StartContainerRequestPBImpl.class,
StartContainerRequestProto.class);
}
@Test
public void testStartContainersRequestPBImpl() throws Exception {
validatePBImplRecord(StartContainersRequestPBImpl.class,
StartContainersRequestProto.class);
}
@Test
public void testStartContainersResponsePBImpl() throws Exception {
validatePBImplRecord(StartContainersResponsePBImpl.class,
StartContainersResponseProto.class);
}
@Test
public void testStopContainersRequestPBImpl() throws Exception {
validatePBImplRecord(StopContainersRequestPBImpl.class,
StopContainersRequestProto.class);
}
@Test
public void testStopContainersResponsePBImpl() throws Exception {
validatePBImplRecord(StopContainersResponsePBImpl.class,
StopContainersResponseProto.class);
}
@Test
public void testSubmitApplicationRequestPBImpl() throws Exception {
validatePBImplRecord(SubmitApplicationRequestPBImpl.class,
SubmitApplicationRequestProto.class);
}
@Test
public void testSubmitApplicationResponsePBImpl() throws Exception {
validatePBImplRecord(SubmitApplicationResponsePBImpl.class,
SubmitApplicationResponseProto.class);
}
@Test
@Ignore
  // ignored because ApplicationAttemptIdPBImpl is immutable
public void testApplicationAttemptIdPBImpl() throws Exception {
validatePBImplRecord(ApplicationAttemptIdPBImpl.class,
ApplicationAttemptIdProto.class);
}
@Test
public void testApplicationAttemptReportPBImpl() throws Exception {
validatePBImplRecord(ApplicationAttemptReportPBImpl.class,
ApplicationAttemptReportProto.class);
}
@Test
@Ignore
  // ignored because ApplicationIdPBImpl is immutable
public void testApplicationIdPBImpl() throws Exception {
validatePBImplRecord(ApplicationIdPBImpl.class, ApplicationIdProto.class);
}
@Test
public void testApplicationReportPBImpl() throws Exception {
validatePBImplRecord(ApplicationReportPBImpl.class,
ApplicationReportProto.class);
}
@Test
public void testApplicationResourceUsageReportPBImpl() throws Exception {
validatePBImplRecord(ApplicationResourceUsageReportPBImpl.class,
ApplicationResourceUsageReportProto.class);
}
@Test
public void testApplicationSubmissionContextPBImpl() throws Exception {
validatePBImplRecord(ApplicationSubmissionContextPBImpl.class,
ApplicationSubmissionContextProto.class);
ApplicationSubmissionContext ctx =
ApplicationSubmissionContext.newInstance(null, null, null, null, null,
false, false, 0, Resources.none(), null, false, null, null);
Assert.assertNotNull(ctx.getResource());
}
@Test
@Ignore
  // ignored because ContainerIdPBImpl is immutable
public void testContainerIdPBImpl() throws Exception {
validatePBImplRecord(ContainerIdPBImpl.class, ContainerIdProto.class);
}
@Test
public void testContainerLaunchContextPBImpl() throws Exception {
validatePBImplRecord(ContainerLaunchContextPBImpl.class,
ContainerLaunchContextProto.class);
}
@Test
public void testContainerPBImpl() throws Exception {
validatePBImplRecord(ContainerPBImpl.class, ContainerProto.class);
}
@Test
public void testContainerReportPBImpl() throws Exception {
validatePBImplRecord(ContainerReportPBImpl.class, ContainerReportProto.class);
}
@Test
public void testContainerResourceDecreasePBImpl() throws Exception {
validatePBImplRecord(ContainerResourceDecreasePBImpl.class,
ContainerResourceDecreaseProto.class);
}
@Test
public void testContainerResourceIncreasePBImpl() throws Exception {
validatePBImplRecord(ContainerResourceIncreasePBImpl.class,
ContainerResourceIncreaseProto.class);
}
@Test
public void testContainerResourceIncreaseRequestPBImpl() throws Exception {
validatePBImplRecord(ContainerResourceIncreaseRequestPBImpl.class,
ContainerResourceIncreaseRequestProto.class);
}
@Test
public void testContainerStatusPBImpl() throws Exception {
validatePBImplRecord(ContainerStatusPBImpl.class, ContainerStatusProto.class);
}
@Test
public void testLocalResourcePBImpl() throws Exception {
validatePBImplRecord(LocalResourcePBImpl.class, LocalResourceProto.class);
}
@Test
public void testNMTokenPBImpl() throws Exception {
validatePBImplRecord(NMTokenPBImpl.class, NMTokenProto.class);
}
@Test
@Ignore
  // ignored because NodeIdPBImpl is immutable
public void testNodeIdPBImpl() throws Exception {
validatePBImplRecord(NodeIdPBImpl.class, NodeIdProto.class);
}
@Test
public void testNodeReportPBImpl() throws Exception {
validatePBImplRecord(NodeReportPBImpl.class, NodeReportProto.class);
}
@Test
public void testPreemptionContainerPBImpl() throws Exception {
validatePBImplRecord(PreemptionContainerPBImpl.class,
PreemptionContainerProto.class);
}
@Test
public void testPreemptionContractPBImpl() throws Exception {
validatePBImplRecord(PreemptionContractPBImpl.class,
PreemptionContractProto.class);
}
@Test
public void testPreemptionMessagePBImpl() throws Exception {
validatePBImplRecord(PreemptionMessagePBImpl.class,
PreemptionMessageProto.class);
}
@Test
public void testPreemptionResourceRequestPBImpl() throws Exception {
validatePBImplRecord(PreemptionResourceRequestPBImpl.class,
PreemptionResourceRequestProto.class);
}
@Test
public void testPriorityPBImpl() throws Exception {
validatePBImplRecord(PriorityPBImpl.class, PriorityProto.class);
}
@Test
public void testQueueInfoPBImpl() throws Exception {
validatePBImplRecord(QueueInfoPBImpl.class, QueueInfoProto.class);
}
@Test
public void testQueueUserACLInfoPBImpl() throws Exception {
validatePBImplRecord(QueueUserACLInfoPBImpl.class,
QueueUserACLInfoProto.class);
}
@Test
public void testResourceBlacklistRequestPBImpl() throws Exception {
validatePBImplRecord(ResourceBlacklistRequestPBImpl.class,
ResourceBlacklistRequestProto.class);
}
@Test
@Ignore
// ignore as ResourceOptionPBImpl is immutable
public void testResourceOptionPBImpl() throws Exception {
validatePBImplRecord(ResourceOptionPBImpl.class, ResourceOptionProto.class);
}
@Test
public void testResourcePBImpl() throws Exception {
validatePBImplRecord(ResourcePBImpl.class, ResourceProto.class);
}
@Test
public void testResourceRequestPBImpl() throws Exception {
validatePBImplRecord(ResourceRequestPBImpl.class, ResourceRequestProto.class);
}
@Test
public void testSerializedExceptionPBImpl() throws Exception {
validatePBImplRecord(SerializedExceptionPBImpl.class,
SerializedExceptionProto.class);
}
@Test
public void testStrictPreemptionContractPBImpl() throws Exception {
validatePBImplRecord(StrictPreemptionContractPBImpl.class,
StrictPreemptionContractProto.class);
}
@Test
public void testTokenPBImpl() throws Exception {
validatePBImplRecord(TokenPBImpl.class, TokenProto.class);
}
@Test
public void testURLPBImpl() throws Exception {
validatePBImplRecord(URLPBImpl.class, URLProto.class);
}
@Test
public void testYarnClusterMetricsPBImpl() throws Exception {
validatePBImplRecord(YarnClusterMetricsPBImpl.class,
YarnClusterMetricsProto.class);
}
@Test
public void testRefreshAdminAclsRequestPBImpl() throws Exception {
validatePBImplRecord(RefreshAdminAclsRequestPBImpl.class,
RefreshAdminAclsRequestProto.class);
}
@Test
public void testRefreshAdminAclsResponsePBImpl() throws Exception {
validatePBImplRecord(RefreshAdminAclsResponsePBImpl.class,
RefreshAdminAclsResponseProto.class);
}
@Test
public void testRefreshNodesRequestPBImpl() throws Exception {
validatePBImplRecord(RefreshNodesRequestPBImpl.class,
RefreshNodesRequestProto.class);
}
@Test
public void testRefreshNodesResponsePBImpl() throws Exception {
validatePBImplRecord(RefreshNodesResponsePBImpl.class,
RefreshNodesResponseProto.class);
}
@Test
public void testRefreshQueuesRequestPBImpl() throws Exception {
validatePBImplRecord(RefreshQueuesRequestPBImpl.class,
RefreshQueuesRequestProto.class);
}
@Test
public void testRefreshQueuesResponsePBImpl() throws Exception {
validatePBImplRecord(RefreshQueuesResponsePBImpl.class,
RefreshQueuesResponseProto.class);
}
@Test
public void testRefreshServiceAclsRequestPBImpl() throws Exception {
validatePBImplRecord(RefreshServiceAclsRequestPBImpl.class,
RefreshServiceAclsRequestProto.class);
}
@Test
public void testRefreshServiceAclsResponsePBImpl() throws Exception {
validatePBImplRecord(RefreshServiceAclsResponsePBImpl.class,
RefreshServiceAclsResponseProto.class);
}
@Test
public void testRefreshSuperUserGroupsConfigurationRequestPBImpl()
throws Exception {
validatePBImplRecord(RefreshSuperUserGroupsConfigurationRequestPBImpl.class,
RefreshSuperUserGroupsConfigurationRequestProto.class);
}
@Test
public void testRefreshSuperUserGroupsConfigurationResponsePBImpl()
throws Exception {
validatePBImplRecord(RefreshSuperUserGroupsConfigurationResponsePBImpl.class,
RefreshSuperUserGroupsConfigurationResponseProto.class);
}
@Test
public void testRefreshUserToGroupsMappingsRequestPBImpl() throws Exception {
validatePBImplRecord(RefreshUserToGroupsMappingsRequestPBImpl.class,
RefreshUserToGroupsMappingsRequestProto.class);
}
@Test
public void testRefreshUserToGroupsMappingsResponsePBImpl() throws Exception {
validatePBImplRecord(RefreshUserToGroupsMappingsResponsePBImpl.class,
RefreshUserToGroupsMappingsResponseProto.class);
}
@Test
public void testUpdateNodeResourceRequestPBImpl() throws Exception {
validatePBImplRecord(UpdateNodeResourceRequestPBImpl.class,
UpdateNodeResourceRequestProto.class);
}
@Test
public void testUpdateNodeResourceResponsePBImpl() throws Exception {
validatePBImplRecord(UpdateNodeResourceResponsePBImpl.class,
UpdateNodeResourceResponseProto.class);
}
@Test
public void testReservationSubmissionRequestPBImpl() throws Exception {
validatePBImplRecord(ReservationSubmissionRequestPBImpl.class,
ReservationSubmissionRequestProto.class);
}
@Test
public void testReservationSubmissionResponsePBImpl() throws Exception {
validatePBImplRecord(ReservationSubmissionResponsePBImpl.class,
ReservationSubmissionResponseProto.class);
}
@Test
public void testReservationUpdateRequestPBImpl() throws Exception {
validatePBImplRecord(ReservationUpdateRequestPBImpl.class,
ReservationUpdateRequestProto.class);
}
@Test
public void testReservationUpdateResponsePBImpl() throws Exception {
validatePBImplRecord(ReservationUpdateResponsePBImpl.class,
ReservationUpdateResponseProto.class);
}
@Test
public void testReservationDeleteRequestPBImpl() throws Exception {
validatePBImplRecord(ReservationDeleteRequestPBImpl.class,
ReservationDeleteRequestProto.class);
}
@Test
public void testReservationDeleteResponsePBImpl() throws Exception {
validatePBImplRecord(ReservationDeleteResponsePBImpl.class,
ReservationDeleteResponseProto.class);
}
@Test
public void testAddToClusterNodeLabelsRequestPBImpl() throws Exception {
validatePBImplRecord(AddToClusterNodeLabelsRequestPBImpl.class,
AddToClusterNodeLabelsRequestProto.class);
}
@Test
public void testAddToClusterNodeLabelsResponsePBImpl() throws Exception {
validatePBImplRecord(AddToClusterNodeLabelsResponsePBImpl.class,
AddToClusterNodeLabelsResponseProto.class);
}
@Test
public void testRemoveFromClusterNodeLabelsRequestPBImpl() throws Exception {
validatePBImplRecord(RemoveFromClusterNodeLabelsRequestPBImpl.class,
RemoveFromClusterNodeLabelsRequestProto.class);
}
@Test
public void testRemoveFromClusterNodeLabelsResponsePBImpl() throws Exception {
validatePBImplRecord(RemoveFromClusterNodeLabelsResponsePBImpl.class,
RemoveFromClusterNodeLabelsResponseProto.class);
}
@Test
public void testGetClusterNodeLabelsRequestPBImpl() throws Exception {
validatePBImplRecord(GetClusterNodeLabelsRequestPBImpl.class,
GetClusterNodeLabelsRequestProto.class);
}
@Test
public void testGetClusterNodeLabelsResponsePBImpl() throws Exception {
validatePBImplRecord(GetClusterNodeLabelsResponsePBImpl.class,
GetClusterNodeLabelsResponseProto.class);
}
@Test
public void testReplaceLabelsOnNodeRequestPBImpl() throws Exception {
validatePBImplRecord(ReplaceLabelsOnNodeRequestPBImpl.class,
ReplaceLabelsOnNodeRequestProto.class);
}
@Test
public void testReplaceLabelsOnNodeResponsePBImpl() throws Exception {
validatePBImplRecord(ReplaceLabelsOnNodeResponsePBImpl.class,
ReplaceLabelsOnNodeResponseProto.class);
}
@Test
  public void testGetNodesToLabelsRequestPBImpl() throws Exception {
validatePBImplRecord(GetNodesToLabelsRequestPBImpl.class,
GetNodesToLabelsRequestProto.class);
}
@Test
  public void testGetNodesToLabelsResponsePBImpl() throws Exception {
validatePBImplRecord(GetNodesToLabelsResponsePBImpl.class,
GetNodesToLabelsResponseProto.class);
}
@Test
public void testGetLabelsToNodesRequestPBImpl() throws Exception {
validatePBImplRecord(GetLabelsToNodesRequestPBImpl.class,
GetLabelsToNodesRequestProto.class);
}
@Test
public void testGetLabelsToNodesResponsePBImpl() throws Exception {
validatePBImplRecord(GetLabelsToNodesResponsePBImpl.class,
GetLabelsToNodesResponseProto.class);
}
@Test
  public void testNodeLabelPBImpl() throws Exception {
validatePBImplRecord(NodeLabelPBImpl.class,
NodeLabelProto.class);
}
@Test
public void testCheckForDecommissioningNodesRequestPBImpl() throws Exception {
validatePBImplRecord(CheckForDecommissioningNodesRequestPBImpl.class,
CheckForDecommissioningNodesRequestProto.class);
}
@Test
public void testCheckForDecommissioningNodesResponsePBImpl() throws Exception {
validatePBImplRecord(CheckForDecommissioningNodesResponsePBImpl.class,
CheckForDecommissioningNodesResponseProto.class);
}
}
| 58,521 | 43.844444 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.util.Arrays;
import org.junit.Assert;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreasePBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseProto;
import org.junit.Test;
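/**
 * Verifies that ContainerResourceIncrease survives a round trip through its
 * protobuf representation, both with fully populated fields and with nulls.
 */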
public class TestContainerResourceIncrease {
@Test
public void testResourceIncreaseContext() {
byte[] identifier = new byte[] { 1, 2, 3, 4 };
Token token = Token.newInstance(identifier, "", "".getBytes(), "");
ContainerId containerId = ContainerId
.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234, 3), 3), 7);
Resource resource = Resource.newInstance(1023, 3);
ContainerResourceIncrease ctx = ContainerResourceIncrease.newInstance(
containerId, resource, token);
// get proto and recover to ctx
ContainerResourceIncreaseProto proto =
((ContainerResourceIncreasePBImpl) ctx).getProto();
ctx = new ContainerResourceIncreasePBImpl(proto);
// check values
Assert.assertEquals(ctx.getCapability(), resource);
Assert.assertEquals(ctx.getContainerId(), containerId);
Assert.assertTrue(Arrays.equals(ctx.getContainerToken().getIdentifier()
.array(), identifier));
}
@Test
public void testResourceIncreaseContextWithNull() {
ContainerResourceIncrease ctx = ContainerResourceIncrease.newInstance(null,
null, null);
// get proto and recover to ctx;
ContainerResourceIncreaseProto proto =
((ContainerResourceIncreasePBImpl) ctx).getProto();
ctx = new ContainerResourceIncreasePBImpl(proto);
// check values
Assert.assertNull(ctx.getContainerToken());
Assert.assertNull(ctx.getCapability());
Assert.assertNull(ctx.getContainerId());
}
}
| 2,950 | 38.346667 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestSerializedExceptionPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records.impl.pb;
import java.nio.channels.ClosedChannelException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto;
import org.junit.Assert;
import org.junit.Test;
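/**
 * Tests serialization and deserialization of exceptions through
 * SerializedExceptionPBImpl, including the uninitialized case and
 * Throwable subclasses such as Error.
 */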
public class TestSerializedExceptionPBImpl {
@Test
public void testSerializedException() throws Exception {
SerializedExceptionPBImpl orig = new SerializedExceptionPBImpl();
orig.init(new Exception("test exception"));
SerializedExceptionProto proto = orig.getProto();
SerializedExceptionPBImpl deser = new SerializedExceptionPBImpl(proto);
Assert.assertEquals(orig, deser);
Assert.assertEquals(orig.getMessage(), deser.getMessage());
Assert.assertEquals(orig.getRemoteTrace(), deser.getRemoteTrace());
Assert.assertEquals(orig.getCause(), deser.getCause());
}
@Test
public void testDeserialize() throws Exception {
Exception ex = new Exception("test exception");
SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
try {
pb.deSerialize();
Assert.fail("deSerialze should throw YarnRuntimeException");
} catch (YarnRuntimeException e) {
Assert.assertEquals(ClassNotFoundException.class,
e.getCause().getClass());
}
pb.init(ex);
Assert.assertEquals(ex.toString(), pb.deSerialize().toString());
}
@Test
public void testDeserializeWithDefaultConstructor() {
// Init SerializedException with an Exception with default constructor.
ClosedChannelException ex = new ClosedChannelException();
SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
pb.init(ex);
Assert.assertEquals(ex.getClass(), pb.deSerialize().getClass());
}
@Test
public void testBeforeInit() throws Exception {
SerializedExceptionProto defaultProto =
SerializedExceptionProto.newBuilder().build();
SerializedExceptionPBImpl pb1 = new SerializedExceptionPBImpl();
Assert.assertNull(pb1.getCause());
SerializedExceptionPBImpl pb2 = new SerializedExceptionPBImpl();
Assert.assertEquals(defaultProto, pb2.getProto());
SerializedExceptionPBImpl pb3 = new SerializedExceptionPBImpl();
Assert.assertEquals(defaultProto.getTrace(), pb3.getRemoteTrace());
}
@Test
public void testThrowableDeserialization() {
// java.lang.Error should also be serializable
Error ex = new Error();
SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
pb.init(ex);
Assert.assertEquals(ex.getClass(), pb.deSerialize().getClass());
}
}
| 3,398 | 36.351648 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records.timeline;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.WeakHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.junit.Assert;
import org.junit.Test;
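/**
 * Tests the timeline data records: entities, events, put-error responses,
 * domains, and the Map-normalizing behavior of the record setters.
 */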
public class TestTimelineRecords {
private static final Log LOG =
LogFactory.getLog(TestTimelineRecords.class);
@Test
public void testEntities() throws Exception {
TimelineEntities entities = new TimelineEntities();
for (int j = 0; j < 2; ++j) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("entity id " + j);
entity.setEntityType("entity type " + j);
entity.setStartTime(System.currentTimeMillis());
for (int i = 0; i < 2; ++i) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("event type " + i);
event.addEventInfo("key1", "val1");
event.addEventInfo("key2", "val2");
entity.addEvent(event);
}
entity.addRelatedEntity("test ref type 1", "test ref id 1");
entity.addRelatedEntity("test ref type 2", "test ref id 2");
entity.addPrimaryFilter("pkey1", "pval1");
entity.addPrimaryFilter("pkey2", "pval2");
entity.addOtherInfo("okey1", "oval1");
entity.addOtherInfo("okey2", "oval2");
entity.setDomainId("domain id " + j);
entities.addEntity(entity);
}
LOG.info("Entities in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(entities, true));
Assert.assertEquals(2, entities.getEntities().size());
TimelineEntity entity1 = entities.getEntities().get(0);
Assert.assertEquals("entity id 0", entity1.getEntityId());
Assert.assertEquals("entity type 0", entity1.getEntityType());
Assert.assertEquals(2, entity1.getRelatedEntities().size());
Assert.assertEquals(2, entity1.getEvents().size());
Assert.assertEquals(2, entity1.getPrimaryFilters().size());
Assert.assertEquals(2, entity1.getOtherInfo().size());
Assert.assertEquals("domain id 0", entity1.getDomainId());
TimelineEntity entity2 = entities.getEntities().get(1);
Assert.assertEquals("entity id 1", entity2.getEntityId());
Assert.assertEquals("entity type 1", entity2.getEntityType());
Assert.assertEquals(2, entity2.getRelatedEntities().size());
Assert.assertEquals(2, entity2.getEvents().size());
Assert.assertEquals(2, entity2.getPrimaryFilters().size());
Assert.assertEquals(2, entity2.getOtherInfo().size());
Assert.assertEquals("domain id 1", entity2.getDomainId());
}
@Test
public void testEvents() throws Exception {
TimelineEvents events = new TimelineEvents();
for (int j = 0; j < 2; ++j) {
TimelineEvents.EventsOfOneEntity partEvents =
new TimelineEvents.EventsOfOneEntity();
partEvents.setEntityId("entity id " + j);
partEvents.setEntityType("entity type " + j);
for (int i = 0; i < 2; ++i) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("event type " + i);
event.addEventInfo("key1", "val1");
event.addEventInfo("key2", "val2");
partEvents.addEvent(event);
}
events.addEvent(partEvents);
}
LOG.info("Events in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(events, true));
Assert.assertEquals(2, events.getAllEvents().size());
TimelineEvents.EventsOfOneEntity partEvents1 = events.getAllEvents().get(0);
Assert.assertEquals("entity id 0", partEvents1.getEntityId());
Assert.assertEquals("entity type 0", partEvents1.getEntityType());
Assert.assertEquals(2, partEvents1.getEvents().size());
TimelineEvent event11 = partEvents1.getEvents().get(0);
Assert.assertEquals("event type 0", event11.getEventType());
Assert.assertEquals(2, event11.getEventInfo().size());
TimelineEvent event12 = partEvents1.getEvents().get(1);
Assert.assertEquals("event type 1", event12.getEventType());
Assert.assertEquals(2, event12.getEventInfo().size());
TimelineEvents.EventsOfOneEntity partEvents2 = events.getAllEvents().get(1);
Assert.assertEquals("entity id 1", partEvents2.getEntityId());
Assert.assertEquals("entity type 1", partEvents2.getEntityType());
Assert.assertEquals(2, partEvents2.getEvents().size());
TimelineEvent event21 = partEvents2.getEvents().get(0);
Assert.assertEquals("event type 0", event21.getEventType());
Assert.assertEquals(2, event21.getEventInfo().size());
TimelineEvent event22 = partEvents2.getEvents().get(1);
Assert.assertEquals("event type 1", event22.getEventType());
Assert.assertEquals(2, event22.getEventInfo().size());
}
@Test
  public void testTimelinePutErrors() throws Exception {
    TimelinePutResponse timelinePutErrors = new TimelinePutResponse();
    TimelinePutError error1 = new TimelinePutError();
    error1.setEntityId("entity id 1");
    error1.setEntityType("entity type 1");
    error1.setErrorCode(TimelinePutError.NO_START_TIME);
    timelinePutErrors.addError(error1);
    List<TimelinePutError> response = new ArrayList<TimelinePutError>();
    response.add(error1);
    TimelinePutError error2 = new TimelinePutError();
    error2.setEntityId("entity id 2");
    error2.setEntityType("entity type 2");
    error2.setErrorCode(TimelinePutError.IO_EXCEPTION);
    response.add(error2);
    timelinePutErrors.addErrors(response);
    LOG.info("Errors in JSON:");
    LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(timelinePutErrors, true));
    Assert.assertEquals(3, timelinePutErrors.getErrors().size());
    TimelinePutError e = timelinePutErrors.getErrors().get(0);
    Assert.assertEquals(error1.getEntityId(), e.getEntityId());
    Assert.assertEquals(error1.getEntityType(), e.getEntityType());
    Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
    e = timelinePutErrors.getErrors().get(1);
    Assert.assertEquals(error1.getEntityId(), e.getEntityId());
    Assert.assertEquals(error1.getEntityType(), e.getEntityType());
    Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
    e = timelinePutErrors.getErrors().get(2);
    Assert.assertEquals(error2.getEntityId(), e.getEntityId());
    Assert.assertEquals(error2.getEntityType(), e.getEntityType());
    Assert.assertEquals(error2.getErrorCode(), e.getErrorCode());
  }
@Test
public void testTimelineDomain() throws Exception {
TimelineDomains domains = new TimelineDomains();
TimelineDomain domain = null;
for (int i = 0; i < 2; ++i) {
domain = new TimelineDomain();
domain.setId("test id " + (i + 1));
domain.setDescription("test description " + (i + 1));
domain.setOwner("test owner " + (i + 1));
domain.setReaders("test_reader_user_" + (i + 1) +
" test_reader_group+" + (i + 1));
domain.setWriters("test_writer_user_" + (i + 1) +
" test_writer_group+" + (i + 1));
domain.setCreatedTime(0L);
domain.setModifiedTime(1L);
domains.addDomain(domain);
}
LOG.info("Domain in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(domains, true));
Assert.assertEquals(2, domains.getDomains().size());
for (int i = 0; i < domains.getDomains().size(); ++i) {
domain = domains.getDomains().get(i);
Assert.assertEquals("test id " + (i + 1), domain.getId());
Assert.assertEquals("test description " + (i + 1),
domain.getDescription());
Assert.assertEquals("test owner " + (i + 1), domain.getOwner());
Assert.assertEquals("test_reader_user_" + (i + 1) +
" test_reader_group+" + (i + 1), domain.getReaders());
Assert.assertEquals("test_writer_user_" + (i + 1) +
" test_writer_group+" + (i + 1), domain.getWriters());
      Assert.assertEquals(Long.valueOf(0L), domain.getCreatedTime());
      Assert.assertEquals(Long.valueOf(1L), domain.getModifiedTime());
}
}
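  // The record setters should accept any Map/Set implementation and
  // normalize the stored copy to a HashMap, as checked by the assert
  // helpers below.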
@Test
  public void testMapInterfaceOfTimelineRecords() throws Exception {
TimelineEntity entity = new TimelineEntity();
List<Map<String, Set<Object>>> primaryFiltersList =
new ArrayList<Map<String, Set<Object>>>();
primaryFiltersList.add(
Collections.singletonMap("pkey", Collections.singleton((Object) "pval")));
Map<String, Set<Object>> primaryFilters = new TreeMap<String, Set<Object>>();
primaryFilters.put("pkey1", Collections.singleton((Object) "pval1"));
primaryFilters.put("pkey2", Collections.singleton((Object) "pval2"));
primaryFiltersList.add(primaryFilters);
entity.setPrimaryFilters(null);
for (Map<String, Set<Object>> primaryFiltersToSet : primaryFiltersList) {
entity.setPrimaryFilters(primaryFiltersToSet);
assertPrimaryFilters(entity);
Map<String, Set<Object>> primaryFiltersToAdd =
new WeakHashMap<String, Set<Object>>();
primaryFiltersToAdd.put("pkey3", Collections.singleton((Object) "pval3"));
entity.addPrimaryFilters(primaryFiltersToAdd);
assertPrimaryFilters(entity);
}
List<Map<String, Set<String>>> relatedEntitiesList =
new ArrayList<Map<String, Set<String>>>();
relatedEntitiesList.add(
Collections.singletonMap("rkey", Collections.singleton("rval")));
Map<String, Set<String>> relatedEntities = new TreeMap<String, Set<String>>();
relatedEntities.put("rkey1", Collections.singleton("rval1"));
relatedEntities.put("rkey2", Collections.singleton("rval2"));
relatedEntitiesList.add(relatedEntities);
entity.setRelatedEntities(null);
for (Map<String, Set<String>> relatedEntitiesToSet : relatedEntitiesList) {
entity.setRelatedEntities(relatedEntitiesToSet);
assertRelatedEntities(entity);
Map<String, Set<String>> relatedEntitiesToAdd =
new WeakHashMap<String, Set<String>>();
relatedEntitiesToAdd.put("rkey3", Collections.singleton("rval3"));
entity.addRelatedEntities(relatedEntitiesToAdd);
assertRelatedEntities(entity);
}
List<Map<String, Object>> otherInfoList =
new ArrayList<Map<String, Object>>();
otherInfoList.add(Collections.singletonMap("okey", (Object) "oval"));
Map<String, Object> otherInfo = new TreeMap<String, Object>();
otherInfo.put("okey1", "oval1");
otherInfo.put("okey2", "oval2");
otherInfoList.add(otherInfo);
entity.setOtherInfo(null);
for (Map<String, Object> otherInfoToSet : otherInfoList) {
entity.setOtherInfo(otherInfoToSet);
assertOtherInfo(entity);
Map<String, Object> otherInfoToAdd = new WeakHashMap<String, Object>();
otherInfoToAdd.put("okey3", "oval3");
entity.addOtherInfo(otherInfoToAdd);
assertOtherInfo(entity);
}
TimelineEvent event = new TimelineEvent();
List<Map<String, Object>> eventInfoList =
new ArrayList<Map<String, Object>>();
eventInfoList.add(Collections.singletonMap("ekey", (Object) "eval"));
Map<String, Object> eventInfo = new TreeMap<String, Object>();
eventInfo.put("ekey1", "eval1");
eventInfo.put("ekey2", "eval2");
eventInfoList.add(eventInfo);
event.setEventInfo(null);
for (Map<String, Object> eventInfoToSet : eventInfoList) {
event.setEventInfo(eventInfoToSet);
assertEventInfo(event);
Map<String, Object> eventInfoToAdd = new WeakHashMap<String, Object>();
eventInfoToAdd.put("ekey3", "eval3");
event.addEventInfo(eventInfoToAdd);
assertEventInfo(event);
}
}
private static void assertPrimaryFilters(TimelineEntity entity) {
Assert.assertNotNull(entity.getPrimaryFilters());
Assert.assertNotNull(entity.getPrimaryFiltersJAXB());
Assert.assertTrue(entity.getPrimaryFilters() instanceof HashMap);
Assert.assertTrue(entity.getPrimaryFiltersJAXB() instanceof HashMap);
Assert.assertEquals(
entity.getPrimaryFilters(), entity.getPrimaryFiltersJAXB());
}
private static void assertRelatedEntities(TimelineEntity entity) {
Assert.assertNotNull(entity.getRelatedEntities());
Assert.assertNotNull(entity.getRelatedEntitiesJAXB());
Assert.assertTrue(entity.getRelatedEntities() instanceof HashMap);
Assert.assertTrue(entity.getRelatedEntitiesJAXB() instanceof HashMap);
Assert.assertEquals(
entity.getRelatedEntities(), entity.getRelatedEntitiesJAXB());
}
private static void assertOtherInfo(TimelineEntity entity) {
Assert.assertNotNull(entity.getOtherInfo());
Assert.assertNotNull(entity.getOtherInfoJAXB());
Assert.assertTrue(entity.getOtherInfo() instanceof HashMap);
Assert.assertTrue(entity.getOtherInfoJAXB() instanceof HashMap);
Assert.assertEquals(entity.getOtherInfo(), entity.getOtherInfoJAXB());
}
private static void assertEventInfo(TimelineEvent event) {
    Assert.assertNotNull(event.getEventInfo());
Assert.assertNotNull(event.getEventInfoJAXB());
Assert.assertTrue(event.getEventInfo() instanceof HashMap);
Assert.assertTrue(event.getEventInfoJAXB() instanceof HashMap);
Assert.assertEquals(event.getEventInfo(), event.getEventInfoJAXB());
}
}
| 14,210 | 43.409375 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/TestClientRMProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
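/**
 * Verifies that the RM delegation token and AM-RM token service names
 * contain one address per ResourceManager: a single entry normally, and
 * one entry per RM id when HA is enabled.
 */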
public class TestClientRMProxy {
@Test
public void testGetRMDelegationTokenService() {
String defaultRMAddress = YarnConfiguration.DEFAULT_RM_ADDRESS;
YarnConfiguration conf = new YarnConfiguration();
// HA is not enabled
Text tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
String[] services = tokenService.toString().split(",");
assertEquals(1, services.length);
for (String service : services) {
assertTrue("Incorrect token service name",
service.contains(defaultRMAddress));
}
// HA is enabled
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"),
"0.0.0.0");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"),
"0.0.0.0");
tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
services = tokenService.toString().split(",");
assertEquals(2, services.length);
for (String service : services) {
assertTrue("Incorrect token service name",
service.contains(defaultRMAddress));
}
}
@Test
public void testGetAMRMTokenService() {
String defaultRMAddress = YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS;
YarnConfiguration conf = new YarnConfiguration();
// HA is not enabled
Text tokenService = ClientRMProxy.getAMRMTokenService(conf);
String[] services = tokenService.toString().split(",");
assertEquals(1, services.length);
for (String service : services) {
assertTrue("Incorrect token service name",
service.contains(defaultRMAddress));
}
// HA is enabled
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"),
"0.0.0.0");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"),
"0.0.0.0");
tokenService = ClientRMProxy.getAMRMTokenService(conf);
services = tokenService.toString().split(",");
assertEquals(2, services.length);
for (String service : services) {
assertTrue("Incorrect token service name",
service.contains(defaultRMAddress));
}
}
}
| 3,378 | 36.544444 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client.api.impl;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.net.ConnectException;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.ClientResponse;
public class TestTimelineClient {
private TimelineClientImpl client;
@Before
public void setup() {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
client = createTimelineClient(conf);
}
@After
public void tearDown() {
if (client != null) {
client.stop();
}
}
@Test
public void testPostEntities() throws Exception {
mockEntityClientResponse(client, ClientResponse.Status.OK, false, false);
try {
TimelinePutResponse response = client.putEntities(generateEntity());
Assert.assertEquals(0, response.getErrors().size());
} catch (YarnException e) {
Assert.fail("Exception is not expected");
}
}
@Test
public void testPostEntitiesWithError() throws Exception {
mockEntityClientResponse(client, ClientResponse.Status.OK, true, false);
try {
TimelinePutResponse response = client.putEntities(generateEntity());
Assert.assertEquals(1, response.getErrors().size());
Assert.assertEquals("test entity id", response.getErrors().get(0)
.getEntityId());
Assert.assertEquals("test entity type", response.getErrors().get(0)
.getEntityType());
Assert.assertEquals(TimelinePutResponse.TimelinePutError.IO_EXCEPTION,
response.getErrors().get(0).getErrorCode());
} catch (YarnException e) {
Assert.fail("Exception is not expected");
}
}
@Test
public void testPostEntitiesNoResponse() throws Exception {
mockEntityClientResponse(
client, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
try {
client.putEntities(generateEntity());
Assert.fail("Exception is expected");
} catch (YarnException e) {
Assert.assertTrue(e.getMessage().contains(
"Failed to get the response from the timeline server."));
}
}
@Test
public void testPostEntitiesConnectionRefused() throws Exception {
mockEntityClientResponse(client, null, false, true);
try {
client.putEntities(generateEntity());
Assert.fail("RuntimeException is expected");
} catch (RuntimeException re) {
Assert.assertTrue(re instanceof ClientHandlerException);
}
}
@Test
public void testPutDomain() throws Exception {
mockDomainClientResponse(client, ClientResponse.Status.OK, false);
try {
client.putDomain(generateDomain());
} catch (YarnException e) {
Assert.fail("Exception is not expected");
}
}
@Test
public void testPutDomainNoResponse() throws Exception {
mockDomainClientResponse(client, ClientResponse.Status.FORBIDDEN, false);
try {
client.putDomain(generateDomain());
Assert.fail("Exception is expected");
} catch (YarnException e) {
Assert.assertTrue(e.getMessage().contains(
"Failed to get the response from the timeline server."));
}
}
@Test
public void testPutDomainConnectionRefused() throws Exception {
mockDomainClientResponse(client, null, true);
try {
client.putDomain(generateDomain());
Assert.fail("RuntimeException is expected");
} catch (RuntimeException re) {
Assert.assertTrue(re instanceof ClientHandlerException);
}
}
@Test
public void testCheckRetryCount() throws Exception {
try {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
-2);
createTimelineClient(conf);
      Assert.fail("Negative max retries should cause an IllegalArgumentException");
} catch(IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES));
}
try {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setLong(YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
0);
createTimelineClient(conf);
      Assert.fail("A zero retry interval should cause an IllegalArgumentException");
} catch(IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS));
}
int newMaxRetries = 5;
long newIntervalMs = 500;
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
newMaxRetries);
conf.setLong(YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
newIntervalMs);
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
TimelineClientImpl client = createTimelineClient(conf);
try {
// This call should fail because there is no timeline server
client.putEntities(generateEntity());
Assert.fail("Exception expected! "
+ "Timeline server should be off to run this test. ");
} catch (RuntimeException ce) {
Assert.assertTrue(
"Handler exception for reason other than retry: " + ce.getMessage(),
ce.getMessage().contains("Connection retries limit exceeded"));
// we would expect this exception here, check if the client has retried
Assert.assertTrue("Retry filter didn't perform any retries! ", client
.connectionRetry.getRetired());
}
}
@Test
public void testDelegationTokenOperationsRetry() throws Exception {
int newMaxRetries = 5;
long newIntervalMs = 500;
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
newMaxRetries);
conf.setLong(YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
newIntervalMs);
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
// use kerberos to bypass the issue in HADOOP-11215
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
TimelineClientImpl client = createTimelineClient(conf);
    TestTimelineDelegationTokenSecretManager dtManager =
        new TestTimelineDelegationTokenSecretManager();
try {
dtManager.startThreads();
Thread.sleep(3000);
try {
// try getting a delegation token
client.getDelegationToken(
UserGroupInformation.getCurrentUser().getShortUserName());
assertFail();
} catch (RuntimeException ce) {
assertException(client, ce);
}
try {
// try renew a delegation token
TimelineDelegationTokenIdentifier timelineDT =
new TimelineDelegationTokenIdentifier(
new Text("tester"), new Text("tester"), new Text("tester"));
client.renewDelegationToken(
new Token<TimelineDelegationTokenIdentifier>(timelineDT.getBytes(),
dtManager.createPassword(timelineDT),
timelineDT.getKind(),
new Text("0.0.0.0:8188")));
assertFail();
} catch (RuntimeException ce) {
assertException(client, ce);
}
try {
// try cancel a delegation token
TimelineDelegationTokenIdentifier timelineDT =
new TimelineDelegationTokenIdentifier(
new Text("tester"), new Text("tester"), new Text("tester"));
client.cancelDelegationToken(
new Token<TimelineDelegationTokenIdentifier>(timelineDT.getBytes(),
dtManager.createPassword(timelineDT),
timelineDT.getKind(),
new Text("0.0.0.0:8188")));
assertFail();
} catch (RuntimeException ce) {
assertException(client, ce);
}
} finally {
client.stop();
dtManager.stopThreads();
}
}
private static void assertFail() {
Assert.fail("Exception expected! "
+ "Timeline server should be off to run this test.");
}
private void assertException(TimelineClientImpl client, RuntimeException ce) {
Assert.assertTrue(
"Handler exception for reason other than retry: " + ce.toString(), ce
.getMessage().contains("Connection retries limit exceeded"));
// we would expect this exception here, check if the client has retried
Assert.assertTrue("Retry filter didn't perform any retries! ",
client.connectionRetry.getRetired());
}
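  // Stubs the spied client's doPostingObject() so that posting entities
  // returns a mocked ClientResponse with the given status, or throws a
  // ClientHandlerException when hasRuntimeError is set.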
private static ClientResponse mockEntityClientResponse(
TimelineClientImpl client, ClientResponse.Status status,
boolean hasError, boolean hasRuntimeError) {
ClientResponse response = mock(ClientResponse.class);
if (hasRuntimeError) {
doThrow(new ClientHandlerException(new ConnectException())).when(client)
.doPostingObject(any(TimelineEntities.class), any(String.class));
return response;
}
doReturn(response).when(client)
.doPostingObject(any(TimelineEntities.class), any(String.class));
when(response.getClientResponseStatus()).thenReturn(status);
TimelinePutResponse.TimelinePutError error =
new TimelinePutResponse.TimelinePutError();
error.setEntityId("test entity id");
error.setEntityType("test entity type");
error.setErrorCode(TimelinePutResponse.TimelinePutError.IO_EXCEPTION);
TimelinePutResponse putResponse = new TimelinePutResponse();
if (hasError) {
putResponse.addError(error);
}
when(response.getEntity(TimelinePutResponse.class)).thenReturn(putResponse);
return response;
}
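  // Same as above, but stubs the posting of a TimelineDomain.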
private static ClientResponse mockDomainClientResponse(
TimelineClientImpl client, ClientResponse.Status status,
boolean hasRuntimeError) {
ClientResponse response = mock(ClientResponse.class);
if (hasRuntimeError) {
doThrow(new ClientHandlerException(new ConnectException())).when(client)
.doPostingObject(any(TimelineDomain.class), any(String.class));
return response;
}
doReturn(response).when(client)
.doPostingObject(any(TimelineDomain.class), any(String.class));
when(response.getClientResponseStatus()).thenReturn(status);
return response;
}
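  // Builds a representative entity with events, related entities, filters
  // and other info, so the put path is exercised with a non-trivial payload.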
private static TimelineEntity generateEntity() {
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("entity id");
entity.setEntityType("entity type");
entity.setStartTime(System.currentTimeMillis());
for (int i = 0; i < 2; ++i) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("test event type " + i);
event.addEventInfo("key1", "val1");
event.addEventInfo("key2", "val2");
entity.addEvent(event);
}
entity.addRelatedEntity("test ref type 1", "test ref id 1");
entity.addRelatedEntity("test ref type 2", "test ref id 2");
entity.addPrimaryFilter("pkey1", "pval1");
entity.addPrimaryFilter("pkey2", "pval2");
entity.addOtherInfo("okey1", "oval1");
entity.addOtherInfo("okey2", "oval2");
entity.setDomainId("domain id 1");
return entity;
}
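  // Builds a minimal domain object for the putDomain tests.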
public static TimelineDomain generateDomain() {
TimelineDomain domain = new TimelineDomain();
domain.setId("namesapce id");
domain.setDescription("domain description");
domain.setOwner("domain owner");
domain.setReaders("domain_reader");
domain.setWriters("domain_writer");
domain.setCreatedTime(0L);
domain.setModifiedTime(1L);
return domain;
}
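  // Creates a started client as a Mockito spy so that individual HTTP
  // methods can be stubbed by the tests above.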
private static TimelineClientImpl createTimelineClient(
YarnConfiguration conf) {
TimelineClientImpl client =
spy((TimelineClientImpl) TimelineClient.createTimelineClient());
client.init(conf);
client.start();
return client;
}
  private static class TestTimelineDelegationTokenSecretManager extends
      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
    public TestTimelineDelegationTokenSecretManager() {
super(100000, 100000, 100000, 100000);
}
@Override
public TimelineDelegationTokenIdentifier createIdentifier() {
return new TimelineDelegationTokenIdentifier();
}
@Override
public synchronized byte[] createPassword(TimelineDelegationTokenIdentifier identifier) {
return super.createPassword(identifier);
}
}
}
| 14390 | 36.282383 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.junit.Assert;
import org.junit.Test;
public class TestRackResolver {
private static Log LOG = LogFactory.getLog(TestRackResolver.class);
private static final String invalidHost = "invalidHost";
public static final class MyResolver implements DNSToSwitchMapping {
int numHost1 = 0;
public static String resolvedHost1 = "host1";
@Override
public List<String> resolve(List<String> hostList) {
// Only one host at a time
Assert.assertTrue("hostList size is " + hostList.size(),
hostList.size() <= 1);
List<String> returnList = new ArrayList<String>();
if (hostList.isEmpty()) {
return returnList;
}
if (hostList.get(0).equals(invalidHost)) {
// Simulate condition where resolving host returns null
return null;
}
LOG.info("Received resolve request for "
+ hostList.get(0));
if (hostList.get(0).equals("host1")
|| hostList.get(0).equals(resolvedHost1)) {
numHost1++;
returnList.add("/rack1");
}
      // Resolution should not be requested again for host1, as RackResolver
      // is expected to cache the first result.
Assert.assertTrue(numHost1 <= 1);
return returnList;
}
@Override
public void reloadCachedMappings() {
// nothing to do here, since RawScriptBasedMapping has no cache.
}
@Override
public void reloadCachedMappings(List<String> names) {
}
}
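  // Resolves host1 twice (the second hit must come from the cache, which is
  // enforced by MyResolver's numHost1 assertion) and verifies that an
  // unresolvable host falls back to the default rack.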
@Test
public void testCaching() {
Configuration conf = new Configuration();
conf.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
try {
InetAddress iaddr = InetAddress.getByName("host1");
MyResolver.resolvedHost1 = iaddr.getHostAddress();
} catch (UnknownHostException e) {
// Ignore if not found
}
Node node = RackResolver.resolve("host1");
Assert.assertEquals("/rack1", node.getNetworkLocation());
node = RackResolver.resolve("host1");
Assert.assertEquals("/rack1", node.getNetworkLocation());
node = RackResolver.resolve(invalidHost);
Assert.assertEquals(NetworkTopology.DEFAULT_RACK, node.getNetworkLocation());
}
}
| 3524 | 32.254717 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import static org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.KB_TO_BYTES;
import static org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree.UNAVAILABLE;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.MemInfo;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.ProcessSmapMemoryInfo;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.ProcessTreeSmapMemInfo;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* A JUnit test to test ProcfsBasedProcessTree.
*/
public class TestProcfsBasedProcessTree {
private static final Log LOG = LogFactory
.getLog(TestProcfsBasedProcessTree.class);
protected static File TEST_ROOT_DIR = new File("target",
TestProcfsBasedProcessTree.class.getName() + "-localDir");
private ShellCommandExecutor shexec = null;
private String pidFile, lowestDescendant;
private String shellScript;
  private static final int N = 6; // depth of the subprocess chain spawned by the rogue task
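  // Runs a shell script that recursively forks itself N times: the root PID
  // is written to pidFile and the deepest child's PID to lowestDescendant.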
private class RogueTaskThread extends Thread {
public void run() {
try {
Vector<String> args = new Vector<String>();
if (isSetsidAvailable()) {
args.add("setsid");
}
args.add("bash");
args.add("-c");
args.add(" echo $$ > " + pidFile + "; sh " + shellScript + " " + N
+ ";");
shexec = new ShellCommandExecutor(args.toArray(new String[0]));
shexec.execute();
} catch (ExitCodeException ee) {
LOG.info("Shell Command exit with a non-zero exit code. This is"
+ " expected as we are killing the subprocesses of the"
+ " task intentionally. " + ee);
} catch (IOException ioe) {
LOG.info("Error executing shell command " + ioe);
} finally {
LOG.info("Exit code: " + shexec.getExitCode());
}
}
}
private String getRogueTaskPID() {
File f = new File(pidFile);
while (!f.exists()) {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
break;
}
}
// read from pidFile
return getPidFromPidFile(pidFile);
}
@Before
public void setup() throws IOException {
assumeTrue(Shell.LINUX);
FileContext.getLocalFSFileContext().delete(
new Path(TEST_ROOT_DIR.getAbsolutePath()), true);
}
@Test(timeout = 30000)
@SuppressWarnings("deprecation")
public void testProcessTree() throws Exception {
try {
Assert.assertTrue(ProcfsBasedProcessTree.isAvailable());
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
Assert.assertTrue("ProcfsBaseProcessTree should be available on Linux",
false);
return;
}
// create shell script
Random rm = new Random();
File tempFile =
new File(TEST_ROOT_DIR, getClass().getName() + "_shellScript_"
+ rm.nextInt() + ".sh");
tempFile.deleteOnExit();
shellScript = TEST_ROOT_DIR + File.separator + tempFile.getName();
// create pid file
tempFile =
new File(TEST_ROOT_DIR, getClass().getName() + "_pidFile_"
+ rm.nextInt() + ".pid");
tempFile.deleteOnExit();
pidFile = TEST_ROOT_DIR + File.separator + tempFile.getName();
lowestDescendant =
TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
// write to shell-script
try {
FileWriter fWriter = new FileWriter(shellScript);
fWriter.write("# rogue task\n" + "sleep 1\n" + "echo hello\n"
+ "if [ $1 -ne 0 ]\n" + "then\n" + " sh " + shellScript
+ " $(($1-1))\n" + "else\n" + " echo $$ > " + lowestDescendant + "\n"
+ " while true\n do\n" + " sleep 5\n" + " done\n" + "fi");
fWriter.close();
} catch (IOException ioe) {
LOG.info("Error: " + ioe);
return;
}
Thread t = new RogueTaskThread();
t.start();
String pid = getRogueTaskPID();
LOG.info("Root process pid: " + pid);
ProcfsBasedProcessTree p = createProcessTree(pid);
p.updateProcessTree(); // initialize
LOG.info("ProcessTree: " + p.toString());
File leaf = new File(lowestDescendant);
    // wait until the lowest descendant process of the rogue task starts execution
while (!leaf.exists()) {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
break;
}
}
p.updateProcessTree(); // reconstruct
LOG.info("ProcessTree: " + p.toString());
// Get the process-tree dump
String processTreeDump = p.getProcessTreeDump();
// destroy the process and all its subprocesses
destroyProcessTree(pid);
boolean isAlive = true;
for (int tries = 100; tries > 0; tries--) {
if (isSetsidAvailable()) {// whole processtree
isAlive = isAnyProcessInTreeAlive(p);
} else {// process
isAlive = isAlive(pid);
}
if (!isAlive) {
break;
}
Thread.sleep(100);
}
if (isAlive) {
fail("ProcessTree shouldn't be alive");
}
LOG.info("Process-tree dump follows: \n" + processTreeDump);
Assert.assertTrue("Process-tree dump doesn't start with a proper header",
processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME "
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
for (int i = N; i >= 0; i--) {
String cmdLineDump =
"\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)"
+ " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " " + i;
Pattern pat = Pattern.compile(cmdLineDump);
Matcher mat = pat.matcher(processTreeDump);
Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of "
+ i + "th process!", mat.find());
}
    // The thread sometimes cannot be joined promptly when forking with a large N.
try {
t.join(2000);
LOG.info("RogueTaskThread successfully joined.");
} catch (InterruptedException ie) {
LOG.info("Interrupted while joining RogueTaskThread.");
}
// ProcessTree is gone now. Any further calls should be sane.
p.updateProcessTree();
Assert.assertFalse("ProcessTree must have been gone", isAlive(pid));
Assert.assertTrue(
"vmem for the gone-process is " + p.getVirtualMemorySize()
+ " . It should be zero.", p.getVirtualMemorySize() == 0);
Assert.assertTrue(
"vmem (old API) for the gone-process is " + p.getCumulativeVmem()
+ " . It should be zero.", p.getCumulativeVmem() == 0);
Assert.assertTrue(p.toString().equals("[ ]"));
}
protected ProcfsBasedProcessTree createProcessTree(String pid) {
return new ProcfsBasedProcessTree(pid);
}
protected ProcfsBasedProcessTree createProcessTree(String pid,
String procfsRootDir, Clock clock) {
return new ProcfsBasedProcessTree(pid, procfsRootDir, clock);
}
protected void destroyProcessTree(String pid) throws IOException {
sendSignal(pid, 9);
}
/**
* Get PID from a pid-file.
*
* @param pidFileName
* Name of the pid-file.
* @return the PID string read from the pid-file. Returns null if the
* pidFileName points to a non-existing file or if read fails from the
* file.
*/
public static String getPidFromPidFile(String pidFileName) {
BufferedReader pidFile = null;
FileReader fReader = null;
String pid = null;
try {
fReader = new FileReader(pidFileName);
pidFile = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
LOG.debug("PidFile doesn't exist : " + pidFileName);
return pid;
}
try {
pid = pidFile.readLine();
} catch (IOException i) {
LOG.error("Failed to read from " + pidFileName);
} finally {
      // close the BufferedReader first (which also closes the wrapped
      // FileReader), then close the FileReader independently so a failure
      // in one close cannot skip the other
      try {
        if (pidFile != null) {
          pidFile.close();
        }
      } catch (IOException i) {
        LOG.warn("Error closing the stream " + pidFile);
      }
      try {
        if (fReader != null) {
          fReader.close();
        }
      } catch (IOException i) {
        LOG.warn("Error closing the stream " + fReader);
      }
}
return pid;
}
public static class ProcessStatInfo {
// sample stat in a single line : 3910 (gpm) S 1 3910 3910 0 -1 4194624
// 83 0 0 0 0 0 0 0 16 0 1 0 7852 2408448 88 4294967295 134512640
// 134590050 3220521392 3220520036 10975138 0 0 4096 134234626
// 4294967295 0 0 17 1 0 0
String pid;
String name;
String ppid;
String pgrpId;
String session;
String vmem = "0";
String rssmemPage = "0";
String utime = "0";
String stime = "0";
public ProcessStatInfo(String[] statEntries) {
pid = statEntries[0];
name = statEntries[1];
ppid = statEntries[2];
pgrpId = statEntries[3];
session = statEntries[4];
vmem = statEntries[5];
if (statEntries.length > 6) {
rssmemPage = statEntries[6];
}
      if (statEntries.length > 8) { // utime and stime occupy indices 7 and 8
utime = statEntries[7];
stime = statEntries[8];
}
}
// construct a line that mimics the procfs stat file.
// all unused numerical entries are set to 0.
public String getStatLine() {
return String.format("%s (%s) S %s %s %s 0 0 0"
+ " 0 0 0 0 %s %s 0 0 0 0 0 0 0 %s %s 0 0" + " 0 0 0 0 0 0 0 0"
+ " 0 0 0 0 0", pid, name, ppid, pgrpId, session, utime, stime, vmem,
rssmemPage);
}
}
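  // Builds one smaps mapping entry; the entries array is consumed in the
  // order SIZE, RSS, PSS, SHARED_CLEAN, SHARED_DIRTY, PRIVATE_CLEAN,
  // PRIVATE_DIRTY, REFERENCED, ANONYMOUS, ANON_HUGE_PAGES, SWAP,
  // KERNEL_PAGE_SIZE, MMU_PAGE_SIZE (all values in KB).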
public ProcessSmapMemoryInfo constructMemoryMappingInfo(String address,
String[] entries) {
ProcessSmapMemoryInfo info = new ProcessSmapMemoryInfo(address);
info.setMemInfo(MemInfo.SIZE.name(), entries[0]);
info.setMemInfo(MemInfo.RSS.name(), entries[1]);
info.setMemInfo(MemInfo.PSS.name(), entries[2]);
info.setMemInfo(MemInfo.SHARED_CLEAN.name(), entries[3]);
info.setMemInfo(MemInfo.SHARED_DIRTY.name(), entries[4]);
info.setMemInfo(MemInfo.PRIVATE_CLEAN.name(), entries[5]);
info.setMemInfo(MemInfo.PRIVATE_DIRTY.name(), entries[6]);
info.setMemInfo(MemInfo.REFERENCED.name(), entries[7]);
info.setMemInfo(MemInfo.ANONYMOUS.name(), entries[8]);
info.setMemInfo(MemInfo.ANON_HUGE_PAGES.name(), entries[9]);
info.setMemInfo(MemInfo.SWAP.name(), entries[10]);
info.setMemInfo(MemInfo.KERNEL_PAGE_SIZE.name(), entries[11]);
info.setMemInfo(MemInfo.MMU_PAGE_SIZE.name(), entries[12]);
return info;
}
public void createMemoryMappingInfo(ProcessTreeSmapMemInfo[] procMemInfo) {
for (int i = 0; i < procMemInfo.length; i++) {
// Construct 4 memory mappings per process.
// As per min(Shared_Dirty, Pss) + Private_Clean + Private_Dirty
// and not including r--s, r-xs, we should get 100 KB per process
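      // Worked example per process: the first rw-p mapping contributes
      // min(Shared_Dirty=25, Pss=25) + 15 + 10 = 50 KB, the last rw-p mapping
      // min(Shared_Dirty=50, Pss=25) + 15 + 10 = 50 KB, and the r--s/r-xs
      // mappings are excluded, giving the 100 KB total.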
List<ProcessSmapMemoryInfo> memoryMappingList =
procMemInfo[i].getMemoryInfoList();
memoryMappingList.add(constructMemoryMappingInfo(
"7f56c177c000-7f56c177d000 "
+ "rw-p 00010000 08:02 40371558 "
+ "/grid/0/jdk1.7.0_25/jre/lib/amd64/libnio.so",
new String[] { "4", "4", "25", "4", "25", "15", "10", "4", "0", "0",
"0", "4", "4" }));
memoryMappingList.add(constructMemoryMappingInfo(
"7fb09382e000-7fb09382f000 r--s 00003000 " + "08:02 25953545",
new String[] { "4", "4", "25", "4", "0", "15", "10", "4", "0", "0",
"0", "4", "4" }));
memoryMappingList.add(constructMemoryMappingInfo(
"7e8790000-7e8b80000 r-xs 00000000 00:00 0", new String[] { "4", "4",
"25", "4", "0", "15", "10", "4", "0", "0", "0", "4", "4" }));
memoryMappingList.add(constructMemoryMappingInfo(
"7da677000-7e0dcf000 rw-p 00000000 00:00 0", new String[] { "4", "4",
"25", "4", "50", "15", "10", "4", "0", "0", "0", "4", "4" }));
}
}
/**
* A basic test that creates a few process directories and writes stat files.
* Verifies that the cpu time and memory is correctly computed.
*
* @throws IOException
* if there was a problem setting up the fake procfs directories or
* files.
*/
@Test(timeout = 30000)
@SuppressWarnings("deprecation")
public void testCpuAndMemoryForProcessTree() throws IOException {
// test processes
String[] pids = { "100", "200", "300", "400" };
ControlledClock testClock = new ControlledClock(new SystemClock());
testClock.setTime(0);
// create the fake procfs root directory.
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir, pids);
// create stat objects.
// assuming processes 100, 200, 300 are in tree and 400 is not.
ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
procInfos[0] =
new ProcessStatInfo(new String[] { "100", "proc1", "1", "100", "100",
"100000", "100", "1000", "200" });
procInfos[1] =
new ProcessStatInfo(new String[] { "200", "proc2", "100", "100",
"100", "200000", "200", "2000", "400" });
procInfos[2] =
new ProcessStatInfo(new String[] { "300", "proc3", "200", "100",
"100", "300000", "300", "3000", "600" });
procInfos[3] =
new ProcessStatInfo(new String[] { "400", "proc4", "1", "400", "400",
"400000", "400", "4000", "800" });
ProcessTreeSmapMemInfo[] memInfo = new ProcessTreeSmapMemInfo[4];
memInfo[0] = new ProcessTreeSmapMemInfo("100");
memInfo[1] = new ProcessTreeSmapMemInfo("200");
memInfo[2] = new ProcessTreeSmapMemInfo("300");
memInfo[3] = new ProcessTreeSmapMemInfo("400");
createMemoryMappingInfo(memInfo);
writeStatFiles(procfsRootDir, pids, procInfos, memInfo);
// crank up the process tree class.
Configuration conf = new Configuration();
ProcfsBasedProcessTree processTree =
createProcessTree("100", procfsRootDir.getAbsolutePath(), testClock);
processTree.setConf(conf);
// build the process tree.
processTree.updateProcessTree();
// verify virtual memory
Assert.assertEquals("Virtual memory does not match", 600000L,
processTree.getVirtualMemorySize());
// verify rss memory
long cumuRssMem =
ProcfsBasedProcessTree.PAGE_SIZE > 0
? 600L * ProcfsBasedProcessTree.PAGE_SIZE :
ResourceCalculatorProcessTree.UNAVAILABLE;
Assert.assertEquals("rss memory does not match", cumuRssMem,
processTree.getRssMemorySize());
// verify old API
Assert.assertEquals("rss memory (old API) does not match", cumuRssMem,
processTree.getCumulativeRssmem());
// verify cumulative cpu time
long cumuCpuTime =
ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0
? 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
Assert.assertEquals("Cumulative cpu time does not match", cumuCpuTime,
processTree.getCumulativeCpuTime());
// verify CPU usage
Assert.assertEquals("Percent CPU time should be set to -1 initially",
-1.0, processTree.getCpuUsagePercent(),
0.01);
// Check by enabling smaps
      setSmapsInProcessTree(processTree, true);
// RSS=Min(shared_dirty,PSS)+PrivateClean+PrivateDirty (exclude r-xs,
// r--s)
Assert.assertEquals("rss memory does not match",
(100 * KB_TO_BYTES * 3), processTree.getRssMemorySize());
// verify old API
Assert.assertEquals("rss memory (old API) does not match",
(100 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
// test the cpu time again to see if it cumulates
procInfos[0] =
new ProcessStatInfo(new String[] { "100", "proc1", "1", "100", "100",
"100000", "100", "2000", "300" });
procInfos[1] =
new ProcessStatInfo(new String[] { "200", "proc2", "100", "100",
"100", "200000", "200", "3000", "500" });
writeStatFiles(procfsRootDir, pids, procInfos, memInfo);
long elapsedTimeBetweenUpdatesMsec = 200000;
testClock.setTime(elapsedTimeBetweenUpdatesMsec);
// build the process tree.
processTree.updateProcessTree();
// verify cumulative cpu time again
long prevCumuCpuTime = cumuCpuTime;
cumuCpuTime =
ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0
? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
Assert.assertEquals("Cumulative cpu time does not match", cumuCpuTime,
processTree.getCumulativeCpuTime());
double expectedCpuUsagePercent =
(ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0) ?
(cumuCpuTime - prevCumuCpuTime) * 100.0 /
elapsedTimeBetweenUpdatesMsec : 0;
      // expectedCpuUsagePercent is given by (94000 - 72000) * 100 / 200000,
      // which in this case is 11. Let's verify that first.
Assert.assertEquals(11, expectedCpuUsagePercent, 0.001);
Assert.assertEquals("Percent CPU time is not correct expected " +
expectedCpuUsagePercent, expectedCpuUsagePercent,
processTree.getCpuUsagePercent(),
0.01);
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
  private void setSmapsInProcessTree(ProcfsBasedProcessTree processTree,
      boolean enableFlag) {
Configuration conf = processTree.getConf();
if (conf == null) {
conf = new Configuration();
}
conf.setBoolean(YarnConfiguration.PROCFS_USE_SMAPS_BASED_RSS_ENABLED, enableFlag);
processTree.setConf(conf);
processTree.updateProcessTree();
}
/**
* Tests that cumulative memory is computed only for processes older than a
* given age.
*
* @throws IOException
* if there was a problem setting up the fake procfs directories or
* files.
*/
@Test(timeout = 30000)
public void testMemForOlderProcesses() throws IOException {
testMemForOlderProcesses(false);
testMemForOlderProcesses(true);
}
@SuppressWarnings("deprecation")
private void testMemForOlderProcesses(boolean smapEnabled) throws IOException {
// initial list of processes
String[] pids = { "100", "200", "300", "400" };
// create the fake procfs root directory.
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir, pids);
// create stat objects.
// assuming 100, 200 and 400 are in tree, 300 is not.
ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
procInfos[0] =
new ProcessStatInfo(new String[] { "100", "proc1", "1", "100", "100",
"100000", "100" });
procInfos[1] =
new ProcessStatInfo(new String[] { "200", "proc2", "100", "100",
"100", "200000", "200" });
procInfos[2] =
new ProcessStatInfo(new String[] { "300", "proc3", "1", "300", "300",
"300000", "300" });
procInfos[3] =
new ProcessStatInfo(new String[] { "400", "proc4", "100", "100",
"100", "400000", "400" });
      // always write smaps information so both enabled and disabled modes can be tested
ProcessTreeSmapMemInfo[] memInfo = new ProcessTreeSmapMemInfo[4];
memInfo[0] = new ProcessTreeSmapMemInfo("100");
memInfo[1] = new ProcessTreeSmapMemInfo("200");
memInfo[2] = new ProcessTreeSmapMemInfo("300");
memInfo[3] = new ProcessTreeSmapMemInfo("400");
createMemoryMappingInfo(memInfo);
writeStatFiles(procfsRootDir, pids, procInfos, memInfo);
// crank up the process tree class.
ProcfsBasedProcessTree processTree =
createProcessTree("100", procfsRootDir.getAbsolutePath(),
new SystemClock());
      setSmapsInProcessTree(processTree, smapEnabled);
// verify virtual memory
Assert.assertEquals("Virtual memory does not match", 700000L,
processTree.getVirtualMemorySize());
Assert.assertEquals("Virtual memory (old API) does not match", 700000L,
processTree.getCumulativeVmem());
// write one more process as child of 100.
String[] newPids = { "500" };
setupPidDirs(procfsRootDir, newPids);
ProcessStatInfo[] newProcInfos = new ProcessStatInfo[1];
newProcInfos[0] =
new ProcessStatInfo(new String[] { "500", "proc5", "100", "100",
"100", "500000", "500" });
ProcessTreeSmapMemInfo[] newMemInfos = new ProcessTreeSmapMemInfo[1];
newMemInfos[0] = new ProcessTreeSmapMemInfo("500");
createMemoryMappingInfo(newMemInfos);
writeStatFiles(procfsRootDir, newPids, newProcInfos, newMemInfos);
// check memory includes the new process.
processTree.updateProcessTree();
Assert.assertEquals("vmem does not include new process",
1200000L, processTree.getVirtualMemorySize());
Assert.assertEquals("vmem (old API) does not include new process",
1200000L, processTree.getCumulativeVmem());
if (!smapEnabled) {
long cumuRssMem =
ProcfsBasedProcessTree.PAGE_SIZE > 0
? 1200L * ProcfsBasedProcessTree.PAGE_SIZE :
ResourceCalculatorProcessTree.UNAVAILABLE;
Assert.assertEquals("rssmem does not include new process",
cumuRssMem, processTree.getRssMemorySize());
// verify old API
Assert.assertEquals("rssmem (old API) does not include new process",
cumuRssMem, processTree.getCumulativeRssmem());
} else {
Assert.assertEquals("rssmem does not include new process",
100 * KB_TO_BYTES * 4, processTree.getRssMemorySize());
// verify old API
Assert.assertEquals("rssmem (old API) does not include new process",
100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem());
}
// however processes older than 1 iteration will retain the older value
Assert.assertEquals(
"vmem shouldn't have included new process", 700000L,
processTree.getVirtualMemorySize(1));
// verify old API
Assert.assertEquals(
"vmem (old API) shouldn't have included new process", 700000L,
processTree.getCumulativeVmem(1));
if (!smapEnabled) {
long cumuRssMem =
ProcfsBasedProcessTree.PAGE_SIZE > 0
? 700L * ProcfsBasedProcessTree.PAGE_SIZE :
ResourceCalculatorProcessTree.UNAVAILABLE;
Assert.assertEquals(
"rssmem shouldn't have included new process", cumuRssMem,
processTree.getRssMemorySize(1));
// Verify old API
Assert.assertEquals(
"rssmem (old API) shouldn't have included new process", cumuRssMem,
processTree.getCumulativeRssmem(1));
} else {
Assert.assertEquals(
"rssmem shouldn't have included new process",
100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1));
// Verify old API
Assert.assertEquals(
"rssmem (old API) shouldn't have included new process",
100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(1));
}
// one more process
newPids = new String[] { "600" };
setupPidDirs(procfsRootDir, newPids);
newProcInfos = new ProcessStatInfo[1];
newProcInfos[0] =
new ProcessStatInfo(new String[] { "600", "proc6", "100", "100",
"100", "600000", "600" });
newMemInfos = new ProcessTreeSmapMemInfo[1];
newMemInfos[0] = new ProcessTreeSmapMemInfo("600");
createMemoryMappingInfo(newMemInfos);
writeStatFiles(procfsRootDir, newPids, newProcInfos, newMemInfos);
// refresh process tree
processTree.updateProcessTree();
// processes older than 2 iterations should be same as before.
Assert.assertEquals(
"vmem shouldn't have included new processes", 700000L,
processTree.getVirtualMemorySize(2));
// verify old API
Assert.assertEquals(
"vmem (old API) shouldn't have included new processes", 700000L,
processTree.getCumulativeVmem(2));
if (!smapEnabled) {
long cumuRssMem =
ProcfsBasedProcessTree.PAGE_SIZE > 0
? 700L * ProcfsBasedProcessTree.PAGE_SIZE :
ResourceCalculatorProcessTree.UNAVAILABLE;
Assert.assertEquals(
"rssmem shouldn't have included new processes",
cumuRssMem, processTree.getRssMemorySize(2));
// Verify old API
Assert.assertEquals(
"rssmem (old API) shouldn't have included new processes",
cumuRssMem, processTree.getCumulativeRssmem(2));
} else {
Assert.assertEquals(
"rssmem shouldn't have included new processes",
100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2));
// Verify old API
Assert.assertEquals(
"rssmem (old API) shouldn't have included new processes",
100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(2));
}
// processes older than 1 iteration should not include new process,
// but include process 500
Assert.assertEquals(
"vmem shouldn't have included new processes", 1200000L,
processTree.getVirtualMemorySize(1));
// verify old API
Assert.assertEquals(
"vmem (old API) shouldn't have included new processes", 1200000L,
processTree.getCumulativeVmem(1));
if (!smapEnabled) {
long cumuRssMem =
ProcfsBasedProcessTree.PAGE_SIZE > 0
? 1200L * ProcfsBasedProcessTree.PAGE_SIZE :
ResourceCalculatorProcessTree.UNAVAILABLE;
Assert.assertEquals(
"rssmem shouldn't have included new processes",
cumuRssMem, processTree.getRssMemorySize(1));
// verify old API
Assert.assertEquals(
"rssmem (old API) shouldn't have included new processes",
cumuRssMem, processTree.getCumulativeRssmem(1));
} else {
Assert.assertEquals(
"rssmem shouldn't have included new processes",
100 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1));
Assert.assertEquals(
"rssmem (old API) shouldn't have included new processes",
100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem(1));
}
// no processes older than 3 iterations
Assert.assertEquals(
"Getting non-zero vmem for processes older than 3 iterations",
0, processTree.getVirtualMemorySize(3));
// verify old API
Assert.assertEquals(
"Getting non-zero vmem (old API) for processes older than 3 iterations",
0, processTree.getCumulativeVmem(3));
Assert.assertEquals(
"Getting non-zero rssmem for processes older than 3 iterations",
0, processTree.getRssMemorySize(3));
// verify old API
Assert.assertEquals(
"Getting non-zero rssmem (old API) for processes older than 3 iterations",
0, processTree.getCumulativeRssmem(3));
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
/**
   * Verifies that ProcfsBasedProcessTree.checkPidPgrpidForMatch() handles
   * constructProcessInfo() returning null, which is simulated by not writing
   * a stat file for the mock process.
*
* @throws IOException
* if there was a problem setting up the fake procfs directories or
* files.
*/
@Test(timeout = 30000)
public void testDestroyProcessTree() throws IOException {
// test process
String pid = "100";
// create the fake procfs root directory.
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
// crank up the process tree class.
createProcessTree(pid, procfsRootDir.getAbsolutePath(), new SystemClock());
// Let us not create stat file for pid 100.
Assert.assertTrue(ProcfsBasedProcessTree.checkPidPgrpidForMatch(pid,
procfsRootDir.getAbsolutePath()));
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
/**
* Test the correctness of process-tree dump.
*
* @throws IOException
*/
@Test(timeout = 30000)
public void testProcessTreeDump() throws IOException {
String[] pids = { "100", "200", "300", "400", "500", "600" };
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir, pids);
int numProcesses = pids.length;
// Processes 200, 300, 400 and 500 are descendants of 100. 600 is not.
ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
procInfos[0] =
new ProcessStatInfo(new String[] { "100", "proc1", "1", "100", "100",
"100000", "100", "1000", "200" });
procInfos[1] =
new ProcessStatInfo(new String[] { "200", "proc2", "100", "100",
"100", "200000", "200", "2000", "400" });
procInfos[2] =
new ProcessStatInfo(new String[] { "300", "proc3", "200", "100",
"100", "300000", "300", "3000", "600" });
procInfos[3] =
new ProcessStatInfo(new String[] { "400", "proc4", "200", "100",
"100", "400000", "400", "4000", "800" });
procInfos[4] =
new ProcessStatInfo(new String[] { "500", "proc5", "400", "100",
"100", "400000", "400", "4000", "800" });
procInfos[5] =
new ProcessStatInfo(new String[] { "600", "proc6", "1", "1", "1",
"400000", "400", "4000", "800" });
ProcessTreeSmapMemInfo[] memInfos = new ProcessTreeSmapMemInfo[6];
memInfos[0] = new ProcessTreeSmapMemInfo("100");
memInfos[1] = new ProcessTreeSmapMemInfo("200");
memInfos[2] = new ProcessTreeSmapMemInfo("300");
memInfos[3] = new ProcessTreeSmapMemInfo("400");
memInfos[4] = new ProcessTreeSmapMemInfo("500");
memInfos[5] = new ProcessTreeSmapMemInfo("600");
String[] cmdLines = new String[numProcesses];
cmdLines[0] = "proc1 arg1 arg2";
cmdLines[1] = "proc2 arg3 arg4";
cmdLines[2] = "proc3 arg5 arg6";
cmdLines[3] = "proc4 arg7 arg8";
cmdLines[4] = "proc5 arg9 arg10";
cmdLines[5] = "proc6 arg11 arg12";
createMemoryMappingInfo(memInfos);
writeStatFiles(procfsRootDir, pids, procInfos, memInfos);
writeCmdLineFiles(procfsRootDir, pids, cmdLines);
ProcfsBasedProcessTree processTree =
createProcessTree("100", procfsRootDir.getAbsolutePath(),
new SystemClock());
// build the process tree.
processTree.updateProcessTree();
// Get the process-tree dump
String processTreeDump = processTree.getProcessTreeDump();
LOG.info("Process-tree dump follows: \n" + processTreeDump);
Assert.assertTrue("Process-tree dump doesn't start with a proper header",
processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME "
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
for (int i = 0; i < 5; i++) {
ProcessStatInfo p = procInfos[i];
Assert.assertTrue(
"Process-tree dump doesn't contain the cmdLineDump of process "
+ p.pid,
processTreeDump.contains("\t|- " + p.pid + " " + p.ppid + " "
+ p.pgrpId + " " + p.session + " (" + p.name + ") " + p.utime
+ " " + p.stime + " " + p.vmem + " " + p.rssmemPage + " "
+ cmdLines[i]));
}
// 600 should not be in the dump
ProcessStatInfo p = procInfos[5];
Assert.assertFalse(
"Process-tree dump shouldn't contain the cmdLineDump of process "
+ p.pid,
processTreeDump.contains("\t|- " + p.pid + " " + p.ppid + " "
+ p.pgrpId + " " + p.session + " (" + p.name + ") " + p.utime + " "
+ p.stime + " " + p.vmem + " " + cmdLines[5]));
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
protected static boolean isSetsidAvailable() {
ShellCommandExecutor shexec = null;
boolean setsidSupported = true;
try {
String[] args = { "setsid", "bash", "-c", "echo $$" };
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.warn("setsid is not available on this machine. So not using it.");
setsidSupported = false;
} finally { // handle the exit code
LOG.info("setsid exited with exit code " + shexec.getExitCode());
}
return setsidSupported;
}
/**
* Is the root-process alive? Used only in tests.
*
* @return true if the root-process is alive, false otherwise.
*/
private static boolean isAlive(String pid) {
try {
final String sigpid = isSetsidAvailable() ? "-" + pid : pid;
try {
sendSignal(sigpid, 0);
} catch (ExitCodeException e) {
return false;
}
return true;
} catch (IOException ignored) {
}
return false;
}
private static void sendSignal(String pid, int signal) throws IOException {
ShellCommandExecutor shexec = null;
String[] arg = { "kill", "-" + signal, pid };
shexec = new ShellCommandExecutor(arg);
shexec.execute();
}
/**
* Is any of the subprocesses in the process-tree alive? Used only in tests.
*
* @return true if any of the processes in the process-tree is alive, false
* otherwise.
*/
private static boolean isAnyProcessInTreeAlive(
ProcfsBasedProcessTree processTree) {
for (String pId : processTree.getCurrentProcessIDs()) {
if (isAlive(pId)) {
return true;
}
}
return false;
}
/**
* Create a directory to mimic the procfs file system's root.
*
* @param procfsRootDir
* root directory to create.
* @throws IOException
   *           if the existing procfs root directory could not be deleted
*/
public static void setupProcfsRootDir(File procfsRootDir) throws IOException {
// cleanup any existing process root dir.
if (procfsRootDir.exists()) {
Assert.assertTrue(FileUtil.fullyDelete(procfsRootDir));
}
// create afresh
Assert.assertTrue(procfsRootDir.mkdirs());
}
/**
* Create PID directories under the specified procfs root directory
*
* @param procfsRootDir
* root directory of procfs file system
* @param pids
* the PID directories to create.
* @throws IOException
* If PID dirs could not be created
*/
public static void setupPidDirs(File procfsRootDir, String[] pids)
throws IOException {
for (String pid : pids) {
File pidDir = new File(procfsRootDir, pid);
pidDir.mkdir();
if (!pidDir.exists()) {
throw new IOException("couldn't make process directory under "
+ "fake procfs");
} else {
LOG.info("created pid dir");
}
}
}
/**
* Write stat files under the specified pid directories with data setup in the
* corresponding ProcessStatInfo objects
*
* @param procfsRootDir
* root directory of procfs file system
* @param pids
* the PID directories under which to create the stat file
* @param procs
* corresponding ProcessStatInfo objects whose data should be written
* to the stat files.
* @throws IOException
* if stat files could not be written
*/
public static void writeStatFiles(File procfsRootDir, String[] pids,
ProcessStatInfo[] procs, ProcessTreeSmapMemInfo[] smaps)
throws IOException {
for (int i = 0; i < pids.length; i++) {
File statFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.PROCFS_STAT_FILE);
BufferedWriter bw = null;
try {
FileWriter fw = new FileWriter(statFile);
bw = new BufferedWriter(fw);
bw.write(procs[i].getStatLine());
LOG.info("wrote stat file for " + pids[i] + " with contents: "
+ procs[i].getStatLine());
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
if (smaps != null) {
File smapFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.SMAPS);
bw = null;
try {
FileWriter fw = new FileWriter(smapFile);
bw = new BufferedWriter(fw);
bw.write(smaps[i].toString());
bw.flush();
LOG.info("wrote smap file for " + pids[i] + " with contents: "
+ smaps[i].toString());
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
}
}
}
private static void writeCmdLineFiles(File procfsRootDir, String[] pids,
String[] cmdLines) throws IOException {
for (int i = 0; i < pids.length; i++) {
File statFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.PROCFS_CMDLINE_FILE);
BufferedWriter bw = null;
try {
bw = new BufferedWriter(new FileWriter(statFile));
bw.write(cmdLines[i]);
LOG.info("wrote command-line file for " + pids[i] + " with contents: "
+ cmdLines[i]);
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
}
}
}
| 39386 | 36.157547 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolverScriptBasedMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.junit.Assert;
import org.junit.Test;
public class TestRackResolverScriptBasedMapping {
@Test
public void testScriptName() {
Configuration conf = new Configuration();
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        ScriptBasedMapping.class, DNSToSwitchMapping.class);
conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
"testScript");
RackResolver.init(conf);
    Assert.assertEquals("script-based mapping with script testScript",
        RackResolver.getDnsToSwitchMapping().toString());
}
}
| 1679 | 36.333333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestTimes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.junit.Assert;
import org.junit.Test;
public class TestTimes {
@Test
public void testNegativeStartTimes() {
long elapsed = Times.elapsed(-5, 10, true);
Assert.assertEquals("Elapsed time is not 0", 0, elapsed);
elapsed = Times.elapsed(-5, 10, false);
Assert.assertEquals("Elapsed time is not -1", -1, elapsed);
}
@Test
public void testNegativeFinishTimes() {
long elapsed = Times.elapsed(5, -10, false);
Assert.assertEquals("Elapsed time is not -1", -1, elapsed);
}
@Test
  public void testNegativeStartAndFinishTimes() {
long elapsed = Times.elapsed(-5, -10, false);
Assert.assertEquals("Elapsed time is not -1", -1, elapsed);
}
@Test
  public void testPositiveStartAndFinishTimes() {
long elapsed = Times.elapsed(5, 10, true);
Assert.assertEquals("Elapsed time is not 5", 5, elapsed);
elapsed = Times.elapsed(5, 10, false);
Assert.assertEquals("Elapsed time is not 5", 5, elapsed);
}
@Test
public void testFinishTimesAheadOfStartTimes() {
long elapsed = Times.elapsed(10, 5, true);
Assert.assertEquals("Elapsed time is not -1", -1, elapsed);
elapsed = Times.elapsed(10, 5, false);
Assert.assertEquals("Elapsed time is not -1", -1, elapsed);
// use Long.MAX_VALUE to ensure started time is after the current one
elapsed = Times.elapsed(Long.MAX_VALUE, 0, true);
Assert.assertEquals("Elapsed time is not -1", -1, elapsed);
}
}
| 2289 | 34.78125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class TestWindowsBasedProcessTree {
private static final Log LOG = LogFactory
.getLog(TestWindowsBasedProcessTree.class);
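  // Test double that returns canned process-list output instead of shelling
  // out to winutils.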
class WindowsBasedProcessTreeTester extends WindowsBasedProcessTree {
String infoStr = null;
public WindowsBasedProcessTreeTester(String pid) {
super(pid);
}
@Override
String getAllProcessInfoFromShell() {
return infoStr;
}
}
  @Test(timeout = 30000)
@SuppressWarnings("deprecation")
public void tree() {
    if (!Shell.WINDOWS) {
LOG.info("Platform not Windows. Not testing");
return;
}
assertTrue("WindowsBasedProcessTree should be available on Windows",
WindowsBasedProcessTree.isAvailable());
WindowsBasedProcessTreeTester pTree = new WindowsBasedProcessTreeTester("-1");
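    // Each comma-separated record appears to be pid, virtual-memory bytes,
    // working-set bytes and cumulative CPU millis; vmem and RSS are equal
    // here, so the assertions alone cannot distinguish the middle two fields.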
pTree.infoStr = "3524,1024,1024,500\r\n2844,1024,1024,500\r\n";
pTree.updateProcessTree();
assertTrue(pTree.getVirtualMemorySize() == 2048);
assertTrue(pTree.getCumulativeVmem() == 2048);
assertTrue(pTree.getVirtualMemorySize(0) == 2048);
assertTrue(pTree.getCumulativeVmem(0) == 2048);
assertTrue(pTree.getRssMemorySize() == 2048);
assertTrue(pTree.getCumulativeRssmem() == 2048);
assertTrue(pTree.getRssMemorySize(0) == 2048);
assertTrue(pTree.getCumulativeRssmem(0) == 2048);
assertTrue(pTree.getCumulativeCpuTime() == 1000);
pTree.infoStr = "3524,1024,1024,1000\r\n2844,1024,1024,1000\r\n1234,1024,1024,1000\r\n";
pTree.updateProcessTree();
assertTrue(pTree.getVirtualMemorySize() == 3072);
assertTrue(pTree.getCumulativeVmem() == 3072);
assertTrue(pTree.getVirtualMemorySize(1) == 2048);
assertTrue(pTree.getCumulativeVmem(1) == 2048);
assertTrue(pTree.getRssMemorySize() == 3072);
assertTrue(pTree.getCumulativeRssmem() == 3072);
assertTrue(pTree.getRssMemorySize(1) == 2048);
assertTrue(pTree.getCumulativeRssmem(1) == 2048);
assertTrue(pTree.getCumulativeCpuTime() == 3000);
pTree.infoStr = "3524,1024,1024,1500\r\n2844,1024,1024,1500\r\n";
pTree.updateProcessTree();
assertTrue(pTree.getVirtualMemorySize() == 2048);
assertTrue(pTree.getCumulativeVmem() == 2048);
assertTrue(pTree.getVirtualMemorySize(2) == 2048);
assertTrue(pTree.getCumulativeVmem(2) == 2048);
assertTrue(pTree.getRssMemorySize() == 2048);
assertTrue(pTree.getCumulativeRssmem() == 2048);
assertTrue(pTree.getRssMemorySize(2) == 2048);
assertTrue(pTree.getCumulativeRssmem(2) == 2048);
assertTrue(pTree.getCumulativeCpuTime() == 4000);
}
}
| 3608 | 38.228261 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.io.IOException;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
/**
* A JUnit test to test {@link YarnVersionInfo}
*/
public class TestYarnVersionInfo {
/**
* Test the yarn version info routines.
* @throws IOException
*/
@Test
public void versionInfoGenerated() throws IOException {
// can't easily know what the correct values are going to be so just
// make sure they aren't Unknown
assertTrue("getVersion returned Unknown", !YarnVersionInfo.getVersion().equals("Unknown"));
assertTrue("getUser returned Unknown", !YarnVersionInfo.getUser().equals("Unknown"));
assertTrue("getUrl returned Unknown", !YarnVersionInfo.getUrl().equals("Unknown"));
assertTrue("getSrcChecksum returned Unknown", !YarnVersionInfo.getSrcChecksum().equals("Unknown"));
// these could be Unknown if the VersionInfo generated from code not in svn or git
// so just check that they return something
assertNotNull("getRevision returned null", YarnVersionInfo.getRevision());
assertNotNull("getBranch returned null", YarnVersionInfo.getBranch());
assertTrue("getBuildVersion check doesn't contain: source checksum",
YarnVersionInfo.getBuildVersion().contains("source checksum"));
}
}
| 2,217 | 38.607143 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.hamcrest.core.IsInstanceOf.*;
import static org.hamcrest.core.IsSame.*;
/**
* A JUnit test to test {@link ResourceCalculatorPlugin}
*/
public class TestResourceCalculatorProcessTree {
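  // A minimal no-op tree: just enough behaviour to exercise the reflective
  // factory method and configuration wiring verified below.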
public static class EmptyProcessTree extends ResourceCalculatorProcessTree {
public EmptyProcessTree(String pid) {
super(pid);
}
public void updateProcessTree() {
}
public String getProcessTreeDump() {
return "Empty tree for testing";
}
public long getRssMemorySize(int age) {
return 0;
}
@SuppressWarnings("deprecation")
public long getCumulativeRssmem(int age) {
return 0;
}
public long getVirtualMemorySize(int age) {
return 0;
}
@SuppressWarnings("deprecation")
public long getCumulativeVmem(int age) {
return 0;
}
public long getCumulativeCpuTime() {
return 0;
}
@Override
public float getCpuUsagePercent() {
return UNAVAILABLE;
}
public boolean checkPidPgrpidForMatch() {
return false;
}
}
@Test
public void testCreateInstance() {
ResourceCalculatorProcessTree tree;
tree = ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("1", EmptyProcessTree.class, new Configuration());
assertNotNull(tree);
assertThat(tree, instanceOf(EmptyProcessTree.class));
}
@Test
public void testCreatedInstanceConfigured() {
ResourceCalculatorProcessTree tree;
Configuration conf = new Configuration();
tree = ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("1", EmptyProcessTree.class, conf);
assertNotNull(tree);
assertThat(tree.getConf(), sameInstance(conf));
}
}
| 2,652 | 27.526882 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/ControlledClock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
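/**
 * A Clock for tests: once setTime() has been called it returns the manually
 * advanced time, and falls back to the wrapped clock after reset().
 * Typical use (a sketch):
 *
 *   ControlledClock clock = new ControlledClock(); // overridden time, starts at 0
 *   clock.tickSec(5);                              // getTime() now returns 5000
 */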
public class ControlledClock implements Clock {
private long time = -1;
private final Clock actualClock;
// Convenience for getting a controlled clock with overridden time
public ControlledClock() {
this(new SystemClock());
setTime(0);
}
public ControlledClock(Clock actualClock) {
this.actualClock = actualClock;
}
public synchronized void setTime(long time) {
this.time = time;
}
public synchronized void reset() {
time = -1;
}
public synchronized void tickSec(int seconds) {
tickMsec(seconds * 1000L);
}
public synchronized void tickMsec(long millisec) {
if (time == -1) {
throw new IllegalStateException("ControlledClock setTime should be " +
"called before incrementing time");
}
time = time + millisec;
}
@Override
public synchronized long getTime() {
if (time != -1) {
return time;
}
return actualClock.getTime();
}
}
| 1,763 | 29.413793 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class TestLog4jWarningErrorMetricsAppender {
Log4jWarningErrorMetricsAppender appender;
Log logger = LogFactory.getLog(TestLog4jWarningErrorMetricsAppender.class);
List<Long> cutoff = new ArrayList<>();
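  // Replaces any previous appender with one using the given cleanup interval
  // (seconds), message age limit (seconds) and cap on unique messages kept.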
void setupAppender(int cleanupIntervalSeconds, long messageAgeLimitSeconds,
int maxUniqueMessages) {
removeAppender();
appender =
new Log4jWarningErrorMetricsAppender(cleanupIntervalSeconds,
messageAgeLimitSeconds, maxUniqueMessages);
Logger.getRootLogger().addAppender(appender);
}
void removeAppender() {
Logger.getRootLogger().removeAppender(appender);
}
void logMessages(Level level, String message, int count) {
for (int i = 0; i < count; ++i) {
switch (level.toInt()) {
case Level.FATAL_INT:
logger.fatal(message);
break;
case Level.ERROR_INT:
logger.error(message);
break;
case Level.WARN_INT:
logger.warn(message);
break;
case Level.INFO_INT:
logger.info(message);
break;
case Level.DEBUG_INT:
logger.debug(message);
break;
case Level.TRACE_INT:
logger.trace(message);
break;
}
}
}
@Test
public void testPurge() throws Exception {
setupAppender(2, 1, 1);
logMessages(Level.ERROR, "test message 1", 1);
cutoff.clear();
cutoff.add(0L);
Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(1, appender.getErrorCounts(cutoff).get(0).longValue());
Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).get(0)
.size());
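    // Cleanup runs every 2 s and drops messages older than 1 s, so after
    // 3 s the recorded message should have been purged.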
Thread.sleep(3000);
Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
Assert.assertEquals(0, appender.getErrorMessagesAndCounts(cutoff).get(0)
.size());
setupAppender(2, 1000, 2);
logMessages(Level.ERROR, "test message 1", 3);
logMessages(Level.ERROR, "test message 2", 2);
Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(5, appender.getErrorCounts(cutoff).get(0).longValue());
Assert.assertEquals(2, appender.getErrorMessagesAndCounts(cutoff).get(0)
.size());
logMessages(Level.ERROR, "test message 3", 3);
Thread.sleep(2000);
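    // The 1000 s age limit keeps the aggregate count growing (now 8), while
    // the cleanup pass trims the per-message map back to the cap of 2.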
Assert.assertEquals(8, appender.getErrorCounts(cutoff).get(0).longValue());
Assert.assertEquals(2, appender.getErrorMessagesAndCounts(cutoff).get(0)
.size());
}
@Test
public void testErrorCounts() throws Exception {
cutoff.clear();
setupAppender(100, 100, 100);
cutoff.add(0L);
logMessages(Level.ERROR, "test message 1", 2);
logMessages(Level.ERROR, "test message 2", 3);
Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(1, appender.getWarningCounts(cutoff).size());
Assert.assertEquals(5, appender.getErrorCounts(cutoff).get(0).longValue());
Assert
.assertEquals(0, appender.getWarningCounts(cutoff).get(0).longValue());
Thread.sleep(1000);
cutoff.add(Time.now() / 1000);
logMessages(Level.ERROR, "test message 3", 2);
Assert.assertEquals(2, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(2, appender.getWarningCounts(cutoff).size());
Assert.assertEquals(7, appender.getErrorCounts(cutoff).get(0).longValue());
Assert.assertEquals(2, appender.getErrorCounts(cutoff).get(1).longValue());
Assert
.assertEquals(0, appender.getWarningCounts(cutoff).get(0).longValue());
Assert
.assertEquals(0, appender.getWarningCounts(cutoff).get(1).longValue());
}
@Test
public void testWarningCounts() throws Exception {
cutoff.clear();
setupAppender(100, 100, 100);
cutoff.add(0L);
logMessages(Level.WARN, "test message 1", 2);
logMessages(Level.WARN, "test message 2", 3);
Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(1, appender.getWarningCounts(cutoff).size());
Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
Assert
.assertEquals(5, appender.getWarningCounts(cutoff).get(0).longValue());
Thread.sleep(1000);
cutoff.add(Time.now() / 1000);
logMessages(Level.WARN, "test message 3", 2);
Assert.assertEquals(2, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(2, appender.getWarningCounts(cutoff).size());
Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(1).longValue());
Assert
.assertEquals(7, appender.getWarningCounts(cutoff).get(0).longValue());
Assert
.assertEquals(2, appender.getWarningCounts(cutoff).get(1).longValue());
}
@Test
public void testWarningMessages() throws Exception {
cutoff.clear();
setupAppender(100, 100, 100);
cutoff.add(0L);
logMessages(Level.WARN, "test message 1", 2);
logMessages(Level.WARN, "test message 2", 3);
Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).size());
Assert.assertEquals(1, appender.getWarningMessagesAndCounts(cutoff).size());
Map<String, Log4jWarningErrorMetricsAppender.Element> errorsMap =
appender.getErrorMessagesAndCounts(cutoff).get(0);
Map<String, Log4jWarningErrorMetricsAppender.Element> warningsMap =
appender.getWarningMessagesAndCounts(cutoff).get(0);
Assert.assertEquals(0, errorsMap.size());
Assert.assertEquals(2, warningsMap.size());
Assert.assertTrue(warningsMap.containsKey("test message 1"));
Assert.assertTrue(warningsMap.containsKey("test message 2"));
Log4jWarningErrorMetricsAppender.Element msg1Info = warningsMap.get("test message 1");
Log4jWarningErrorMetricsAppender.Element msg2Info = warningsMap.get("test message 2");
Assert.assertEquals(2, msg1Info.count.intValue());
Assert.assertEquals(3, msg2Info.count.intValue());
Thread.sleep(1000);
cutoff.add(Time.now() / 1000);
logMessages(Level.WARN, "test message 3", 2);
Assert.assertEquals(2, appender.getErrorMessagesAndCounts(cutoff).size());
Assert.assertEquals(2, appender.getWarningMessagesAndCounts(cutoff).size());
errorsMap = appender.getErrorMessagesAndCounts(cutoff).get(0);
warningsMap = appender.getWarningMessagesAndCounts(cutoff).get(0);
Assert.assertEquals(0, errorsMap.size());
Assert.assertEquals(3, warningsMap.size());
Assert.assertTrue(warningsMap.containsKey("test message 3"));
errorsMap = appender.getErrorMessagesAndCounts(cutoff).get(1);
warningsMap = appender.getWarningMessagesAndCounts(cutoff).get(1);
Assert.assertEquals(0, errorsMap.size());
Assert.assertEquals(1, warningsMap.size());
Assert.assertTrue(warningsMap.containsKey("test message 3"));
Log4jWarningErrorMetricsAppender.Element msg3Info = warningsMap.get("test message 3");
Assert.assertEquals(2, msg3Info.count.intValue());
}
@Test
public void testErrorMessages() throws Exception {
cutoff.clear();
setupAppender(100, 100, 100);
cutoff.add(0L);
logMessages(Level.ERROR, "test message 1", 2);
logMessages(Level.ERROR, "test message 2", 3);
Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).size());
Assert.assertEquals(1, appender.getWarningMessagesAndCounts(cutoff).size());
Map<String, Log4jWarningErrorMetricsAppender.Element> errorsMap =
appender.getErrorMessagesAndCounts(cutoff).get(0);
Map<String, Log4jWarningErrorMetricsAppender.Element> warningsMap =
appender.getWarningMessagesAndCounts(cutoff).get(0);
Assert.assertEquals(2, errorsMap.size());
Assert.assertEquals(0, warningsMap.size());
Assert.assertTrue(errorsMap.containsKey("test message 1"));
Assert.assertTrue(errorsMap.containsKey("test message 2"));
Log4jWarningErrorMetricsAppender.Element msg1Info = errorsMap.get("test message 1");
Log4jWarningErrorMetricsAppender.Element msg2Info = errorsMap.get("test message 2");
Assert.assertEquals(2, msg1Info.count.intValue());
Assert.assertEquals(3, msg2Info.count.intValue());
Thread.sleep(1000);
cutoff.add(Time.now() / 1000);
logMessages(Level.ERROR, "test message 3", 2);
Assert.assertEquals(2, appender.getErrorMessagesAndCounts(cutoff).size());
Assert.assertEquals(2, appender.getWarningMessagesAndCounts(cutoff).size());
errorsMap = appender.getErrorMessagesAndCounts(cutoff).get(0);
warningsMap = appender.getWarningMessagesAndCounts(cutoff).get(0);
Assert.assertEquals(3, errorsMap.size());
Assert.assertEquals(0, warningsMap.size());
Assert.assertTrue(errorsMap.containsKey("test message 3"));
errorsMap = appender.getErrorMessagesAndCounts(cutoff).get(1);
warningsMap = appender.getWarningMessagesAndCounts(cutoff).get(1);
Assert.assertEquals(1, errorsMap.size());
Assert.assertEquals(0, warningsMap.size());
Assert.assertTrue(errorsMap.containsKey("test message 3"));
Log4jWarningErrorMetricsAppender.Element msg3Info = errorsMap.get("test message 3");
Assert.assertEquals(2, msg3Info.count.intValue());
}
@Test
public void testInfoDebugTrace() {
cutoff.clear();
setupAppender(100, 100, 100);
cutoff.add(0L);
logMessages(Level.INFO, "test message 1", 2);
logMessages(Level.DEBUG, "test message 2", 2);
logMessages(Level.TRACE, "test message 3", 2);
Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).size());
Assert.assertEquals(1, appender.getWarningMessagesAndCounts(cutoff).size());
Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
Assert.assertEquals(1, appender.getWarningCounts(cutoff).size());
Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
Assert
.assertEquals(0, appender.getWarningCounts(cutoff).get(0).longValue());
Assert.assertEquals(0, appender.getErrorMessagesAndCounts(cutoff).get(0)
.size());
Assert.assertEquals(0, appender.getWarningMessagesAndCounts(cutoff).get(0)
.size());
}
}
| 11,247 | 42.095785 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.TestContainerId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.junit.Test;
public class TestConverterUtils {
@Test
public void testConvertUrlWithNoPort() throws URISyntaxException {
Path expectedPath = new Path("hdfs://foo.com");
URL url = ConverterUtils.getYarnUrlFromPath(expectedPath);
Path actualPath = ConverterUtils.getPathFromYarnURL(url);
assertEquals(expectedPath, actualPath);
}
@Test
public void testConvertUrlWithUserinfo() throws URISyntaxException {
Path expectedPath = new Path("foo://username:[email protected]:8042");
URL url = ConverterUtils.getYarnUrlFromPath(expectedPath);
Path actualPath = ConverterUtils.getPathFromYarnURL(url);
assertEquals(expectedPath, actualPath);
}
@Test
public void testContainerId() throws URISyntaxException {
ContainerId id = TestContainerId.newContainerId(0, 0, 0, 0);
String cid = ConverterUtils.toString(id);
assertEquals("container_0_0000_00_000000", cid);
ContainerId gen = ConverterUtils.toContainerId(cid);
assertEquals(gen, id);
}
@Test
public void testContainerIdWithEpoch() throws URISyntaxException {
ContainerId id = TestContainerId.newContainerId(0, 0, 0, 25645811);
String cid = ConverterUtils.toString(id);
assertEquals("container_0_0000_00_25645811", cid);
ContainerId gen = ConverterUtils.toContainerId(cid);
assertEquals(gen.toString(), id.toString());
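    // Container ids of 2^40 and above carry an attempt epoch in the high
    // bits, rendered as an "e<epoch>_" prefix; the low 40 bits are the
    // sequential id (e.g. 4298334883325 = 3 * 2^40 + 999799999997).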
long ts = System.currentTimeMillis();
ContainerId id2 =
TestContainerId.newContainerId(36473, 4365472, ts, 4298334883325L);
String cid2 = ConverterUtils.toString(id2);
assertEquals(
"container_e03_" + ts + "_36473_4365472_999799999997", cid2);
ContainerId gen2 = ConverterUtils.toContainerId(cid2);
assertEquals(gen2.toString(), id2.toString());
ContainerId id3 =
TestContainerId.newContainerId(36473, 4365472, ts, 844424930131965L);
String cid3 = ConverterUtils.toString(id3);
assertEquals(
"container_e767_" + ts + "_36473_4365472_1099511627773", cid3);
ContainerId gen3 = ConverterUtils.toContainerId(cid3);
assertEquals(gen3.toString(), id3.toString());
}
@Test
public void testContainerIdNull() throws URISyntaxException {
assertNull(ConverterUtils.toString((ContainerId)null));
}
@Test
public void testNodeIdWithDefaultPort() throws URISyntaxException {
NodeId nid;
nid = ConverterUtils.toNodeIdWithDefaultPort("node:10");
assertEquals(nid.getPort(), 10);
assertEquals(nid.getHost(), "node");
nid = ConverterUtils.toNodeIdWithDefaultPort("node");
assertEquals(nid.getPort(), 0);
assertEquals(nid.getHost(), "node");
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidContainerId() {
ConverterUtils.toContainerId("container_e20_1423221031460_0003_01");
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidAppattemptId() {
ConverterUtils.toApplicationAttemptId("appattempt_1423221031460");
}
@Test(expected = IllegalArgumentException.class)
public void testApplicationId() {
ConverterUtils.toApplicationId("application_1423221031460");
}
}
| 4,328 | 35.686441 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.junit.Assert;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.junit.AfterClass;
import org.junit.Test;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
public class TestFSDownload {
private static final Log LOG = LogFactory.getLog(TestFSDownload.class);
private static AtomicLong uniqueNumberGenerator =
new AtomicLong(System.currentTimeMillis());
private enum TEST_FILE_TYPE {
TAR, JAR, ZIP, TGZ
};
@AfterClass
public static void deleteTestDir() throws IOException {
FileContext fs = FileContext.getLocalFSFileContext();
fs.delete(new Path("target", TestFSDownload.class.getSimpleName()), true);
}
static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
static LocalResource createFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException {
createFile(files, p, len, r);
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(ConverterUtils.getYarnUrlFromPath(p));
ret.setSize(len);
ret.setType(LocalResourceType.FILE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(p).getModificationTime());
return ret;
}
static void createFile(FileContext files, Path p, int len, Random r)
throws IOException {
FSDataOutputStream out = null;
try {
byte[] bytes = new byte[len];
out = files.create(p, EnumSet.of(CREATE, OVERWRITE));
r.nextBytes(bytes);
out.write(bytes);
} finally {
if (out != null) out.close();
}
}
static LocalResource createJar(FileContext files, Path p,
LocalResourceVisibility vis) throws IOException {
LOG.info("Create jar file " + p);
File jarFile = new File((files.makeQualified(p)).toUri());
FileOutputStream stream = new FileOutputStream(jarFile);
LOG.info("Create jar out stream ");
JarOutputStream out = new JarOutputStream(stream, new Manifest());
LOG.info("Done writing jar stream ");
out.close();
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(ConverterUtils.getYarnUrlFromPath(p));
FileStatus status = files.getFileStatus(p);
ret.setSize(status.getLen());
ret.setTimestamp(status.getModificationTime());
ret.setType(LocalResourceType.PATTERN);
ret.setVisibility(vis);
ret.setPattern("classes/.*");
return ret;
}
static LocalResource createTarFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File archiveFile = new File(p.toUri().getPath() + ".tar");
archiveFile.createNewFile();
TarArchiveOutputStream out = new TarArchiveOutputStream(
new FileOutputStream(archiveFile));
TarArchiveEntry entry = new TarArchiveEntry(p.getName());
entry.setSize(bytes.length);
out.putArchiveEntry(entry);
out.write(bytes);
out.closeArchiveEntry();
out.close();
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
+ ".tar")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".tar"))
.getModificationTime());
return ret;
}
static LocalResource createTgzFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File gzipFile = new File(p.toUri().getPath() + ".tar.gz");
gzipFile.createNewFile();
TarArchiveOutputStream out = new TarArchiveOutputStream(
new GZIPOutputStream(new FileOutputStream(gzipFile)));
TarArchiveEntry entry = new TarArchiveEntry(p.getName());
entry.setSize(bytes.length);
out.putArchiveEntry(entry);
out.write(bytes);
out.closeArchiveEntry();
out.close();
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
+ ".tar.gz")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".tar.gz"))
.getModificationTime());
return ret;
}
static LocalResource createJarFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File archiveFile = new File(p.toUri().getPath() + ".jar");
archiveFile.createNewFile();
JarOutputStream out = new JarOutputStream(
new FileOutputStream(archiveFile));
out.putNextEntry(new JarEntry(p.getName()));
out.write(bytes);
out.closeEntry();
out.close();
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
+ ".jar")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".jar"))
.getModificationTime());
return ret;
}
static LocalResource createZipFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File archiveFile = new File(p.toUri().getPath() + ".ZIP");
archiveFile.createNewFile();
ZipOutputStream out = new ZipOutputStream(
new FileOutputStream(archiveFile));
out.putNextEntry(new ZipEntry(p.getName()));
out.write(bytes);
out.closeEntry();
out.close();
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
+ ".ZIP")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".ZIP"))
.getModificationTime());
return ret;
}
@Test (timeout=10000)
public void testDownloadBadPublic() throws IOException, URISyntaxException,
InterruptedException {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
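    // A 077 umask leaves the created file readable only by its owner, so
    // localizing it with PUBLIC visibility must be rejected below.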
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Map<LocalResource, LocalResourceVisibility> rsrcVis =
new HashMap<LocalResource, LocalResourceVisibility>();
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource,Future<Path>> pending =
new HashMap<LocalResource,Future<Path>>();
ExecutorService exec = Executors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
int size = 512;
LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
Path path = new Path(basedir, "test-file");
LocalResource rsrc = createFile(files, path, size, rand, vis);
rsrcVis.put(rsrc, vis);
Path destPath = dirs.getLocalPathForWrite(
basedir.toString(), size, conf);
destPath = new Path (destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
Assert.assertTrue(pending.get(rsrc).isDone());
try {
for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) {
p.getValue().get();
Assert.fail("We localized a file that is not public.");
}
} catch (ExecutionException e) {
Assert.assertTrue(e.getCause() instanceof IOException);
}
}
@Test (timeout=60000)
public void testDownloadPublicWithStatCache() throws IOException,
URISyntaxException, InterruptedException, ExecutionException {
final Configuration conf = new Configuration();
FileContext files = FileContext.getLocalFSFileContext(conf);
Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
// if test directory doesn't have ancestor permission, skip this test
FileSystem f = basedir.getFileSystem(conf);
assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f, basedir, null));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
int size = 512;
final ConcurrentMap<Path,AtomicInteger> counts =
new ConcurrentHashMap<Path,AtomicInteger>();
final CacheLoader<Path,Future<FileStatus>> loader =
FSDownload.createStatusCacheLoader(conf);
final LoadingCache<Path,Future<FileStatus>> statCache =
CacheBuilder.newBuilder().build(new CacheLoader<Path,Future<FileStatus>>() {
public Future<FileStatus> load(Path path) throws Exception {
// increment the count
AtomicInteger count = counts.get(path);
if (count == null) {
count = new AtomicInteger(0);
AtomicInteger existing = counts.putIfAbsent(path, count);
if (existing != null) {
count = existing;
}
}
count.incrementAndGet();
// use the default loader
return loader.load(path);
}
});
// test FSDownload.isPublic() concurrently
final int fileCount = 3;
List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
for (int i = 0; i < fileCount; i++) {
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
final Path path = new Path(basedir, "test-file-" + i);
createFile(files, path, size, rand);
final FileSystem fs = path.getFileSystem(conf);
final FileStatus sStat = fs.getFileStatus(path);
tasks.add(new Callable<Boolean>() {
public Boolean call() throws IOException {
return FSDownload.isPublic(fs, path, sStat, statCache);
}
});
}
ExecutorService exec = Executors.newFixedThreadPool(fileCount);
try {
List<Future<Boolean>> futures = exec.invokeAll(tasks);
// files should be public
for (Future<Boolean> future: futures) {
assertTrue(future.get());
}
// for each path exactly one file status call should be made
for (AtomicInteger count: counts.values()) {
        assertEquals(1, count.get());
}
} finally {
exec.shutdown();
}
}
@Test (timeout=10000)
public void testDownload() throws IOException, URISyntaxException,
InterruptedException {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Map<LocalResource, LocalResourceVisibility> rsrcVis =
new HashMap<LocalResource, LocalResourceVisibility>();
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource,Future<Path>> pending =
new HashMap<LocalResource,Future<Path>>();
ExecutorService exec = Executors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
int[] sizes = new int[10];
for (int i = 0; i < 10; ++i) {
sizes[i] = rand.nextInt(512) + 512;
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
if (i%2 == 1) {
vis = LocalResourceVisibility.APPLICATION;
}
Path p = new Path(basedir, "" + i);
LocalResource rsrc = createFile(files, p, sizes[i], rand, vis);
rsrcVis.put(rsrc, vis);
Path destPath = dirs.getLocalPathForWrite(
basedir.toString(), sizes[i], conf);
destPath = new Path (destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
}
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
for (Future<Path> path: pending.values()) {
Assert.assertTrue(path.isDone());
}
try {
for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) {
Path localized = p.getValue().get();
assertEquals(sizes[Integer.valueOf(localized.getName())], p.getKey()
.getSize());
FileStatus status = files.getFileStatus(localized.getParent());
FsPermission perm = status.getPermission();
assertEquals("Cache directory permissions are incorrect",
new FsPermission((short)0755), perm);
status = files.getFileStatus(localized);
perm = status.getPermission();
System.out.println("File permission " + perm +
" for rsrc vis " + p.getKey().getVisibility().name());
assert(rsrcVis.containsKey(p.getKey()));
Assert.assertTrue("Private file should be 500",
perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort());
}
} catch (ExecutionException e) {
throw new IOException("Failed exec", e);
}
}
private void downloadWithFileType(TEST_FILE_TYPE fileType) throws IOException,
URISyntaxException, InterruptedException{
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
ExecutorService exec = Executors.newSingleThreadExecutor();
LocalDirAllocator dirs = new LocalDirAllocator(
TestFSDownload.class.getName());
int size = rand.nextInt(512) + 512;
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
Path p = new Path(basedir, "" + 1);
String strFileName = "";
LocalResource rsrc = null;
switch (fileType) {
case TAR:
rsrc = createTarFile(files, p, size, rand, vis);
break;
case JAR:
rsrc = createJarFile(files, p, size, rand, vis);
rsrc.setType(LocalResourceType.PATTERN);
break;
case ZIP:
rsrc = createZipFile(files, p, size, rand, vis);
strFileName = p.getName() + ".ZIP";
break;
case TGZ:
rsrc = createTgzFile(files, p, size, rand, vis);
break;
}
Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
destPath = new Path (destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd = new FSDownload(files,
UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
try {
pending.get(rsrc).get(); // see if there was an Exception during download
FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
basedir);
for (FileStatus filestatus : filesstatus) {
if (filestatus.isDirectory()) {
FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
filestatus.getPath());
for (FileStatus childfile : childFiles) {
if(strFileName.endsWith(".ZIP") &&
childfile.getPath().getName().equals(strFileName) &&
!childfile.isDirectory()) {
Assert.fail("Failure...After unzip, there should have been a" +
" directory formed with zip file name but found a file. "
+ childfile.getPath());
}
if (childfile.getPath().getName().startsWith("tmp")) {
Assert.fail("Tmp File should not have been there "
+ childfile.getPath());
}
}
}
}
    } catch (Exception e) {
throw new IOException("Failed exec", e);
}
}
@Test (timeout=10000)
public void testDownloadArchive() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.TAR);
}
@Test (timeout=10000)
public void testDownloadPatternJar() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.JAR);
}
@Test (timeout=10000)
public void testDownloadArchiveZip() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.ZIP);
}
/*
* To test fix for YARN-3029
*/
@Test (timeout=10000)
public void testDownloadArchiveZipWithTurkishLocale() throws IOException,
URISyntaxException, InterruptedException {
Locale defaultLocale = Locale.getDefault();
// Set to Turkish
Locale turkishLocale = new Locale("tr", "TR");
Locale.setDefault(turkishLocale);
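    // In tr_TR, case-converting 'I'/'i' yields the dotted/dotless variants,
    // which broke the case-insensitive ".ZIP" suffix match before YARN-3029.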
downloadWithFileType(TEST_FILE_TYPE.ZIP);
// Set the locale back to original default locale
Locale.setDefault(defaultLocale);
}
@Test (timeout=10000)
public void testDownloadArchiveTgz() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.TGZ);
}
private void verifyPermsRecursively(FileSystem fs,
FileContext files, Path p,
LocalResourceVisibility vis) throws IOException {
FileStatus status = files.getFileStatus(p);
if (status.isDirectory()) {
if (vis == LocalResourceVisibility.PUBLIC) {
Assert.assertTrue(status.getPermission().toShort() ==
FSDownload.PUBLIC_DIR_PERMS.toShort());
}
else {
Assert.assertTrue(status.getPermission().toShort() ==
FSDownload.PRIVATE_DIR_PERMS.toShort());
}
if (!status.isSymlink()) {
FileStatus[] statuses = fs.listStatus(p);
for (FileStatus stat : statuses) {
verifyPermsRecursively(fs, files, stat.getPath(), vis);
}
}
}
else {
if (vis == LocalResourceVisibility.PUBLIC) {
Assert.assertTrue(status.getPermission().toShort() ==
FSDownload.PUBLIC_FILE_PERMS.toShort());
}
else {
Assert.assertTrue(status.getPermission().toShort() ==
FSDownload.PRIVATE_FILE_PERMS.toShort());
}
}
}
@Test (timeout=10000)
public void testDirDownload() throws IOException, InterruptedException {
Configuration conf = new Configuration();
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Map<LocalResource, LocalResourceVisibility> rsrcVis =
new HashMap<LocalResource, LocalResourceVisibility>();
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource,Future<Path>> pending =
new HashMap<LocalResource,Future<Path>>();
ExecutorService exec = Executors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
for (int i = 0; i < 5; ++i) {
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
if (i%2 == 1) {
vis = LocalResourceVisibility.APPLICATION;
}
Path p = new Path(basedir, "dir" + i + ".jar");
LocalResource rsrc = createJar(files, p, vis);
rsrcVis.put(rsrc, vis);
Path destPath = dirs.getLocalPathForWrite(
basedir.toString(), conf);
destPath = new Path (destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
}
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
for (Future<Path> path: pending.values()) {
Assert.assertTrue(path.isDone());
}
try {
for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) {
Path localized = p.getValue().get();
FileStatus status = files.getFileStatus(localized);
System.out.println("Testing path " + localized);
assert(status.isDirectory());
assert(rsrcVis.containsKey(p.getKey()));
verifyPermsRecursively(localized.getFileSystem(conf),
files, localized, rsrcVis.get(p.getKey()));
}
} catch (ExecutionException e) {
throw new IOException("Failed exec", e);
}
}
@Test (timeout=10000)
public void testUniqueDestinationPath() throws Exception {
Configuration conf = new Configuration();
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
ExecutorService singleThreadedExec = Executors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
destPath =
new Path(destPath, Long.toString(uniqueNumberGenerator
.incrementAndGet()));
Path p = new Path(basedir, "dir" + 0 + ".jar");
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
LocalResource rsrc = createJar(files, p, vis);
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
Future<Path> rPath = singleThreadedExec.submit(fsd);
singleThreadedExec.shutdown();
while (!singleThreadedExec.awaitTermination(1000, TimeUnit.MILLISECONDS));
Assert.assertTrue(rPath.isDone());
// Now FSDownload will not create a random directory to localize the
// resource. Therefore the final localizedPath for the resource should be
// destination directory (passed as an argument) + file name.
Assert.assertEquals(destPath, rPath.get().getParent());
}
}
| 26,626 | 36.930199 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Appender;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.Priority;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
public class TestAdHocLogDumper {
private static final Log LOG = LogFactory.getLog(TestAdHocLogDumper.class);
@Test
public void testDumpingSchedulerLogs() throws Exception {
Map<Appender, Priority> levels = new HashMap<>();
String logHierarchy = TestAdHocLogDumper.class.getName();
String logFilename = "test.log";
Log log = LogFactory.getLog(logHierarchy);
if (log instanceof Log4JLogger) {
for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
.hasMoreElements();) {
Object obj = appenders.nextElement();
if (obj instanceof AppenderSkeleton) {
AppenderSkeleton appender = (AppenderSkeleton) obj;
levels.put(appender, appender.getThreshold());
}
}
}
AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logFilename);
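    // Route matching events for this logger into test.log for the next
    // 1000 ms at DEBUG level, then restore the original thresholds.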
dumper.dumpLogs("DEBUG", 1000);
LOG.debug("test message 1");
LOG.info("test message 2");
File logFile = new File(logFilename);
Assert.assertTrue(logFile.exists());
Thread.sleep(2000);
long lastWrite = logFile.lastModified();
Assert.assertTrue(lastWrite < Time.now());
Assert.assertTrue(logFile.length() != 0);
// make sure levels are set back to their original values
if (log instanceof Log4JLogger) {
for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
.hasMoreElements();) {
Object obj = appenders.nextElement();
if (obj instanceof AppenderSkeleton) {
AppenderSkeleton appender = (AppenderSkeleton) obj;
Assert.assertEquals(levels.get(appender), appender.getThreshold());
}
}
}
boolean del = logFile.delete();
if(!del) {
LOG.info("Couldn't clean up after test");
}
}
}
| 3,094 | 34.574713 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestApps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.util.Shell;
import org.junit.Test;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import static org.junit.Assert.assertEquals;
public class TestApps {
@Test
public void testSetEnvFromInputString() {
Map<String, String> environment = new HashMap<String, String>();
environment.put("JAVA_HOME", "/path/jdk");
String goodEnv = "a1=1,b_2=2,_c=3,d=4,e=,f_win=%JAVA_HOME%"
+ ",g_nix=$JAVA_HOME";
Apps.setEnvFromInputString(environment, goodEnv, File.pathSeparator);
assertEquals("1", environment.get("a1"));
assertEquals("2", environment.get("b_2"));
assertEquals("3", environment.get("_c"));
assertEquals("4", environment.get("d"));
assertEquals("", environment.get("e"));
if (Shell.WINDOWS) {
assertEquals("$JAVA_HOME", environment.get("g_nix"));
assertEquals("/path/jdk", environment.get("f_win"));
} else {
assertEquals("/path/jdk", environment.get("g_nix"));
assertEquals("%JAVA_HOME%", environment.get("f_win"));
}
String badEnv = "1,,2=a=b,3=a=,4==,5==a,==,c-3=3,=";
environment.clear();
Apps.setEnvFromInputString(environment, badEnv, File.pathSeparator);
    assertEquals(0, environment.size());
// Test "=" in the value part
environment.clear();
Apps.setEnvFromInputString(environment, "b1,e1==,e2=a1=a2,b2",
File.pathSeparator);
assertEquals("=", environment.get("e1"));
assertEquals("a1=a2", environment.get("e2"));
}
}
| 2,332 | 36.629032 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.resource;
import org.apache.hadoop.yarn.api.records.Resource;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class TestResources {
public Resource createResource(int memory, int vCores) {
return Resource.newInstance(memory, vCores);
}
@Test(timeout=1000)
public void testCompareToWithUnboundedResource() {
assertTrue(Resources.unbounded().compareTo(
createResource(Integer.MAX_VALUE, Integer.MAX_VALUE)) == 0);
assertTrue(Resources.unbounded().compareTo(
createResource(Integer.MAX_VALUE, 0)) > 0);
assertTrue(Resources.unbounded().compareTo(
createResource(0, Integer.MAX_VALUE)) > 0);
}
@Test(timeout=1000)
public void testCompareToWithNoneResource() {
assertTrue(Resources.none().compareTo(createResource(0, 0)) == 0);
assertTrue(Resources.none().compareTo(
createResource(1, 0)) < 0);
assertTrue(Resources.none().compareTo(
createResource(0, 1)) < 0);
}
}
| 1,828 | 34.862745 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.resource;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.yarn.api.records.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public class TestResourceCalculator {
private ResourceCalculator resourceCalculator;
@Parameterized.Parameters
public static Collection<ResourceCalculator[]> getParameters() {
return Arrays.asList(new ResourceCalculator[][] {
{ new DefaultResourceCalculator() },
{ new DominantResourceCalculator() } });
}
public TestResourceCalculator(ResourceCalculator rs) {
this.resourceCalculator = rs;
}
@Test(timeout = 10000)
public void testResourceCalculatorCompareMethod() {
Resource clusterResource = Resource.newInstance(0, 0);
// For lhs == rhs
Resource lhs = Resource.newInstance(0, 0);
Resource rhs = Resource.newInstance(0, 0);
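    // assertResourcesOperations arguments, in order: expected lessThan,
    // lessThanOrEqual, greaterThan, greaterThanOrEqual, max and min.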
assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
true, lhs, lhs);
// lhs > rhs
lhs = Resource.newInstance(1, 1);
rhs = Resource.newInstance(0, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
true, lhs, rhs);
// For lhs < rhs
lhs = Resource.newInstance(0, 0);
rhs = Resource.newInstance(1, 1);
assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
false, rhs, lhs);
if (!(resourceCalculator instanceof DominantResourceCalculator)) {
return;
}
// verify for 2 dimensional resources i.e memory and cpu
// dominant resource types
lhs = Resource.newInstance(1, 0);
rhs = Resource.newInstance(0, 1);
assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
true, lhs, lhs);
lhs = Resource.newInstance(0, 1);
rhs = Resource.newInstance(1, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
true, lhs, lhs);
lhs = Resource.newInstance(1, 1);
rhs = Resource.newInstance(1, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
true, lhs, rhs);
lhs = Resource.newInstance(0, 1);
rhs = Resource.newInstance(1, 1);
assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
false, rhs, lhs);
}
private void assertResourcesOperations(Resource clusterResource,
Resource lhs, Resource rhs, boolean lessThan, boolean lessThanOrEqual,
boolean greaterThan, boolean greaterThanOrEqual, Resource max,
Resource min) {
Assert.assertEquals("Less Than operation is wrongly calculated.", lessThan,
Resources.lessThan(resourceCalculator, clusterResource, lhs, rhs));
Assert.assertEquals(
"Less Than Or Equal To operation is wrongly calculated.",
lessThanOrEqual, Resources.lessThanOrEqual(resourceCalculator,
clusterResource, lhs, rhs));
Assert.assertEquals("Greater Than operation is wrongly calculated.",
greaterThan,
Resources.greaterThan(resourceCalculator, clusterResource, lhs, rhs));
Assert.assertEquals(
"Greater Than Or Equal To operation is wrongly calculated.",
greaterThanOrEqual, Resources.greaterThanOrEqual(resourceCalculator,
clusterResource, lhs, rhs));
Assert.assertEquals("Max(value) Operation wrongly calculated.", max,
Resources.max(resourceCalculator, clusterResource, lhs, rhs));
Assert.assertEquals("Min(value) operation is wrongly calculated.", min,
Resources.min(resourceCalculator, clusterResource, lhs, rhs));
}
}
| 4,487 | 34.904 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
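/**
 * Tests for CommonNodeLabelsManager: adding/removing cluster node labels,
 * label validation, node-to-label mappings, and behavior when node labels
 * are disabled or configured in distributed mode.
 */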
public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
DummyCommonNodeLabelsManager mgr = null;
@Before
public void before() {
mgr = new DummyCommonNodeLabelsManager();
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
mgr.init(conf);
mgr.start();
}
@After
public void after() {
mgr.stop();
}
@Test(timeout = 5000)
public void testAddRemovelabel() throws Exception {
// Add some label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("hello"));
verifyNodeLabelAdded(Sets.newHashSet("hello"), mgr.lastAddedlabels);
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("world"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("hello1", "world1"));
verifyNodeLabelAdded(Sets.newHashSet("hello1", "world1"), mgr.lastAddedlabels);
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Sets.newHashSet("hello", "world", "hello1", "world1")));
try {
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("hello1",
false)));
Assert.fail("IOException not thrown on exclusivity change of labels");
} catch (Exception e) {
Assert.assertTrue("IOException is expected when exclusivity is modified",
e instanceof IOException);
}
try {
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("hello1",
true)));
} catch (Exception e) {
Assert.assertFalse(
"IOException not expected when no change in exclusivity",
e instanceof IOException);
}
    // trying to remove a null, empty, or non-existent label should fail
for (String p : Arrays.asList(null, CommonNodeLabelsManager.NO_LABEL, "xx")) {
boolean caught = false;
try {
mgr.removeFromClusterNodeLabels(Arrays.asList(p));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("remove label should fail "
+ "when label is null/empty/non-existed", caught);
}
// Remove some label
mgr.removeFromClusterNodeLabels(Arrays.asList("hello"));
assertCollectionEquals(Sets.newHashSet("hello"), mgr.lastRemovedlabels);
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("world", "hello1", "world1")));
mgr.removeFromClusterNodeLabels(Arrays
.asList("hello1", "world1", "world"));
Assert.assertTrue(mgr.lastRemovedlabels.containsAll(Sets.newHashSet(
"hello1", "world1", "world")));
Assert.assertTrue(mgr.getClusterNodeLabelNames().isEmpty());
}
@Test(timeout = 5000)
public void testAddlabelWithCase() throws Exception {
    // Add a label; case is preserved, not ignored
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("HeLlO"));
verifyNodeLabelAdded(Sets.newHashSet("HeLlO"), mgr.lastAddedlabels);
Assert.assertFalse(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("hello")));
}
@Test(timeout = 5000)
public void testAddlabelWithExclusivity() throws Exception {
    // Add labels with explicit exclusivity flags
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("a", false), NodeLabel.newInstance("b", true)));
Assert.assertFalse(mgr.isExclusiveNodeLabel("a"));
Assert.assertTrue(mgr.isExclusiveNodeLabel("b"));
}
@Test(timeout = 5000)
public void testAddInvalidlabel() throws IOException {
boolean caught = false;
try {
Set<String> set = new HashSet<String>();
set.add(null);
mgr.addToCluserNodeLabelsWithDefaultExclusivity(set);
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("null label should not add to repo", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of(CommonNodeLabelsManager.NO_LABEL));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("empty label should not add to repo", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("-?"));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("invalid label charactor should not add to repo", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of(StringUtils.repeat("c", 257)));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("too long label should not add to repo", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("-aaabbb"));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("label cannot start with \"-\"", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("_aaabbb"));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("label cannot start with \"_\"", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("a^aabbb"));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("label cannot contains other chars like ^[] ...", caught);
caught = false;
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("aa[a]bbb"));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("label cannot contains other chars like ^[] ...", caught);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout = 5000)
public void testAddReplaceRemoveLabelsOnNodes() throws Exception {
// set a label on a node, but label doesn't exist
boolean caught = false;
try {
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("node"), toSet("label")));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("trying to set a label to a node but "
+ "label doesn't exist in repository should fail", caught);
// set a label on a node, but node is null or empty
try {
mgr.replaceLabelsOnNode(ImmutableMap.of(
toNodeId(CommonNodeLabelsManager.NO_LABEL), toSet("label")));
} catch (IOException e) {
caught = true;
}
Assert.assertTrue("trying to add a empty node but succeeded", caught);
// set node->label one by one
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2"), toSet("p3")));
assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"),
toSet("p2"), toNodeId("n2"), toSet("p3")));
assertMapEquals(mgr.lastNodeToLabels,
ImmutableMap.of(toNodeId("n2"), toSet("p3")));
// set bunch of node->label
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
toNodeId("n1"), toSet("p1")));
assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"),
toSet("p1"), toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3")));
assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n3"),
toSet("p3"), toNodeId("n1"), toSet("p1")));
/*
* n1: p1
* n2: p3
* n3: p3
*/
// remove label on node
mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
toSet("p3"), toNodeId("n3"), toSet("p3")));
assertMapEquals(mgr.lastNodeToLabels,
ImmutableMap.of(toNodeId("n1"), CommonNodeLabelsManager.EMPTY_STRING_SET));
// add label on node
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
assertMapEquals(
mgr.getNodeLabels(),
ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n2"),
toSet("p3"), toNodeId("n3"), toSet("p3")));
assertMapEquals(mgr.lastNodeToLabels,
ImmutableMap.of(toNodeId("n1"), toSet("p1")));
// remove labels on node
mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"),
toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3")));
Assert.assertEquals(0, mgr.getNodeLabels().size());
assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n1"),
CommonNodeLabelsManager.EMPTY_STRING_SET, toNodeId("n2"),
CommonNodeLabelsManager.EMPTY_STRING_SET, toNodeId("n3"),
CommonNodeLabelsManager.EMPTY_STRING_SET));
}
@Test(timeout = 5000)
public void testRemovelabelWithNodes() throws Exception {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2"), toSet("p2")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n3"), toSet("p3")));
mgr.removeFromClusterNodeLabels(ImmutableSet.of("p1"));
assertMapEquals(mgr.getNodeLabels(),
ImmutableMap.of(toNodeId("n2"), toSet("p2"), toNodeId("n3"), toSet("p3")));
assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p1"));
mgr.removeFromClusterNodeLabels(ImmutableSet.of("p2", "p3"));
Assert.assertTrue(mgr.getNodeLabels().isEmpty());
Assert.assertTrue(mgr.getClusterNodeLabelNames().isEmpty());
assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p2", "p3"));
}
@Test(timeout = 5000)
public void testTrimLabelsWhenAddRemoveNodeLabels() throws IOException {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet(" p1"));
assertCollectionEquals(mgr.getClusterNodeLabelNames(), toSet("p1"));
mgr.removeFromClusterNodeLabels(toSet("p1 "));
Assert.assertTrue(mgr.getClusterNodeLabelNames().isEmpty());
}
@Test(timeout = 5000)
public void testTrimLabelsWhenModifyLabelsOnNodes() throws IOException {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet(" p1", "p2"));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1 ")));
assertMapEquals(
mgr.getNodeLabels(),
ImmutableMap.of(toNodeId("n1"), toSet("p1")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet(" p2")));
assertMapEquals(
mgr.getNodeLabels(),
ImmutableMap.of(toNodeId("n1"), toSet("p2")));
mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet(" p2 ")));
Assert.assertTrue(mgr.getNodeLabels().isEmpty());
}
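  /**
   * Labels set on a host entry (a NodeId with the wildcard port, e.g. "n1")
   * must propagate to the individual node entries of that host (e.g. "n1:1"
   * and "n1:2").
   */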
@Test(timeout = 5000)
public void testReplaceLabelsOnHostsShouldUpdateNodesBelongTo()
throws IOException {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
assertMapEquals(
mgr.getNodeLabels(),
ImmutableMap.of(toNodeId("n1"), toSet("p1")));
    // Replace labels on n1:1 and n1:2 with p2
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2"),
toNodeId("n1:2"), toSet("p2")));
assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"),
toSet("p1"), toNodeId("n1:1"), toSet("p2"), toNodeId("n1:2"),
toSet("p2")));
    // Replace labels on host n1 with p1; n1:1 and n1:2 become p1 as well
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"),
toSet("p1"), toNodeId("n1:1"), toSet("p1"), toNodeId("n1:2"),
toSet("p1")));
    // Set labels on n1:1 to p2 again to verify that add/remove works
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2")));
}
private void assertNodeLabelsDisabledErrorMessage(IOException e) {
Assert.assertEquals(CommonNodeLabelsManager.NODE_LABELS_NOT_ENABLED_ERR,
e.getMessage());
}
@Test(timeout = 5000)
public void testNodeLabelsDisabled() throws IOException {
DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager();
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false);
mgr.init(conf);
mgr.start();
boolean caught = false;
// add labels
try {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x"));
} catch (IOException e) {
assertNodeLabelsDisabledErrorMessage(e);
caught = true;
}
// check exception caught
Assert.assertTrue(caught);
caught = false;
// remove labels
try {
mgr.removeFromClusterNodeLabels(ImmutableSet.of("x"));
} catch (IOException e) {
assertNodeLabelsDisabledErrorMessage(e);
caught = true;
}
// check exception caught
Assert.assertTrue(caught);
caught = false;
// add labels to node
try {
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("host", 0),
CommonNodeLabelsManager.EMPTY_STRING_SET));
} catch (IOException e) {
assertNodeLabelsDisabledErrorMessage(e);
caught = true;
}
// check exception caught
Assert.assertTrue(caught);
caught = false;
// remove labels from node
try {
mgr.removeLabelsFromNode(ImmutableMap.of(NodeId.newInstance("host", 0),
CommonNodeLabelsManager.EMPTY_STRING_SET));
} catch (IOException e) {
assertNodeLabelsDisabledErrorMessage(e);
caught = true;
}
// check exception caught
Assert.assertTrue(caught);
caught = false;
// replace labels on node
try {
mgr.replaceLabelsOnNode(ImmutableMap.of(NodeId.newInstance("host", 0),
CommonNodeLabelsManager.EMPTY_STRING_SET));
} catch (IOException e) {
assertNodeLabelsDisabledErrorMessage(e);
caught = true;
}
// check exception caught
Assert.assertTrue(caught);
caught = false;
mgr.close();
}
@Test(timeout = 5000)
public void testLabelsToNodes()
throws IOException {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
Map<String, Set<NodeId>> labelsToNodes = mgr.getLabelsToNodes();
assertLabelsToNodesEquals(
labelsToNodes,
ImmutableMap.of(
"p1", toSet(toNodeId("n1"))));
assertLabelsToNodesEquals(
labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels()));
    // Replace labels on n1:1 and n1:2 with p2
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2"),
toNodeId("n1:2"), toSet("p2")));
labelsToNodes = mgr.getLabelsToNodes();
assertLabelsToNodesEquals(
labelsToNodes,
ImmutableMap.of(
"p1", toSet(toNodeId("n1")),
"p2", toSet(toNodeId("n1:1"),toNodeId("n1:2"))));
assertLabelsToNodesEquals(
labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels()));
    // Replace labels on host n1 with p1; n1:1 and n1:2 become p1 as well
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
labelsToNodes = mgr.getLabelsToNodes();
assertLabelsToNodesEquals(
labelsToNodes,
ImmutableMap.of(
"p1", toSet(toNodeId("n1"),toNodeId("n1:1"),toNodeId("n1:2"))));
assertLabelsToNodesEquals(
labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels()));
    // Set labels on n1:1 to p2 again to verify that add/remove works
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p2")));
    // Add p3 to n2; p1 stays on n1/n1:2, p2 stays on n1:1, p3 maps to n2
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p3")));
labelsToNodes = mgr.getLabelsToNodes();
assertLabelsToNodesEquals(
labelsToNodes,
ImmutableMap.of(
"p1", toSet(toNodeId("n1"),toNodeId("n1:2")),
"p2", toSet(toNodeId("n1:1")),
"p3", toSet(toNodeId("n2"))));
assertLabelsToNodesEquals(
labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels()));
    // Remove p3 from n2; n1:1 keeps p2 and n1/n1:2 keep p1
mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n2"), toSet("p3")));
labelsToNodes = mgr.getLabelsToNodes();
assertLabelsToNodesEquals(
labelsToNodes,
ImmutableMap.of(
"p1", toSet(toNodeId("n1"),toNodeId("n1:2")),
"p2", toSet(toNodeId("n1:1"))));
assertLabelsToNodesEquals(
labelsToNodes, transposeNodeToLabels(mgr.getNodeLabels()));
}
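  /**
   * getLabelsToNodes(labels) should return the label-to-nodes mapping
   * restricted to the requested subset of labels.
   */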
@Test(timeout = 5000)
public void testLabelsToNodesForSelectedLabels()
throws IOException {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.addLabelsToNode(
ImmutableMap.of(
toNodeId("n1:1"), toSet("p1"),
toNodeId("n1:2"), toSet("p2")));
Set<String> setlabels =
new HashSet<String>(Arrays.asList(new String[]{"p1"}));
assertLabelsToNodesEquals(mgr.getLabelsToNodes(setlabels),
ImmutableMap.of("p1", toSet(toNodeId("n1:1"))));
    // Replace labels on host n1 with p3; n1, n1:1 and n1:2 all become p3
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p3")));
assertTrue(mgr.getLabelsToNodes(setlabels).isEmpty());
setlabels = new HashSet<String>(Arrays.asList(new String[]{"p2", "p3"}));
assertLabelsToNodesEquals(
mgr.getLabelsToNodes(setlabels),
ImmutableMap.of(
"p3", toSet(toNodeId("n1"), toNodeId("n1:1"),toNodeId("n1:2"))));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p2")));
assertLabelsToNodesEquals(
mgr.getLabelsToNodes(setlabels),
ImmutableMap.of(
"p2", toSet(toNodeId("n2")),
"p3", toSet(toNodeId("n1"), toNodeId("n1:1"),toNodeId("n1:2"))));
mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p3")));
setlabels =
new HashSet<String>(Arrays.asList(new String[]{"p1", "p2", "p3"}));
assertLabelsToNodesEquals(
mgr.getLabelsToNodes(setlabels),
ImmutableMap.of(
"p2", toSet(toNodeId("n2"))));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n3"), toSet("p1")));
assertLabelsToNodesEquals(
mgr.getLabelsToNodes(setlabels),
ImmutableMap.of(
"p1", toSet(toNodeId("n3")),
"p2", toSet(toNodeId("n2"))));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2:2"), toSet("p3")));
assertLabelsToNodesEquals(
mgr.getLabelsToNodes(setlabels),
ImmutableMap.of(
"p1", toSet(toNodeId("n3")),
"p2", toSet(toNodeId("n2")),
"p3", toSet(toNodeId("n2:2"))));
setlabels = new HashSet<String>(Arrays.asList(new String[]{"p1"}));
assertLabelsToNodesEquals(mgr.getLabelsToNodes(setlabels),
ImmutableMap.of("p1", toSet(toNodeId("n3"))));
}
@Test(timeout = 5000)
public void testNoMoreThanOneLabelExistedInOneHost() throws IOException {
boolean failed = false;
    // As in YARN-2694, we temporarily allow no more than one label to exist
    // on a single host
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
try {
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1", "p2")));
} catch (IOException e) {
failed = true;
}
Assert.assertTrue("Should failed when set > 1 labels on a host", failed);
try {
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1", "p2")));
} catch (IOException e) {
failed = true;
}
Assert.assertTrue("Should failed when add > 1 labels on a host", failed);
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
    // adding the same label to a node keeps #labels on that node at 1, so it
    // shouldn't fail
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
try {
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
} catch (IOException e) {
failed = true;
}
Assert.assertTrue("Should failed when #labels > 1 on a host after add",
failed);
}
private void verifyNodeLabelAdded(Set<String> expectedAddedLabelNames,
Collection<NodeLabel> addedNodeLabels) {
Assert.assertEquals(expectedAddedLabelNames.size(), addedNodeLabels.size());
for (NodeLabel label : addedNodeLabels) {
Assert.assertTrue(expectedAddedLabelNames.contains(label.getName()));
}
}
@Test(timeout = 5000)
public void testReplaceLabelsOnNodeInDistributedMode() throws Exception {
    // create a new DummyCommonNodeLabelsManager rather than the one from @Before
mgr.stop();
mgr = new DummyCommonNodeLabelsManager();
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
mgr.init(conf);
mgr.start();
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
Set<String> labelsByNode = mgr.getLabelsByNode(toNodeId("n1"));
Assert.assertNull(
"Labels are not expected to be written to the NodeLabelStore",
mgr.lastNodeToLabels);
Assert.assertNotNull("Updated labels should be available from the Mgr",
labelsByNode);
Assert.assertTrue(labelsByNode.contains("p1"));
}
@Test(timeout = 5000)
public void testLabelsInfoToNodes() throws IOException {
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", false),
NodeLabel.newInstance("p2", true), NodeLabel.newInstance("p3", true)));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
Map<NodeLabel, Set<NodeId>> labelsToNodes = mgr.getLabelsInfoToNodes();
assertLabelsInfoToNodesEquals(labelsToNodes, ImmutableMap.of(
NodeLabel.newInstance("p1", false), toSet(toNodeId("n1"))));
}
@Test(timeout = 5000)
public void testGetNodeLabelsInfo() throws IOException {
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", false),
NodeLabel.newInstance("p2", true), NodeLabel.newInstance("p3", false)));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p3")));
assertLabelInfoMapEquals(mgr.getNodeLabelsInfo(), ImmutableMap.of(
toNodeId("n1"), toSet(NodeLabel.newInstance("p2", true)),
toNodeId("n2"), toSet(NodeLabel.newInstance("p3", false))));
}
}
| 23,955 | 37.63871 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
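/**
 * Tests that FileSystemNodeLabelsStore recovers cluster node labels and
 * node-to-label mappings from its mirror and edit log across restarts.
 */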
public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
MockNodeLabelManager mgr = null;
Configuration conf = null;
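  /**
   * Node labels manager that dispatches events inline, so store operations
   * complete synchronously within each test.
   */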
private static class MockNodeLabelManager extends
CommonNodeLabelsManager {
@Override
protected void initDispatcher(Configuration conf) {
super.dispatcher = new InlineDispatcher();
}
@Override
protected void startDispatcher() {
// do nothing
}
@Override
protected void stopDispatcher() {
// do nothing
}
}
private FileSystemNodeLabelsStore getStore() {
return (FileSystemNodeLabelsStore) mgr.store;
}
@Before
public void before() throws IOException {
mgr = new MockNodeLabelManager();
conf = new Configuration();
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
File tempDir = File.createTempFile("nlb", ".tmp");
tempDir.delete();
tempDir.mkdirs();
tempDir.deleteOnExit();
conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
tempDir.getAbsolutePath());
mgr.init(conf);
mgr.start();
}
@After
public void after() throws IOException {
getStore().fs.delete(getStore().fsWorkingPath, true);
mgr.stop();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout = 10000)
public void testRecoverWithMirror() throws Exception {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p4"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p5", "p6"));
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n1"), toSet("p1"),
toNodeId("n2"), toSet("p2")));
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
toNodeId("n4"), toSet("p4"), toNodeId("n5"), toSet("p5"),
toNodeId("n6"), toSet("p6"), toNodeId("n7"), toSet("p6")));
    /*
     * node -> partition
     * p1: n1
     * p2: n2
     * p3: n3
     * p4: n4
     * p5: n5
     * p6: n6, n7
     */
mgr.removeFromClusterNodeLabels(toSet("p1"));
mgr.removeFromClusterNodeLabels(Arrays.asList("p3", "p5"));
    /*
     * After removal:
     * p2: n2
     * p4: n4
     * p6: n6, n7
     */
// shutdown mgr and start a new mgr
mgr.stop();
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
// check variables
Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("p2", "p4", "p6")));
assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"),
toNodeId("n7"), toSet("p6")));
assertLabelsToNodesEquals(mgr.getLabelsToNodes(),
ImmutableMap.of(
"p6", toSet(toNodeId("n6"), toNodeId("n7")),
"p4", toSet(toNodeId("n4")),
"p2", toSet(toNodeId("n2"))));
    // shutdown mgr and start a new mgr
mgr.stop();
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
// check variables
Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("p2", "p4", "p6")));
assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"),
toNodeId("n7"), toSet("p6")));
assertLabelsToNodesEquals(mgr.getLabelsToNodes(),
ImmutableMap.of(
"p6", toSet(toNodeId("n6"), toNodeId("n7")),
"p4", toSet(toNodeId("n4")),
"p2", toSet(toNodeId("n2"))));
mgr.stop();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout = 10000)
public void testRecoverWithDistributedNodeLabels() throws Exception {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p4"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p5", "p6"));
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n1"), toSet("p1"),
toNodeId("n2"), toSet("p2")));
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
toNodeId("n4"), toSet("p4"), toNodeId("n5"), toSet("p5"),
toNodeId("n6"), toSet("p6"), toNodeId("n7"), toSet("p6")));
mgr.removeFromClusterNodeLabels(toSet("p1"));
mgr.removeFromClusterNodeLabels(Arrays.asList("p3", "p5"));
mgr.stop();
mgr = new MockNodeLabelManager();
Configuration cf = new Configuration(conf);
cf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
mgr.init(cf);
mgr.start();
// check variables
Assert.assertEquals(3, mgr.getClusterNodeLabels().size());
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("p2", "p4", "p6")));
Assert.assertTrue("During recovery in distributed node-labels setup, "
+ "node to labels mapping should not be recovered ", mgr
.getNodeLabels().size() == 0);
mgr.stop();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout = 10000)
public void testEditlogRecover() throws Exception {
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p4"));
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p5", "p6"));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"),
toNodeId("n2"), toSet("p2")));
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
toNodeId("n4"), toSet("p4"), toNodeId("n5"), toSet("p5"),
toNodeId("n6"), toSet("p6"), toNodeId("n7"), toSet("p6")));
    /*
     * node -> partition
     * p1: n1
     * p2: n2
     * p3: n3
     * p4: n4
     * p5: n5
     * p6: n6, n7
     */
mgr.removeFromClusterNodeLabels(toSet("p1"));
mgr.removeFromClusterNodeLabels(Arrays.asList("p3", "p5"));
    /*
     * After removal:
     * p2: n2
     * p4: n4
     * p6: n6, n7
     */
// shutdown mgr and start a new mgr
mgr.stop();
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
// check variables
Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("p2", "p4", "p6")));
assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"),
toNodeId("n7"), toSet("p6")));
assertLabelsToNodesEquals(mgr.getLabelsToNodes(),
ImmutableMap.of(
"p6", toSet(toNodeId("n6"), toNodeId("n7")),
"p4", toSet(toNodeId("n4")),
"p2", toSet(toNodeId("n2"))));
mgr.stop();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test (timeout = 10000)
  public void testSerializationAfterRecovery() throws Exception {
// Add to cluster node labels, p2/p6 are non-exclusive.
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", true),
NodeLabel.newInstance("p2", false), NodeLabel.newInstance("p3", true),
NodeLabel.newInstance("p4", true), NodeLabel.newInstance("p5", true),
NodeLabel.newInstance("p6", false)));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"),
toNodeId("n2"), toSet("p2")));
mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
toNodeId("n4"), toSet("p4"), toNodeId("n5"), toSet("p5"),
toNodeId("n6"), toSet("p6"), toNodeId("n7"), toSet("p6")));
/*
* node -> labels
* p1: n1
* p2: n2
* p3: n3
* p4: n4
* p5: n5
* p6: n6, n7
*/
mgr.removeFromClusterNodeLabels(toSet("p1"));
mgr.removeFromClusterNodeLabels(Arrays.asList("p3", "p5"));
/*
* After removed
* p2: n2
* p4: n4
* p6: n6, n7
*/
// shutdown mgr and start a new mgr
mgr.stop();
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
// check variables
Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("p2", "p4", "p6")));
assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"),
toNodeId("n7"), toSet("p6")));
assertLabelsToNodesEquals(mgr.getLabelsToNodes(),
ImmutableMap.of(
"p6", toSet(toNodeId("n6"), toNodeId("n7")),
"p4", toSet(toNodeId("n4")),
"p2", toSet(toNodeId("n2"))));
Assert.assertFalse(mgr.isExclusiveNodeLabel("p2"));
Assert.assertTrue(mgr.isExclusiveNodeLabel("p4"));
Assert.assertFalse(mgr.isExclusiveNodeLabel("p6"));
/*
     * Add labels p7 and p8, then shutdown
*/
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p7", "p8"));
mgr.stop();
/*
* Restart, add label p9 and shutdown
*/
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p9"));
mgr.stop();
/*
     * Recover, and check that p9 was added
*/
mgr = new MockNodeLabelManager();
mgr.init(conf);
mgr.start();
// check variables
Assert.assertEquals(6, mgr.getClusterNodeLabelNames().size());
Assert.assertTrue(mgr.getClusterNodeLabelNames().containsAll(
Arrays.asList("p2", "p4", "p6", "p7", "p8", "p9")));
mgr.stop();
}
}
| 10,999 | 33.267913 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.junit.Assert;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
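/**
 * Shared helpers for node-label tests: set/map equality assertions, a
 * node-to-labels transposition utility, and factories for NodeId and
 * NodeLabel sets.
 */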
public class NodeLabelTestBase {
public static void assertMapEquals(Map<NodeId, Set<String>> m1,
ImmutableMap<NodeId, Set<String>> m2) {
Assert.assertEquals(m1.size(), m2.size());
for (NodeId k : m1.keySet()) {
Assert.assertTrue(m2.containsKey(k));
assertCollectionEquals(m1.get(k), m2.get(k));
}
}
public static void assertLabelInfoMapEquals(Map<NodeId, Set<NodeLabel>> m1,
ImmutableMap<NodeId, Set<NodeLabel>> m2) {
Assert.assertEquals(m1.size(), m2.size());
for (NodeId k : m1.keySet()) {
Assert.assertTrue(m2.containsKey(k));
assertNLCollectionEquals(m1.get(k), m2.get(k));
}
}
public static void assertLabelsToNodesEquals(Map<String, Set<NodeId>> m1,
ImmutableMap<String, Set<NodeId>> m2) {
Assert.assertEquals(m1.size(), m2.size());
for (String k : m1.keySet()) {
Assert.assertTrue(m2.containsKey(k));
Set<NodeId> s1 = new HashSet<NodeId>(m1.get(k));
Set<NodeId> s2 = new HashSet<NodeId>(m2.get(k));
Assert.assertEquals(s1, s2);
Assert.assertTrue(s1.containsAll(s2));
}
}
public static ImmutableMap<String, Set<NodeId>> transposeNodeToLabels(
Map<NodeId, Set<String>> mapNodeToLabels) {
Map<String, Set<NodeId>> mapLabelsToNodes =
new HashMap<String, Set<NodeId>>();
for(Entry<NodeId, Set<String>> entry : mapNodeToLabels.entrySet()) {
NodeId node = entry.getKey();
Set<String> setLabels = entry.getValue();
for(String label : setLabels) {
Set<NodeId> setNode = mapLabelsToNodes.get(label);
if (setNode == null) {
setNode = new HashSet<NodeId>();
}
setNode.add(NodeId.newInstance(node.getHost(), node.getPort()));
mapLabelsToNodes.put(label, setNode);
}
}
return ImmutableMap.copyOf(mapLabelsToNodes);
}
public static void assertMapContains(Map<NodeId, Set<String>> m1,
ImmutableMap<NodeId, Set<String>> m2) {
for (NodeId k : m2.keySet()) {
Assert.assertTrue(m1.containsKey(k));
assertCollectionEquals(m1.get(k), m2.get(k));
}
}
public static void assertCollectionEquals(Collection<String> c1,
Collection<String> c2) {
Set<String> s1 = new HashSet<String>(c1);
Set<String> s2 = new HashSet<String>(c2);
Assert.assertEquals(s1, s2);
Assert.assertTrue(s1.containsAll(s2));
}
public static void assertNLCollectionEquals(Collection<NodeLabel> c1,
Collection<NodeLabel> c2) {
Set<NodeLabel> s1 = new HashSet<NodeLabel>(c1);
Set<NodeLabel> s2 = new HashSet<NodeLabel>(c2);
Assert.assertEquals(s1, s2);
Assert.assertTrue(s1.containsAll(s2));
}
@SuppressWarnings("unchecked")
public static <E> Set<E> toSet(E... elements) {
Set<E> set = Sets.newHashSet(elements);
return set;
}
@SuppressWarnings("unchecked")
public static Set<NodeLabel> toNodeLabelSet(String... nodeLabelsStr) {
if (null == nodeLabelsStr) {
return null;
}
Set<NodeLabel> labels = new HashSet<NodeLabel>();
for (String label : nodeLabelsStr) {
labels.add(NodeLabel.newInstance(label));
}
return labels;
}
public NodeId toNodeId(String str) {
if (str.contains(":")) {
int idx = str.indexOf(':');
NodeId id =
NodeId.newInstance(str.substring(0, idx),
Integer.valueOf(str.substring(idx + 1)));
return id;
} else {
return NodeId.newInstance(str, CommonNodeLabelsManager.WILDCARD_PORT);
}
}
public static void assertLabelsInfoToNodesEquals(
Map<NodeLabel, Set<NodeId>> m1, ImmutableMap<NodeLabel, Set<NodeId>> m2) {
Assert.assertEquals(m1.size(), m2.size());
for (NodeLabel k : m1.keySet()) {
Assert.assertTrue(m2.containsKey(k));
Set<NodeId> s1 = new HashSet<NodeId>(m1.get(k));
Set<NodeId> s2 = new HashSet<NodeId>(m2.get(k));
Assert.assertEquals(s1, s2);
Assert.assertTrue(s1.containsAll(s2));
}
}
}
| 5,179 | 33.304636 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.event.InlineDispatcher;
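/**
 * CommonNodeLabelsManager backed by a no-op store that records the last
 * added labels, removed labels, and node-to-labels update, letting tests
 * verify what would have been persisted.
 */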
public class DummyCommonNodeLabelsManager extends CommonNodeLabelsManager {
Map<NodeId, Set<String>> lastNodeToLabels = null;
Collection<NodeLabel> lastAddedlabels = null;
Collection<String> lastRemovedlabels = null;
@Override
public void initNodeLabelStore(Configuration conf) {
this.store = new NodeLabelsStore(this) {
@Override
public void recover(boolean ignoreNodeToLabelsMappings)
throws IOException {
}
@Override
public void removeClusterNodeLabels(Collection<String> labels)
throws IOException {
lastRemovedlabels = labels;
}
@Override
public void updateNodeToLabelsMappings(
Map<NodeId, Set<String>> nodeToLabels) throws IOException {
lastNodeToLabels = nodeToLabels;
}
@Override
      public void storeNewClusterNodeLabels(List<NodeLabel> labels) throws IOException {
        lastAddedlabels = labels;
}
@Override
public void close() throws IOException {
// do nothing
}
};
}
@Override
protected void initDispatcher(Configuration conf) {
super.dispatcher = new InlineDispatcher();
}
@Override
protected void startDispatcher() {
// do nothing
}
@Override
protected void stopDispatcher() {
// do nothing
}
@Override
protected void serviceStop() throws Exception {
super.serviceStop();
}
}
| 2,588 | 27.766667 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.conf;
import org.junit.Assert;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.junit.Test;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
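/**
 * Tests for YarnConfiguration address handling: default and explicit RM web
 * URLs, socket address resolution with and without a bind host, and
 * connect-address updates.
 */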
public class TestYarnConfiguration {
@Test
public void testDefaultRMWebUrl() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
    // shouldn't have a "/" on the end of the URL as all the other URI routines
    // specifically add slashes and Jetty doesn't handle double slashes.
Assert.assertNotSame("RM Web Url is not correct", "http://0.0.0.0:8088",
rmWebUrl);
}
@Test
public void testRMWebUrlSpecified() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
    // seems a bit odd, but right now we force the RM webapp to use the host
    // from RM_ADDRESS and the port from RM_WEBAPP_ADDRESS
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "fortesting:24543");
conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
String[] parts = rmWebUrl.split(":");
Assert.assertEquals("RM Web URL Port is incrrect", 24543,
Integer.valueOf(parts[parts.length - 1]).intValue());
Assert.assertNotSame(
"RM Web Url not resolved correctly. Should not be rmtesting",
"http://rmtesting:24543", rmWebUrl);
}
@Test
public void testGetSocketAddressForNMWithHA() {
YarnConfiguration conf = new YarnConfiguration();
// Set NM address
conf.set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:1234");
// Set HA
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_ID, "rm1");
assertTrue(HAUtil.isHAEnabled(conf));
InetSocketAddress addr = conf.getSocketAddr(YarnConfiguration.NM_ADDRESS,
YarnConfiguration.DEFAULT_NM_ADDRESS,
YarnConfiguration.DEFAULT_NM_PORT);
assertEquals(1234, addr.getPort());
}
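  /**
   * getSocketAddr precedence: an explicit bind host overrides the host part
   * of the configured address, while the port comes from the configured
   * address (or the default when none is configured).
   */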
@Test
public void testGetSocketAddr() throws Exception {
YarnConfiguration conf;
InetSocketAddress resourceTrackerAddress;
//all default
conf = new YarnConfiguration();
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);
//with address
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.1");
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"10.0.0.1",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);
//address and socket
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5001");
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"10.0.0.2",
5001),
resourceTrackerAddress);
//bind host only
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_BIND_HOST, "10.0.0.3");
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"10.0.0.3",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);
//bind host and address no port
conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2");
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"0.0.0.0",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);
//bind host and address with port
conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5003");
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"0.0.0.0",
5003),
resourceTrackerAddress);
}
@Test
public void testUpdateConnectAddr() throws Exception {
YarnConfiguration conf;
InetSocketAddress resourceTrackerConnectAddress;
InetSocketAddress serverAddress;
//no override, old behavior. Won't work on a host named "yo.yo.yo"
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
resourceTrackerConnectAddress = conf.updateConnectAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
serverAddress);
assertFalse(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
//cause override with address
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
resourceTrackerConnectAddress = conf.updateConnectAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
serverAddress);
assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
//tests updateConnectAddr won't add suffix to NM service address configurations
conf = new YarnConfiguration();
conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "yo.yo.yo");
conf.set(YarnConfiguration.NM_BIND_HOST, "0.0.0.0");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_ID, "rm1");
serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[0],
Integer.valueOf(YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[1]));
InetSocketAddress localizerAddress = conf.updateConnectAddr(
YarnConfiguration.NM_BIND_HOST,
YarnConfiguration.NM_LOCALIZER_ADDRESS,
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
serverAddress);
assertTrue(localizerAddress.toString().startsWith("yo.yo.yo"));
assertNull(conf.get(
HAUtil.addSuffix(YarnConfiguration.NM_LOCALIZER_ADDRESS, "rm1")));
}
}
| 9,024 | 38.41048 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestHAUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.conf;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.Before;
import org.junit.Test;
import java.util.Collection;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
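/**
 * Tests for HAUtil: RM HA id parsing and the validation done by
 * verifyAndSetConfiguration, including trimming of untrimmed ids/addresses
 * and rejection of invalid or incomplete HA configurations.
 */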
public class TestHAUtil {
private Configuration conf;
private static final String RM1_ADDRESS_UNTRIMMED = " \t\t\n 1.2.3.4:8021 \n\t ";
private static final String RM1_ADDRESS = RM1_ADDRESS_UNTRIMMED.trim();
private static final String RM2_ADDRESS = "localhost:8022";
private static final String RM3_ADDRESS = "localhost:8033";
private static final String RM1_NODE_ID_UNTRIMMED = "rm1 ";
private static final String RM1_NODE_ID = RM1_NODE_ID_UNTRIMMED.trim();
private static final String RM2_NODE_ID = "rm2";
private static final String RM3_NODE_ID = "rm3";
private static final String RM_INVALID_NODE_ID = ".rm";
private static final String RM_NODE_IDS_UNTRIMMED = RM1_NODE_ID_UNTRIMMED + "," + RM2_NODE_ID;
private static final String RM_NODE_IDS = RM1_NODE_ID + "," + RM2_NODE_ID;
@Before
public void setUp() {
conf = new Configuration();
conf.set(YarnConfiguration.RM_HA_IDS, RM_NODE_IDS_UNTRIMMED);
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED);
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
      // the configuration key itself cannot contain space/tab/return chars.
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
}
}
@Test
public void testGetRMServiceId() throws Exception {
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
Collection<String> rmhaIds = HAUtil.getRMHAIds(conf);
assertEquals(2, rmhaIds.size());
String[] ids = rmhaIds.toArray(new String[0]);
assertEquals(RM1_NODE_ID, ids[0]);
assertEquals(RM2_NODE_ID, ids[1]);
}
@Test
public void testGetRMId() throws Exception {
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
assertEquals("Does not honor " + YarnConfiguration.RM_HA_ID,
RM1_NODE_ID, HAUtil.getRMHAId(conf));
conf.clear();
assertNull("Return null when " + YarnConfiguration.RM_HA_ID
+ " is not set", HAUtil.getRMHAId(conf));
}
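  /**
   * Walks verifyAndSetConfiguration through a valid configuration and then
   * several invalid ones: a single RM id, a missing RM_HA_ID, an invalid
   * node id, missing RPC addresses, and an RM_HA_ID absent from RM_HA_IDS.
   */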
@Test
public void testVerifyAndSetConfiguration() throws Exception {
try {
HAUtil.verifyAndSetConfiguration(conf);
} catch (YarnRuntimeException e) {
fail("Should not throw any exceptions.");
}
assertEquals("Should be saved as Trimmed collection",
StringUtils.getStringCollection(RM_NODE_IDS), HAUtil.getRMHAIds(conf));
assertEquals("Should be saved as Trimmed string",
RM1_NODE_ID, HAUtil.getRMHAId(conf));
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
assertEquals("RPC address not set for " + confKey,
RM1_ADDRESS, conf.get(confKey));
}
conf.clear();
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID);
try {
HAUtil.verifyAndSetConfiguration(conf);
} catch (YarnRuntimeException e) {
assertEquals("YarnRuntimeException by verifyAndSetRMHAIds()",
HAUtil.BAD_CONFIG_MESSAGE_PREFIX +
HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
conf.get(YarnConfiguration.RM_HA_IDS) +
"\nHA mode requires atleast two RMs"),
e.getMessage());
}
conf.clear();
// simulate the case YarnConfiguration.RM_HA_ID is not set
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
+ RM2_NODE_ID);
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
} catch (YarnRuntimeException e) {
assertEquals("YarnRuntimeException by getRMId()",
HAUtil.BAD_CONFIG_MESSAGE_PREFIX +
HAUtil.getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID),
e.getMessage());
}
conf.clear();
conf.set(YarnConfiguration.RM_HA_ID, RM_INVALID_NODE_ID);
conf.set(YarnConfiguration.RM_HA_IDS, RM_INVALID_NODE_ID + ","
+ RM1_NODE_ID);
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
// simulate xml with invalid node id
conf.set(confKey + RM_INVALID_NODE_ID, RM_INVALID_NODE_ID);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
} catch (YarnRuntimeException e) {
assertEquals("YarnRuntimeException by addSuffix()",
HAUtil.BAD_CONFIG_MESSAGE_PREFIX +
HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
RM_INVALID_NODE_ID),
e.getMessage());
}
conf.clear();
// simulate the case HAUtil.RM_RPC_ADDRESS_CONF_KEYS are not set
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
try {
HAUtil.verifyAndSetConfiguration(conf);
fail("Should throw YarnRuntimeException. by Configuration#set()");
} catch (YarnRuntimeException e) {
String confKey =
HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM1_NODE_ID);
assertEquals("YarnRuntimeException by Configuration#set()",
HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(
HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, RM1_NODE_ID)
+ " or " + confKey), e.getMessage());
}
// simulate the case YarnConfiguration.RM_HA_IDS doesn't contain
// the value of YarnConfiguration.RM_HA_ID
conf.clear();
conf.set(YarnConfiguration.RM_HA_IDS, RM2_NODE_ID + "," + RM3_NODE_ID);
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED);
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
conf.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
} catch (YarnRuntimeException e) {
assertEquals("YarnRuntimeException by getRMId()'s validation",
HAUtil.BAD_CONFIG_MESSAGE_PREFIX +
HAUtil.getRMHAIdNeedToBeIncludedMessage("[rm2, rm3]", RM1_NODE_ID),
e.getMessage());
}
}
@Test
public void testGetConfKeyForRMInstance() {
assertTrue("RM instance id is not suffixed",
HAUtil.getConfKeyForRMInstance(YarnConfiguration.RM_ADDRESS, conf)
.contains(HAUtil.getRMHAId(conf)));
assertFalse("RM instance id is suffixed",
HAUtil.getConfKeyForRMInstance(YarnConfiguration.NM_ADDRESS, conf)
.contains(HAUtil.getRMHAId(conf)));
}
}
| 7,892 | 39.06599 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
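/**
 * AsyncDispatcher variant for tests, exposing busy-wait helpers that block
 * until the event thread is idle or the queue has fully drained.
 */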
@SuppressWarnings("rawtypes")
public class DrainDispatcher extends AsyncDispatcher {
public DrainDispatcher() {
this(new LinkedBlockingQueue<Event>());
}
public DrainDispatcher(BlockingQueue<Event> eventQueue) {
super(eventQueue);
}
/**
* Wait till event thread enters WAITING state (i.e. waiting for new events).
*/
public void waitForEventThreadToWait() {
while (!isEventThreadWaiting()) {
Thread.yield();
}
}
/**
* Busy loop waiting for all queued events to drain.
*/
public void await() {
while (!isDrained()) {
Thread.yield();
}
}
}
| 1,534 | 28.519231 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.Assert;
import org.junit.Test;
public class TestAsyncDispatcher {
  /* This test checks whether the dispatcher hangs on close if the following
   * two things happen:
* 1. A thread which was putting event to event queue is interrupted.
* 2. Event queue is empty on close.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout=10000)
public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
Event event = mock(Event.class);
doThrow(new InterruptedException()).when(eventQueue).put(event);
DrainDispatcher disp = new DrainDispatcher(eventQueue);
disp.init(new Configuration());
disp.setDrainEventsOnStop();
disp.start();
// Wait for event handler thread to start and begin waiting for events.
disp.waitForEventThreadToWait();
try {
disp.getEventHandler().handle(event);
} catch (YarnRuntimeException e) {
}
// Queue should be empty and dispatcher should not hang on close
Assert.assertTrue("Event Queue should have been empty",
eventQueue.isEmpty());
disp.close();
}
}
| 2,335 | 36.079365 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
@SuppressWarnings({"unchecked", "rawtypes"})
public class InlineDispatcher extends AsyncDispatcher {
private static final Log LOG = LogFactory.getLog(InlineDispatcher.class);
private class TestEventHandler implements EventHandler {
@Override
public void handle(Event event) {
dispatch(event);
}
}
@Override
protected void dispatch(Event event) {
LOG.info("Dispatching the event " + event.getClass().getName() + "."
+ event.toString());
Class<? extends Enum> type = event.getType().getDeclaringClass();
if (eventDispatchers.get(type) != null) {
eventDispatchers.get(type).handle(event);
}
}
@Override
public EventHandler getEventHandler() {
return new TestEventHandler();
}
public static class EmptyEventHandler implements EventHandler<Event> {
@Override
public void handle(Event event) {
//do nothing
}
}
}
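// Illustrative usage sketch (not part of the original file): because
// getEventHandler() dispatches synchronously on the calling thread, a test
// can register a handler and observe its effects immediately, with no
// draining or sleeping:
//
//   InlineDispatcher dispatcher = new InlineDispatcher();
//   dispatcher.register(MyEventType.class, new MyHandler());
//   dispatcher.getEventHandler().handle(new MyEvent(MyEventType.DO_IT));
//   // MyHandler.handle() has already run at this point
//
// MyEventType, MyHandler and MyEvent are hypothetical names for this sketch.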
| 1,978 | 33.12069 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/ipc/TestRPCUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.ipc;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.junit.Assert;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.junit.Test;
import com.google.protobuf.ServiceException;
public class TestRPCUtil {
@Test
public void testUnknownExceptionUnwrapping() {
Class<? extends Throwable> exception = YarnException.class;
String className = "UnknownException.class";
verifyRemoteExceptionUnwrapping(exception, className);
}
@Test
public void testRemoteIOExceptionUnwrapping() {
Class<? extends Throwable> exception = IOException.class;
verifyRemoteExceptionUnwrapping(exception, exception.getName());
}
@Test
public void testRemoteIOExceptionDerivativeUnwrapping() {
// Test IOException sub-class
Class<? extends Throwable> exception = FileNotFoundException.class;
verifyRemoteExceptionUnwrapping(exception, exception.getName());
}
@Test
public void testRemoteYarnExceptionUnwrapping() {
Class<? extends Throwable> exception = YarnException.class;
verifyRemoteExceptionUnwrapping(exception, exception.getName());
}
@Test
public void testRemoteYarnExceptionDerivativeUnwrapping() {
Class<? extends Throwable> exception = YarnTestException.class;
verifyRemoteExceptionUnwrapping(exception, exception.getName());
}
@Test
public void testRemoteRuntimeExceptionUnwrapping() {
Class<? extends Throwable> exception = NullPointerException.class;
verifyRemoteExceptionUnwrapping(exception, exception.getName());
}
@Test
public void testUnexpectedRemoteExceptionUnwrapping() {
// Non IOException, YarnException thrown by the remote side.
Class<? extends Throwable> exception = Exception.class;
verifyRemoteExceptionUnwrapping(RemoteException.class, exception.getName());
}
@Test
public void testRemoteYarnExceptionWithoutStringConstructor() {
// Derivatives of YarnException should always define a string constructor.
Class<? extends Throwable> exception = YarnTestExceptionNoConstructor.class;
verifyRemoteExceptionUnwrapping(RemoteException.class, exception.getName());
}
@Test
public void testRPCServiceExceptionUnwrapping() {
String message = "ServiceExceptionMessage";
ServiceException se = new ServiceException(message);
Throwable t = null;
try {
RPCUtil.unwrapAndThrowException(se);
} catch (Throwable thrown) {
t = thrown;
}
Assert.assertTrue(IOException.class.isInstance(t));
Assert.assertTrue(t.getMessage().contains(message));
}
@Test
public void testRPCIOExceptionUnwrapping() {
String message = "DirectIOExceptionMessage";
IOException ioException = new FileNotFoundException(message);
ServiceException se = new ServiceException(ioException);
Throwable t = null;
try {
RPCUtil.unwrapAndThrowException(se);
} catch (Throwable thrown) {
t = thrown;
}
Assert.assertTrue(FileNotFoundException.class.isInstance(t));
Assert.assertTrue(t.getMessage().contains(message));
}
@Test
public void testRPCRuntimeExceptionUnwrapping() {
String message = "RPCRuntimeExceptionUnwrapping";
RuntimeException re = new NullPointerException(message);
ServiceException se = new ServiceException(re);
Throwable t = null;
try {
RPCUtil.unwrapAndThrowException(se);
} catch (Throwable thrown) {
t = thrown;
}
Assert.assertTrue(NullPointerException.class.isInstance(t));
Assert.assertTrue(t.getMessage().contains(message));
}
private void verifyRemoteExceptionUnwrapping(
Class<? extends Throwable> expectedLocalException,
String realExceptionClassName) {
String message = realExceptionClassName + "Message";
RemoteException re = new RemoteException(realExceptionClassName, message);
ServiceException se = new ServiceException(re);
Throwable t = null;
try {
RPCUtil.unwrapAndThrowException(se);
} catch (Throwable thrown) {
t = thrown;
}
Assert.assertTrue("Expected exception [" + expectedLocalException
+ "] but found " + t, expectedLocalException.isInstance(t));
Assert.assertTrue(
"Expected message [" + message + "] but found " + t.getMessage(), t
.getMessage().contains(message));
}
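  // Added summary of the round trip exercised above: the RemoteException
  // carries only the remote class name and message. RPCUtil reflectively
  // looks that class up and, for IOException, YarnException or
  // RuntimeException types with a (String) constructor, re-instantiates it
  // locally -- which is why YarnTestExceptionNoConstructor below falls back
  // to a plain RemoteException.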
private static class YarnTestException extends YarnException {
private static final long serialVersionUID = 1L;
@SuppressWarnings("unused")
public YarnTestException(String message) {
super(message);
}
}
private static class YarnTestExceptionNoConstructor extends
YarnException {
private static final long serialVersionUID = 1L;
}
}
| 5,568 | 31.377907 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doThrow;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
import java.util.Arrays;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.TestContainerId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
import org.apache.hadoop.yarn.util.Times;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
public class TestAggregatedLogFormat {
private static final File testWorkDir = new File("target",
"TestAggregatedLogFormat");
private static final Configuration conf = new Configuration();
private static final FileSystem fs;
private static final char filler = 'x';
private static final Log LOG = LogFactory
.getLog(TestAggregatedLogFormat.class);
static {
try {
fs = FileSystem.get(conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Before
@After
public void cleanupTestDir() throws Exception {
Path workDirPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("Cleaning test directory [" + workDirPath + "]");
fs.delete(workDirPath, true);
}
  // Test for corrupted aggregated logs. The writer should not write more
  // data than the recorded length if LogValue.write() is called while the
  // application is still appending to its logs.
@Test
public void testForCorruptedAggregatedLogs() throws Exception {
Configuration conf = new Configuration();
File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
Path remoteAppLogFile =
new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
Path t =
new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
.getApplicationId().toString());
Path srcFilePath = new Path(t, testContainerId.toString());
long numChars = 950000;
writeSrcFileAndALog(srcFilePath, "stdout", numChars, remoteAppLogFile,
srcFileRoot, testContainerId);
LogReader logReader = new LogReader(conf, remoteAppLogFile);
LogKey rLogKey = new LogKey();
DataInputStream dis = logReader.next(rLogKey);
Writer writer = new StringWriter();
    try {
      LogReader.readAcontainerLogs(dis, writer);
    } catch (Exception e) {
      // A NumberFormatException means a corrupted length header caused by
      // the concurrent writes; any other exception is tolerated here.
      if (e.toString().contains("NumberFormatException")) {
        Assert.fail("Aggregated logs are corrupted.");
      }
    }
}
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId)
throws Exception {
File dir = new File(srcFilePath.toString());
if (!dir.exists()) {
if (!dir.mkdirs()) {
throw new IOException("Unable to create directory : " + dir);
}
}
File outputFile = new File(new File(srcFilePath.toString()), fileName);
FileOutputStream os = new FileOutputStream(outputFile);
final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
final int ch = filler;
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
LogKey logKey = new LogKey(testContainerId);
LogValue logValue =
spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
testContainerId, ugi.getShortUserName()));
final CountDownLatch latch = new CountDownLatch(1);
    Thread t = new Thread() {
      public void run() {
        try {
          // Write the first third of the data, then signal the main thread
          // so that aggregation starts while this thread keeps appending.
          for (int i = 0; i < length / 3; i++) {
            osw.write(ch);
          }
          latch.countDown();
          for (int i = 0; i < (2 * length) / 3; i++) {
            osw.write(ch);
          }
          osw.close();
        } catch (IOException e) {
          // Best-effort in a test helper; just surface the failure.
          e.printStackTrace();
        }
      }
    };
t.start();
    // Wait till the osw is partially written; aggregation starts once the
    // osw has completed one third of its work.
    latch.await();
    // Aggregate the logs.
logWriter.append(logKey, logValue);
logWriter.close();
}
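  // Race being exercised (comment added): logWriter.append() runs after only
  // a third of the file is written, so LogValue records a length that the
  // writer thread then keeps growing; the test above checks that this does
  // not corrupt the length headers in the aggregated file.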
@Test
public void testReadAcontainerLogs1() throws Exception {
    // Verify the output generated by
    // readAContainerLogs(DataInputStream, Writer, logUploadedTime).
    testReadAcontainerLog(true);
    // Verify the output generated by readAContainerLogs(DataInputStream, Writer).
    testReadAcontainerLog(false);
}
private void testReadAcontainerLog(boolean logUploadedTime) throws Exception {
Configuration conf = new Configuration();
File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
Path remoteAppLogFile =
new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
Path t =
new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
.getApplicationId().toString());
Path srcFilePath = new Path(t, testContainerId.toString());
int numChars = 80000;
    // Create a sub-folder under srcFilePath and create a log file in it.
    // We only aggregate top-level files, so this log file should be ignored.
Path subDir = new Path(srcFilePath, "subDir");
fs.mkdirs(subDir);
writeSrcFile(subDir, "logs", numChars);
    // Create the files stderr and stdout in containerLogDir.
writeSrcFile(srcFilePath, "stderr", numChars);
writeSrcFile(srcFilePath, "stdout", numChars);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
LogKey logKey = new LogKey(testContainerId);
LogValue logValue =
new LogValue(Collections.singletonList(srcFileRoot.toString()),
testContainerId, ugi.getShortUserName());
    // When we try to open a FileInputStream for stderr, it will throw an
    // IOException, so the log aggregation for stderr is skipped.
LogValue spyLogValue = spy(logValue);
File errorFile = new File((new Path(srcFilePath, "stderr")).toString());
doThrow(new IOException("Mock can not open FileInputStream")).when(
spyLogValue).secureOpenFile(errorFile);
logWriter.append(logKey, spyLogValue);
logWriter.close();
    // Make sure permissions are correct on the file.
FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
Assert.assertEquals("permissions on log aggregation file are wrong",
FsPermission.createImmutable((short) 0640), fsStatus.getPermission());
LogReader logReader = new LogReader(conf, remoteAppLogFile);
LogKey rLogKey = new LogKey();
DataInputStream dis = logReader.next(rLogKey);
Writer writer = new StringWriter();
if (logUploadedTime) {
LogReader.readAcontainerLogs(dis, writer, System.currentTimeMillis());
} else {
LogReader.readAcontainerLogs(dis, writer);
}
// We should only do the log aggregation for stdout.
// Since we could not open the fileInputStream for stderr, this file is not
// aggregated.
String s = writer.toString();
int expectedLength =
"LogType:stdout".length()
+ (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
.currentTimeMillis())).length() : 0)
+ ("\nLogLength:" + numChars).length()
+ "\nLog Contents:\n".length() + numChars + "\n".length()
+ "End of LogType:stdout\n".length();
Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr"));
Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs"));
Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
StringBuilder sb = new StringBuilder();
for (int i = 0 ; i < numChars ; i++) {
sb.append(filler);
}
String expectedContent = sb.toString();
Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
Assert.assertEquals(expectedLength, s.length());
}
@Test(timeout=10000)
public void testContainerLogsFileAccess() throws IOException {
// This test will run only if NativeIO is enabled as SecureIOUtils
// require it to be enabled.
Assume.assumeTrue(NativeIO.isAvailable());
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
File workDir = new File(testWorkDir, "testContainerLogsFileAccess1");
Path remoteAppLogFile =
new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
String data = "Log File content for container : ";
    // Creating files for container1. The log aggregator will try to read
    // log files with an illegal user.
ApplicationId applicationId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
ContainerId testContainerId1 =
ContainerId.newContainerId(applicationAttemptId, 1);
Path appDir =
new Path(srcFileRoot, testContainerId1.getApplicationAttemptId()
.getApplicationId().toString());
Path srcFilePath1 = new Path(appDir, testContainerId1.toString());
String stdout = "stdout";
String stderr = "stderr";
writeSrcFile(srcFilePath1, stdout, data + testContainerId1.toString()
+ stdout);
writeSrcFile(srcFilePath1, stderr, data + testContainerId1.toString()
+ stderr);
UserGroupInformation ugi =
UserGroupInformation.getCurrentUser();
LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
LogKey logKey = new LogKey(testContainerId1);
String randomUser = "randomUser";
LogValue logValue =
spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
testContainerId1, randomUser));
    // This simulates a situation where the first log file is owned by a
    // different user (probably via a symlink) and the second one by the user
    // itself. The first file should not be aggregated because its owner is
    // invalid.
when(logValue.getUser()).thenReturn(randomUser).thenReturn(
ugi.getShortUserName());
logWriter.append(logKey, logValue);
logWriter.close();
BufferedReader in =
new BufferedReader(new FileReader(new File(remoteAppLogFile
.toUri().getRawPath())));
String line;
StringBuffer sb = new StringBuffer("");
while ((line = in.readLine()) != null) {
LOG.info(line);
sb.append(line);
}
line = sb.toString();
String expectedOwner = ugi.getShortUserName();
if (Path.WINDOWS) {
final String adminsGroupString = "Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner = adminsGroupString;
}
}
    // The file stderr should not be aggregated, and its log message should
    // not appear in the aggregated output.
    String stderrFile =
        StringUtils.join(
            File.separator,
            Arrays.asList(new String[] {
                workDir.getAbsolutePath(), "srcFiles",
                testContainerId1.getApplicationAttemptId().getApplicationId()
                    .toString(), testContainerId1.toString(), stderr }));
// The file: stdout is expected to be aggregated.
String stdoutFile2 =
StringUtils.join(
File.separator,
Arrays.asList(new String[] {
workDir.getAbsolutePath(), "srcFiles",
testContainerId1.getApplicationAttemptId().getApplicationId()
.toString(), testContainerId1.toString(), stdout }));
String message2 =
"Owner '" + expectedOwner + "' for path "
+ stdoutFile2 + " did not match expected owner '"
+ ugi.getShortUserName() + "'";
Assert.assertFalse(line.contains(message2));
Assert.assertFalse(line.contains(data + testContainerId1.toString()
+ stderr));
Assert.assertTrue(line.contains(data + testContainerId1.toString()
+ stdout));
}
private void writeSrcFile(Path srcFilePath, String fileName, long length)
throws IOException {
OutputStreamWriter osw = getOutputStreamWriter(srcFilePath, fileName);
int ch = filler;
for (int i = 0; i < length; i++) {
osw.write(ch);
}
osw.close();
}
private void writeSrcFile(Path srcFilePath, String fileName, String data)
throws IOException {
OutputStreamWriter osw = getOutputStreamWriter(srcFilePath, fileName);
osw.write(data);
osw.close();
}
private OutputStreamWriter getOutputStreamWriter(Path srcFilePath,
String fileName) throws IOException, FileNotFoundException,
UnsupportedEncodingException {
File dir = new File(srcFilePath.toString());
if (!dir.exists()) {
if (!dir.mkdirs()) {
throw new IOException("Unable to create directory : " + dir);
}
}
File outputFile = new File(new File(srcFilePath.toString()), fileName);
FileOutputStream os = new FileOutputStream(outputFile);
OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
return osw;
}
}
| 15,791 | 37.330097 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Before;
import org.junit.Test;
import org.junit.Assert;
import static org.mockito.Mockito.*;
public class TestAggregatedLogDeletionService {
@Before
public void closeFilesystems() throws IOException {
// prevent the same mockfs instance from being reused due to FS cache
FileSystem.closeAll();
}
@Test
public void testDeletion() throws Exception {
long now = System.currentTimeMillis();
long toDeleteTime = now - (2000*1000);
long toKeepTime = now - (1500*1000);
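    // Comment added: retention below is configured as 1800 seconds, so
    // entries last modified 2000 seconds ago are eligible for deletion while
    // those modified 1500 seconds ago must be kept.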
String root = "mockfs://foo/";
String remoteRootLogDir = root+"tmp/logs";
String suffix = "logs";
final Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "1800");
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
Path rootPath = new Path(root);
FileSystem rootFs = rootPath.getFileSystem(conf);
FileSystem mockFs = ((FilterFileSystem)rootFs).getRawFileSystem();
Path remoteRootLogPath = new Path(remoteRootLogDir);
Path userDir = new Path(remoteRootLogPath, "me");
FileStatus userDirStatus = new FileStatus(0, true, 0, 0, toKeepTime, userDir);
when(mockFs.listStatus(remoteRootLogPath)).thenReturn(
new FileStatus[]{userDirStatus});
ApplicationId appId1 =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
Path userLogDir = new Path(userDir, suffix);
Path app1Dir = new Path(userLogDir, appId1.toString());
FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app1Dir);
ApplicationId appId2 =
ApplicationId.newInstance(System.currentTimeMillis(), 2);
Path app2Dir = new Path(userLogDir, appId2.toString());
FileStatus app2DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app2Dir);
ApplicationId appId3 =
ApplicationId.newInstance(System.currentTimeMillis(), 3);
Path app3Dir = new Path(userLogDir, appId3.toString());
FileStatus app3DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app3Dir);
ApplicationId appId4 =
ApplicationId.newInstance(System.currentTimeMillis(), 4);
Path app4Dir = new Path(userLogDir, appId4.toString());
FileStatus app4DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app4Dir);
ApplicationId appId5 =
ApplicationId.newInstance(System.currentTimeMillis(), 5);
Path app5Dir = new Path(userLogDir, appId5.toString());
FileStatus app5DirStatus =
new FileStatus(0, true, 0, 0, toDeleteTime, app5Dir);
when(mockFs.listStatus(userLogDir)).thenReturn(
new FileStatus[] { app1DirStatus, app2DirStatus, app3DirStatus,
app4DirStatus, app5DirStatus });
when(mockFs.listStatus(app1Dir)).thenReturn(
new FileStatus[]{});
Path app2Log1 = new Path(app2Dir, "host1");
FileStatus app2Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app2Log1);
Path app2Log2 = new Path(app2Dir, "host2");
FileStatus app2Log2Status = new FileStatus(10, false, 1, 1, toKeepTime, app2Log2);
when(mockFs.listStatus(app2Dir)).thenReturn(
new FileStatus[]{app2Log1Status, app2Log2Status});
Path app3Log1 = new Path(app3Dir, "host1");
FileStatus app3Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app3Log1);
Path app3Log2 = new Path(app3Dir, "host2");
FileStatus app3Log2Status = new FileStatus(10, false, 1, 1, toDeleteTime, app3Log2);
when(mockFs.delete(app3Dir, true)).thenThrow(new AccessControlException("Injected Error\nStack Trace :("));
when(mockFs.listStatus(app3Dir)).thenReturn(
new FileStatus[]{app3Log1Status, app3Log2Status});
Path app4Log1 = new Path(app4Dir, "host1");
FileStatus app4Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app4Log1);
Path app4Log2 = new Path(app4Dir, "host2");
FileStatus app4Log2Status = new FileStatus(10, false, 1, 1, toDeleteTime, app4Log2);
when(mockFs.listStatus(app4Dir)).thenReturn(
new FileStatus[]{app4Log1Status, app4Log2Status});
Path app5Log1 = new Path(app5Dir, "host1");
FileStatus app5Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app5Log1);
Path app5Log2 = new Path(app5Dir, "host2");
FileStatus app5Log2Status = new FileStatus(10, false, 1, 1, toKeepTime, app5Log2);
when(mockFs.listStatus(app5Dir)).thenReturn(
new FileStatus[]{app5Log1Status, app5Log2Status});
final List<ApplicationId> finishedApplications =
Collections.unmodifiableList(Arrays.asList(appId1, appId2, appId3,
appId4));
final List<ApplicationId> runningApplications =
Collections.unmodifiableList(Arrays.asList(appId5));
AggregatedLogDeletionService deletionService =
new AggregatedLogDeletionService() {
@Override
protected ApplicationClientProtocol creatRMClient()
throws IOException {
try {
return createMockRMClient(finishedApplications,
runningApplications);
} catch (Exception e) {
throw new IOException(e);
}
}
@Override
protected void stopRMClient() {
// DO NOTHING
}
};
deletionService.init(conf);
deletionService.start();
verify(mockFs, timeout(2000)).delete(app1Dir, true);
verify(mockFs, timeout(2000).times(0)).delete(app2Dir, true);
verify(mockFs, timeout(2000)).delete(app3Dir, true);
verify(mockFs, timeout(2000)).delete(app4Dir, true);
verify(mockFs, timeout(2000).times(0)).delete(app5Dir, true);
verify(mockFs, timeout(2000)).delete(app5Log1, true);
verify(mockFs, timeout(2000).times(0)).delete(app5Log2, true);
deletionService.stop();
}
@Test
public void testRefreshLogRetentionSettings() throws Exception {
long now = System.currentTimeMillis();
//time before 2000 sec
long before2000Secs = now - (2000 * 1000);
//time before 50 sec
long before50Secs = now - (50 * 1000);
String root = "mockfs://foo/";
String remoteRootLogDir = root + "tmp/logs";
String suffix = "logs";
final Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "1800");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,
"1");
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
Path rootPath = new Path(root);
FileSystem rootFs = rootPath.getFileSystem(conf);
FileSystem mockFs = ((FilterFileSystem) rootFs).getRawFileSystem();
Path remoteRootLogPath = new Path(remoteRootLogDir);
Path userDir = new Path(remoteRootLogPath, "me");
FileStatus userDirStatus = new FileStatus(0, true, 0, 0, before50Secs,
userDir);
when(mockFs.listStatus(remoteRootLogPath)).thenReturn(
new FileStatus[] { userDirStatus });
Path userLogDir = new Path(userDir, suffix);
ApplicationId appId1 =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
//Set time last modified of app1Dir directory and its files to before2000Secs
Path app1Dir = new Path(userLogDir, appId1.toString());
FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, before2000Secs,
app1Dir);
ApplicationId appId2 =
ApplicationId.newInstance(System.currentTimeMillis(), 2);
    //Set time last modified of app2Dir directory and its files to before50Secs
Path app2Dir = new Path(userLogDir, appId2.toString());
FileStatus app2DirStatus = new FileStatus(0, true, 0, 0, before50Secs,
app2Dir);
when(mockFs.listStatus(userLogDir)).thenReturn(
new FileStatus[] { app1DirStatus, app2DirStatus });
Path app1Log1 = new Path(app1Dir, "host1");
FileStatus app1Log1Status = new FileStatus(10, false, 1, 1, before2000Secs,
app1Log1);
when(mockFs.listStatus(app1Dir)).thenReturn(
new FileStatus[] { app1Log1Status });
Path app2Log1 = new Path(app2Dir, "host1");
FileStatus app2Log1Status = new FileStatus(10, false, 1, 1, before50Secs,
app2Log1);
when(mockFs.listStatus(app2Dir)).thenReturn(
new FileStatus[] { app2Log1Status });
final List<ApplicationId> finishedApplications =
Collections.unmodifiableList(Arrays.asList(appId1, appId2));
AggregatedLogDeletionService deletionSvc = new AggregatedLogDeletionService() {
@Override
protected Configuration createConf() {
return conf;
}
@Override
protected ApplicationClientProtocol creatRMClient()
throws IOException {
try {
return createMockRMClient(finishedApplications, null);
} catch (Exception e) {
throw new IOException(e);
}
}
@Override
protected void stopRMClient() {
// DO NOTHING
}
};
deletionSvc.init(conf);
deletionSvc.start();
    //app1Dir should be deleted since it is past the log retention period
    verify(mockFs, timeout(10000)).delete(app1Dir, true);
    //app2Dir is not expected to be deleted since it is below the threshold
    verify(mockFs, timeout(3000).times(0)).delete(app2Dir, true);
    //Now, let's change the confs
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "50");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,
"2");
    //We have not called refreshLogSettings, hence we don't expect to see the
    //changed conf values yet
    Assert.assertTrue(2000L != deletionSvc.getCheckIntervalMsecs());
    //refresh the log settings
    deletionSvc.refreshLogRetentionSettings();
    //Check interval time should reflect the new value
    Assert.assertTrue(2000L == deletionSvc.getCheckIntervalMsecs());
    //app2Dir should now be deleted since it falls past the new threshold
    verify(mockFs, timeout(10000)).delete(app2Dir, true);
deletionSvc.stop();
}
@Test
public void testCheckInterval() throws Exception {
long RETENTION_SECS = 10 * 24 * 3600;
long now = System.currentTimeMillis();
long toDeleteTime = now - RETENTION_SECS*1000;
String root = "mockfs://foo/";
String remoteRootLogDir = root+"tmp/logs";
String suffix = "logs";
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "864000");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, "1");
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
// prevent us from picking up the same mockfs instance from another test
FileSystem.closeAll();
Path rootPath = new Path(root);
FileSystem rootFs = rootPath.getFileSystem(conf);
FileSystem mockFs = ((FilterFileSystem)rootFs).getRawFileSystem();
Path remoteRootLogPath = new Path(remoteRootLogDir);
Path userDir = new Path(remoteRootLogPath, "me");
FileStatus userDirStatus = new FileStatus(0, true, 0, 0, now, userDir);
when(mockFs.listStatus(remoteRootLogPath)).thenReturn(
new FileStatus[]{userDirStatus});
ApplicationId appId1 =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
Path userLogDir = new Path(userDir, suffix);
Path app1Dir = new Path(userLogDir, appId1.toString());
FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, now, app1Dir);
when(mockFs.listStatus(userLogDir)).thenReturn(
new FileStatus[]{app1DirStatus});
Path app1Log1 = new Path(app1Dir, "host1");
FileStatus app1Log1Status = new FileStatus(10, false, 1, 1, now, app1Log1);
when(mockFs.listStatus(app1Dir)).thenReturn(
new FileStatus[]{app1Log1Status});
final List<ApplicationId> finishedApplications =
Collections.unmodifiableList(Arrays.asList(appId1));
AggregatedLogDeletionService deletionSvc =
new AggregatedLogDeletionService() {
@Override
protected ApplicationClientProtocol creatRMClient()
throws IOException {
try {
return createMockRMClient(finishedApplications, null);
} catch (Exception e) {
throw new IOException(e);
}
}
@Override
protected void stopRMClient() {
// DO NOTHING
}
};
deletionSvc.init(conf);
deletionSvc.start();
verify(mockFs, timeout(10000).atLeast(4)).listStatus(any(Path.class));
verify(mockFs, never()).delete(app1Dir, true);
// modify the timestamp of the logs and verify it's picked up quickly
app1DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app1Dir);
app1Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app1Log1);
when(mockFs.listStatus(userLogDir)).thenReturn(
new FileStatus[]{app1DirStatus});
when(mockFs.listStatus(app1Dir)).thenReturn(
new FileStatus[]{app1Log1Status});
verify(mockFs, timeout(10000)).delete(app1Dir, true);
deletionSvc.stop();
}
static class MockFileSystem extends FilterFileSystem {
MockFileSystem() {
super(mock(FileSystem.class));
}
public void initialize(URI name, Configuration conf) throws IOException {}
}
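  // Why the FilterFileSystem wrapper (comment added): FileSystem.get() must
  // return a concrete class, so MockFileSystem delegates every call to a
  // Mockito mock. Tests then reach that mock for stubbing via:
  //
  //   FileSystem mockFs = ((FilterFileSystem) rootFs).getRawFileSystem();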
private static ApplicationClientProtocol createMockRMClient(
      List<ApplicationId> finishedApplications,
List<ApplicationId> runningApplications) throws Exception {
final ApplicationClientProtocol mockProtocol =
mock(ApplicationClientProtocol.class);
    if (finishedApplications != null && !finishedApplications.isEmpty()) {
      for (ApplicationId appId : finishedApplications) {
GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(appId);
GetApplicationReportResponse response =
createApplicationReportWithFinishedApplication();
when(mockProtocol.getApplicationReport(request))
.thenReturn(response);
}
}
if (runningApplications != null && !runningApplications.isEmpty()) {
for (ApplicationId appId : runningApplications) {
GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(appId);
GetApplicationReportResponse response =
createApplicationReportWithRunningApplication();
when(mockProtocol.getApplicationReport(request))
.thenReturn(response);
}
}
return mockProtocol;
}
private static GetApplicationReportResponse
createApplicationReportWithRunningApplication() {
ApplicationReport report = mock(ApplicationReport.class);
when(report.getYarnApplicationState()).thenReturn(
YarnApplicationState.RUNNING);
GetApplicationReportResponse response =
mock(GetApplicationReportResponse.class);
when(response.getApplicationReport()).thenReturn(report);
return response;
}
private static GetApplicationReportResponse
createApplicationReportWithFinishedApplication() {
ApplicationReport report = mock(ApplicationReport.class);
when(report.getYarnApplicationState()).thenReturn(
YarnApplicationState.FINISHED);
GetApplicationReportResponse response =
mock(GetApplicationReportResponse.class);
when(response.getApplicationReport()).thenReturn(report);
return response;
}
}
| 17,613 | 38.582022 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.log.AggregatedLogsBlockForTest;
import org.apache.hadoop.yarn.webapp.view.BlockForTest;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest;
import org.junit.Test;
import static org.mockito.Mockito.*;
import static org.junit.Assert.*;
/**
 * Test AggregatedLogsBlock. AggregatedLogsBlock should check the user,
 * aggregate logs into one file, and render these logs or errors as HTML.
 */
public class TestAggregatedLogsBlock {
  /**
   * Bad user: user 'owner' tries to read logs without access.
   */
@Test
public void testAccessDenied() throws Exception {
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration = getConfiguration();
writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
writeLog(configuration, "owner");
AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
configuration, "owner", "container_0_0001_01_000001");
ByteArrayOutputStream data = new ByteArrayOutputStream();
PrintWriter printWriter = new PrintWriter(data);
HtmlBlock html = new HtmlBlockForTest();
HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
aggregatedBlock.render(block);
block.getWriter().flush();
String out = data.toString();
assertTrue(out
.contains("User [owner] is not authorized to view the logs for entity"));
}
  /**
   * Try to read bad (unavailable) logs.
   *
   * @throws Exception
   */
@Test
public void testBadLogs() throws Exception {
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration = getConfiguration();
writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
writeLog(configuration, "owner");
AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
configuration, "admin", "container_0_0001_01_000001");
ByteArrayOutputStream data = new ByteArrayOutputStream();
PrintWriter printWriter = new PrintWriter(data);
HtmlBlock html = new HtmlBlockForTest();
HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
aggregatedBlock.render(block);
block.getWriter().flush();
String out = data.toString();
assertTrue(out
.contains("Logs not available for entity. Aggregation may not be complete, Check back later or try the nodemanager at localhost:1234"));
}
  /**
   * All OK: AggregatedLogsBlock should aggregate the logs and show them.
   *
   * @throws Exception
   */
@Test
public void testAggregatedLogsBlock() throws Exception {
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration = getConfiguration();
writeLogs("target/logs/logs/application_0_0001/container_0_0001_01_000001");
writeLog(configuration, "admin");
AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
configuration, "admin", "container_0_0001_01_000001");
ByteArrayOutputStream data = new ByteArrayOutputStream();
PrintWriter printWriter = new PrintWriter(data);
HtmlBlock html = new HtmlBlockForTest();
HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
aggregatedBlock.render(block);
block.getWriter().flush();
String out = data.toString();
assertTrue(out.contains("test log1"));
assertTrue(out.contains("test log2"));
assertTrue(out.contains("test log3"));
}
  /**
   * Log files were deleted.
   * @throws Exception
   */
@Test
public void testNoLogs() throws Exception {
FileUtil.fullyDelete(new File("target/logs"));
Configuration configuration = getConfiguration();
File f = new File("target/logs/logs/application_0_0001/container_0_0001_01_000001");
if (!f.exists()) {
assertTrue(f.mkdirs());
}
writeLog(configuration, "admin");
AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
configuration, "admin", "container_0_0001_01_000001");
ByteArrayOutputStream data = new ByteArrayOutputStream();
PrintWriter printWriter = new PrintWriter(data);
HtmlBlock html = new HtmlBlockForTest();
HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
aggregatedBlock.render(block);
block.getWriter().flush();
String out = data.toString();
assertTrue(out.contains("No logs available for container container_0_0001_01_000001"));
}
private Configuration getConfiguration() {
Configuration configuration = new Configuration();
configuration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
configuration.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, "target/logs");
configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
configuration.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
return configuration;
}
private AggregatedLogsBlockForTest getAggregatedLogsBlockForTest(
Configuration configuration, String user, String containerId) {
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getRemoteUser()).thenReturn(user);
AggregatedLogsBlockForTest aggregatedBlock = new AggregatedLogsBlockForTest(
configuration);
aggregatedBlock.setRequest(request);
aggregatedBlock.moreParams().put(YarnWebParams.CONTAINER_ID, containerId);
aggregatedBlock.moreParams().put(YarnWebParams.NM_NODENAME,
"localhost:1234");
aggregatedBlock.moreParams().put(YarnWebParams.APP_OWNER, user);
aggregatedBlock.moreParams().put("start", "");
aggregatedBlock.moreParams().put("end", "");
aggregatedBlock.moreParams().put(YarnWebParams.ENTITY_STRING, "entity");
return aggregatedBlock;
}
private void writeLog(Configuration configuration, String user)
throws Exception {
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1);
ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1);
ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1);
String path = "target/logs/" + user
+ "/logs/application_0_0001/localhost_1234";
File f = new File(path);
if (!f.getParentFile().exists()) {
assertTrue(f.getParentFile().mkdirs());
}
List<String> rootLogDirs = Arrays.asList("target/logs/logs");
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
AggregatedLogFormat.LogWriter writer = new AggregatedLogFormat.LogWriter(
configuration, new Path(path), ugi);
writer.writeApplicationOwner(ugi.getUserName());
Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>();
appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
writer.writeApplicationACLs(appAcls);
writer.append(new AggregatedLogFormat.LogKey("container_0_0001_01_000001"),
new AggregatedLogFormat.LogValue(rootLogDirs, containerId,UserGroupInformation.getCurrentUser().getShortUserName()));
writer.close();
}
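  // Sketch of the aggregated file produced above (comment added): LogWriter
  // first records the application owner and the VIEW_APP ACL, then appends
  // one (LogKey, LogValue) pair per container, which is what lets
  // AggregatedLogsBlock enforce access before rendering a container's logs.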
private void writeLogs(String dirName) throws Exception {
File f = new File(dirName + File.separator + "log1");
if (!f.getParentFile().exists()) {
assertTrue(f.getParentFile().mkdirs());
}
writeLog(dirName + File.separator + "log1", "test log1");
writeLog(dirName + File.separator + "log2", "test log2");
writeLog(dirName + File.separator + "log3", "test log3");
}
private void writeLog(String fileName, String text) throws Exception {
File f = new File(fileName);
Writer writer = new FileWriter(f);
writer.write(text);
writer.flush();
writer.close();
}
}
| 9,567 | 36.669291 | 144 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/server/security/TestApplicationACLsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
public class TestApplicationACLsManager {
private static final String ADMIN_USER = "adminuser";
private static final String APP_OWNER = "appuser";
private static final String TESTUSER1 = "testuser1";
private static final String TESTUSER2 = "testuser2";
private static final String TESTUSER3 = "testuser3";
@Test
public void testCheckAccess() {
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,
true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,
ADMIN_USER);
ApplicationACLsManager aclManager = new ApplicationACLsManager(conf);
Map<ApplicationAccessType, String> aclMap =
new HashMap<ApplicationAccessType, String>();
aclMap.put(ApplicationAccessType.VIEW_APP, TESTUSER1 + "," + TESTUSER3);
aclMap.put(ApplicationAccessType.MODIFY_APP, TESTUSER1);
ApplicationId appId = ApplicationId.newInstance(1, 1);
aclManager.addApplication(appId, aclMap);
//User in ACL, should be allowed access
UserGroupInformation testUser1 = UserGroupInformation
.createRemoteUser(TESTUSER1);
assertTrue(aclManager.checkAccess(testUser1, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(testUser1, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
//User NOT in ACL, should not be allowed access
UserGroupInformation testUser2 = UserGroupInformation
.createRemoteUser(TESTUSER2);
assertFalse(aclManager.checkAccess(testUser2, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertFalse(aclManager.checkAccess(testUser2, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
//User has View access, but not modify access
UserGroupInformation testUser3 = UserGroupInformation
.createRemoteUser(TESTUSER3);
assertTrue(aclManager.checkAccess(testUser3, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertFalse(aclManager.checkAccess(testUser3, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
//Application Owner should have all access
UserGroupInformation appOwner = UserGroupInformation
.createRemoteUser(APP_OWNER);
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
//Admin should have all access
UserGroupInformation adminUser = UserGroupInformation
.createRemoteUser(ADMIN_USER);
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
}
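  // Decision rule exercised above (added sketch, approximate): checkAccess()
  // allows the application owner and any admin unconditionally; everyone
  // else is allowed only if listed in the ACL string for the requested
  // ApplicationAccessType. Roughly:
  //
  //   allowed = user.equals(owner)
  //       || adminAcl.isUserAllowed(user)
  //       || aclFor(appId, accessType).isUserAllowed(user);
  //
  // aclFor is a hypothetical name for this sketch.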
@Test
public void testCheckAccessWithNullACLS() {
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,
true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,
ADMIN_USER);
ApplicationACLsManager aclManager = new ApplicationACLsManager(conf);
UserGroupInformation appOwner = UserGroupInformation
.createRemoteUser(APP_OWNER);
ApplicationId appId = ApplicationId.newInstance(1, 1);
    //Application ACLs were not added; the application owner should still
    //have full access
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
//Admin should have all access
UserGroupInformation adminUser = UserGroupInformation
.createRemoteUser(ADMIN_USER);
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
    // A regular user should not have access
UserGroupInformation testUser1 = UserGroupInformation
.createRemoteUser(TESTUSER1);
assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
}
@Test
public void testCheckAccessWithPartialACLS() {
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,
true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,
ADMIN_USER);
ApplicationACLsManager aclManager = new ApplicationACLsManager(conf);
UserGroupInformation appOwner = UserGroupInformation
.createRemoteUser(APP_OWNER);
// Add only the VIEW ACLS
Map<ApplicationAccessType, String> aclMap =
new HashMap<ApplicationAccessType, String>();
    aclMap.put(ApplicationAccessType.VIEW_APP, TESTUSER1);
ApplicationId appId = ApplicationId.newInstance(1, 1);
aclManager.addApplication(appId, aclMap);
//Application Owner should have all access even if Application ACL is not added
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
//Admin should have all access
UserGroupInformation adminUser = UserGroupInformation
.createRemoteUser(ADMIN_USER);
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
// testuser1 should have view access only
UserGroupInformation testUser1 = UserGroupInformation
.createRemoteUser(TESTUSER1);
assertTrue(aclManager.checkAccess(testUser1, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
    // testuser2 should not have access
UserGroupInformation testUser2 = UserGroupInformation
.createRemoteUser(TESTUSER2);
assertFalse(aclManager.checkAccess(testUser2, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertFalse(aclManager.checkAccess(testUser2, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
}
}
| 7,729 | 41.707182 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience;
| 924 | 41.045455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
public class LocalConfigurationProvider extends ConfigurationProvider {
@Override
public InputStream getConfigurationInputStream(Configuration bootstrapConf,
String name) throws IOException, YarnException {
if (name == null || name.isEmpty()) {
throw new YarnException(
"Illegal argument! The parameter should not be null or empty");
} else if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name)) {
return bootstrapConf.getConfResourceAsInputStream(name);
}
return new FileInputStream(name);
}
@Override
public void initInternal(Configuration bootstrapConf) throws Exception {
// Do nothing
}
@Override
public void closeInternal() throws Exception {
// Do nothing
}
}
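// Illustrative usage sketch (not part of the original file): well-known RM
// configuration files resolve from the classpath, anything else from the
// local filesystem, through the same entry point:
//
//   ConfigurationProvider provider = new LocalConfigurationProvider();
//   provider.init(conf);
//   InputStream in =
//       provider.getConfigurationInputStream(conf, "yarn-site.xml");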
| 2,045 | 34.275862 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
public class FileSystemBasedConfigurationProvider
extends ConfigurationProvider {
private static final Log LOG = LogFactory
.getLog(FileSystemBasedConfigurationProvider.class);
private FileSystem fs;
private Path configDir;
@Override
public synchronized InputStream getConfigurationInputStream(
Configuration bootstrapConf, String name) throws IOException,
YarnException {
if (name == null || name.isEmpty()) {
throw new YarnException(
"Illegal argument! The parameter should not be null or empty");
}
    Path filePath;
    if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name)) {
      filePath = new Path(this.configDir, name);
    } else {
      filePath = new Path(name);
    }
    // Single existence check covers both cases.
    if (!fs.exists(filePath)) {
      LOG.info(filePath + " not found");
      return null;
    }
    return fs.open(filePath);
}
@Override
public synchronized void initInternal(Configuration bootstrapConf)
throws Exception {
configDir =
new Path(bootstrapConf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
fs = configDir.getFileSystem(bootstrapConf);
if (!fs.exists(configDir)) {
fs.mkdirs(configDir);
}
}
@Override
public synchronized void closeInternal() throws Exception {
fs.close();
}
}
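// --- Hedged usage sketch (an addition, not part of the original file):
// reading a configuration resource through the provider. Names listed in
// YarnConfiguration.RM_CONFIGURATION_FILES are resolved against the
// configured store directory; anything else is treated as a plain path.
// This assumes ConfigurationProvider's public init()/close() wrappers around
// initInternal()/closeInternal(); "ExampleProviderUsage" and the resource
// name are hypothetical.
class ExampleProviderUsage {
  static void loadResource(Configuration bootstrapConf) throws Exception {
    FileSystemBasedConfigurationProvider provider =
        new FileSystemBasedConfigurationProvider();
    provider.init(bootstrapConf); // sets up fs and configDir
    InputStream in =
        provider.getConfigurationInputStream(bootstrapConf, "core-site.xml");
    // ... parse the stream (in may be null if the file is absent), then:
    provider.close();
  }
}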
| 2,900 | 32.344828 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.File;
import java.io.Flushable;
import java.util.LinkedList;
import java.util.Queue;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.FileAppender;
import org.apache.log4j.spi.LoggingEvent;
/**
 * A simple log4j appender for container logs.
 */
@Public
@Unstable
public class ContainerLogAppender extends FileAppender
    implements Flushable {
private String containerLogDir;
private String containerLogFile;
  // So that log4j can configure it from the configuration (log4j.properties).
private int maxEvents;
private Queue<LoggingEvent> tail = null;
private boolean closing = false;
@Override
public void activateOptions() {
synchronized (this) {
if (maxEvents > 0) {
tail = new LinkedList<LoggingEvent>();
}
setFile(new File(this.containerLogDir, containerLogFile).toString());
setAppend(true);
super.activateOptions();
}
}
@Override
public void append(LoggingEvent event) {
synchronized (this) {
if (closing) { // When closing drop any new/transitive CLA appending
return;
}
if (tail == null) {
super.append(event);
} else {
if (tail.size() >= maxEvents) {
tail.remove();
}
tail.add(event);
}
}
}
@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}
@Override
public synchronized void close() {
closing = true;
if (tail != null) {
for (LoggingEvent event : tail) {
super.append(event);
}
}
super.close();
}
/**
* Getter/Setter methods for log4j.
*/
public String getContainerLogDir() {
return this.containerLogDir;
}
public void setContainerLogDir(String containerLogDir) {
this.containerLogDir = containerLogDir;
}
public String getContainerLogFile() {
return containerLogFile;
}
public void setContainerLogFile(String containerLogFile) {
this.containerLogFile = containerLogFile;
}
private static final int EVENT_SIZE = 100;
public long getTotalLogFileSize() {
return maxEvents * EVENT_SIZE;
}
  public void setTotalLogFileSize(long logSize) {
    // Divide before casting so that sizes beyond Integer.MAX_VALUE do not
    // overflow in the cast.
    maxEvents = (int) (logSize / EVENT_SIZE);
  }
}
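// --- Hedged usage sketch (an addition, not part of the original file):
// programmatic setup mirroring what log4j.properties would normally do. With
// a positive total size, the appender buffers only the last
// totalLogFileSize / EVENT_SIZE events in memory and writes them on close().
// "ExampleContainerLogAppenderSetup" and its arguments are hypothetical.
class ExampleContainerLogAppenderSetup {
  static ContainerLogAppender create(String logDir, String logFile) {
    ContainerLogAppender appender = new ContainerLogAppender();
    appender.setLayout(new org.apache.log4j.PatternLayout("%m%n"));
    appender.setContainerLogDir(logDir);
    appender.setContainerLogFile(logFile);
    appender.setTotalLogFileSize(10000L); // keeps roughly the last 100 events
    appender.activateOptions(); // opens <logDir>/<logFile> in append mode
    return appender;
  }
}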
| 3,173 | 24.596774 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.lang.Thread.UncaughtExceptionHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ShutdownHookManager;
/**
 * This class is intended to be installed by calling
 * {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
 * in the main entry point. It tries to cleanly shut down programs that use
 * the YARN event framework.
 *
 * Note: right now it will only shut down the program if an Error is caught;
 * any other exception is just logged.
 */
@Public
@Evolving
public class YarnUncaughtExceptionHandler implements UncaughtExceptionHandler {
private static final Log LOG = LogFactory.getLog(YarnUncaughtExceptionHandler.class);
@Override
public void uncaughtException(Thread t, Throwable e) {
    if (ShutdownHookManager.get().isShutdownInProgress()) {
      LOG.error("Thread " + t + " threw a Throwable, but we are shutting " +
          "down, so ignoring it", e);
    } else if (e instanceof Error) {
      try {
        LOG.fatal("Thread " + t + " threw an Error. Shutting down now...", e);
      } catch (Throwable err) {
        // We don't want to fail to exit because of a logging issue.
      }
      if (e instanceof OutOfMemoryError) {
        // After an OutOfMemoryError the JVM's behavior is undefined, so don't
        // even try to clean up, or we can get stuck on shutdown.
        try {
          System.err.println("Halting due to Out Of Memory Error...");
        } catch (Throwable err) {
          // Again, we don't want to fail to halt because of logging issues.
        }
        ExitUtil.halt(-1);
      } else {
        ExitUtil.terminate(-1);
      }
    } else {
      LOG.error("Thread " + t + " threw an Exception.", e);
    }
}
}
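// --- Hedged usage sketch (an addition, not part of the original file): the
// javadoc above says to install the handler in the main entry point.
// "ExampleYarnDaemon" is a hypothetical class name.
class ExampleYarnDaemon {
  public static void main(String[] args) {
    // Install as the process-wide default before any other threads start.
    Thread.setDefaultUncaughtExceptionHandler(
        new YarnUncaughtExceptionHandler());
    // ... start services; an uncaught Error on any thread now triggers a
    // clean ExitUtil.terminate (or an immediate halt on OutOfMemoryError).
  }
}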
| 2,804 | 37.958333 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerRollingLogAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.RollingFileAppender;
import java.io.File;
import java.io.Flushable;
/**
 * A simple rolling log4j appender for container logs.
 */
@Public
@Unstable
public class ContainerRollingLogAppender extends RollingFileAppender
implements Flushable {
private String containerLogDir;
private String containerLogFile;
@Override
public void activateOptions() {
synchronized (this) {
setFile(new File(this.containerLogDir, containerLogFile).toString());
setAppend(true);
super.activateOptions();
}
}
@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}
/**
* Getter/Setter methods for log4j.
*/
public String getContainerLogDir() {
return this.containerLogDir;
}
public void setContainerLogDir(String containerLogDir) {
this.containerLogDir = containerLogDir;
}
public String getContainerLogFile() {
return containerLogFile;
}
public void setContainerLogFile(String containerLogFile) {
this.containerLogFile = containerLogFile;
}
}
| 2,035 | 26.146667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factory.providers;
| 857 | 41.9 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RpcFactoryProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factory.providers;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RpcClientFactory;
import org.apache.hadoop.yarn.factories.RpcServerFactory;
/**
* A public static get() method must be present in the Client/Server Factory implementation.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class RpcFactoryProvider {
private RpcFactoryProvider() {
}
public static RpcServerFactory getServerFactory(Configuration conf) {
if (conf == null) {
conf = new Configuration();
}
String serverFactoryClassName = conf.get(
YarnConfiguration.IPC_SERVER_FACTORY_CLASS,
YarnConfiguration.DEFAULT_IPC_SERVER_FACTORY_CLASS);
return (RpcServerFactory) getFactoryClassInstance(serverFactoryClassName);
}
public static RpcClientFactory getClientFactory(Configuration conf) {
String clientFactoryClassName = conf.get(
YarnConfiguration.IPC_CLIENT_FACTORY_CLASS,
YarnConfiguration.DEFAULT_IPC_CLIENT_FACTORY_CLASS);
return (RpcClientFactory) getFactoryClassInstance(clientFactoryClassName);
}
  private static Object getFactoryClassInstance(String factoryClassName) {
    try {
      Class<?> clazz = Class.forName(factoryClassName);
      // The factory contract: a public static no-argument get() method.
      Method method = clazz.getMethod("get");
      method.setAccessible(true);
      return method.invoke(null);
    } catch (ClassNotFoundException | NoSuchMethodException
        | InvocationTargetException | IllegalAccessException e) {
      throw new YarnRuntimeException(e);
    }
  }
}
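// --- Hedged usage sketch (an addition, not part of the original file):
// resolving the configured factories. Each call loads the class named by the
// corresponding YarnConfiguration key and invokes its public static get()
// method reflectively, per the contract in the class javadoc above.
// "ExampleRpcFactoryUsage" is a hypothetical class name.
class ExampleRpcFactoryUsage {
  static void resolveFactories(Configuration conf) {
    RpcServerFactory serverFactory = RpcFactoryProvider.getServerFactory(conf);
    RpcClientFactory clientFactory = RpcFactoryProvider.getClientFactory(conf);
    // The factories can now be used to build protocol servers and clients.
  }
}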
| 2,832 | 35.792208 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/sharedcache/SharedCacheChecksumFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sharedcache;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
 * A factory class for creating checksum objects based on a configurable
 * algorithm implementation.
 */
@SuppressWarnings("unchecked")
@Public
@Evolving
public class SharedCacheChecksumFactory {
private static final
ConcurrentMap<Class<? extends SharedCacheChecksum>,SharedCacheChecksum>
instances =
new ConcurrentHashMap<Class<? extends SharedCacheChecksum>,
SharedCacheChecksum>();
private static final Class<? extends SharedCacheChecksum> defaultAlgorithm;
static {
try {
defaultAlgorithm = (Class<? extends SharedCacheChecksum>)
Class.forName(
YarnConfiguration.DEFAULT_SHARED_CACHE_CHECKSUM_ALGO_IMPL);
} catch (Exception e) {
// cannot happen
throw new ExceptionInInitializerError(e);
}
}
  /**
   * Get a <code>SharedCacheChecksum</code> object based on the configurable
   * algorithm implementation
   * (see <code>yarn.sharedcache.checksum.algo.impl</code>).
   *
   * @param conf configuration that names the checksum implementation class
   * @return <code>SharedCacheChecksum</code> object
   */
public static SharedCacheChecksum getChecksum(Configuration conf) {
Class<? extends SharedCacheChecksum> clazz =
conf.getClass(YarnConfiguration.SHARED_CACHE_CHECKSUM_ALGO_IMPL,
defaultAlgorithm, SharedCacheChecksum.class);
SharedCacheChecksum checksum = instances.get(clazz);
if (checksum == null) {
try {
checksum = ReflectionUtils.newInstance(clazz, conf);
SharedCacheChecksum old = instances.putIfAbsent(clazz, checksum);
if (old != null) {
checksum = old;
}
} catch (Exception e) {
throw new YarnRuntimeException(e);
}
}
return checksum;
}
}
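// --- Hedged usage sketch (an addition, not part of the original file):
// computing a checksum with the configured algorithm. getChecksum() caches
// one instance per implementation class, so repeated lookups are cheap.
// "ExampleChecksumUsage" is a hypothetical class name.
class ExampleChecksumUsage {
  static String checksumOf(Configuration conf, java.io.InputStream in)
      throws java.io.IOException {
    SharedCacheChecksum checksum = SharedCacheChecksumFactory.getChecksum(conf);
    return checksum.computeChecksum(in);
  }
}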
| 2,983 | 34.105882 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/sharedcache/ChecksumSHA256Impl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.sharedcache;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
 * The SHA-256 implementation of the shared cache checksum interface.
 */
@Private
@Evolving
public class ChecksumSHA256Impl implements SharedCacheChecksum {
  @Override
  public String computeChecksum(InputStream in) throws IOException {
return DigestUtils.sha256Hex(in);
}
}
| 1,374 | 35.184211 | 75 |
java
|