repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationRequestEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
import java.util.Collection;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
/**
* Event that requests that the {@link ResourceLocalizationService} localize
* a set of resources for the given container. This is generated by
* {@link ContainerImpl} during container initialization.
*/
public class ContainerLocalizationRequestEvent extends
ContainerLocalizationEvent {
private final Map<LocalResourceVisibility, Collection<LocalResourceRequest>>
rsrc;
/**
* Event requesting localization of the given resources for a container.
* @param c the container whose resources are to be localized
* @param rsrc the resources to localize, keyed by visibility
*/
public ContainerLocalizationRequestEvent(Container c,
Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrc) {
super(LocalizationEventType.INIT_CONTAINER_RESOURCES, c);
this.rsrc = rsrc;
}
public Map<LocalResourceVisibility, Collection<LocalResourceRequest>>
getRequestedResources() {
return rsrc;
}
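// Illustrative usage sketch, not part of the original file: ContainerImpl
// is expected to build the visibility-to-requests map and dispatch this
// event during container init, roughly as follows (variable names here
// are hypothetical):
//
//   Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
//       new HashMap<>();
//   req.put(LocalResourceVisibility.APPLICATION, appResourceRequests);
//   dispatcher.getEventHandler().handle(
//       new ContainerLocalizationRequestEvent(container, req));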
}
| 2,217 | 38.607143 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
/**
* Events handled by {@link ResourceLocalizationService}.
*/
public class LocalizationEvent extends AbstractEvent<LocalizationEventType> {
public LocalizationEvent(LocalizationEventType event) {
super(event);
}
}
| 1,261 | 36.117647 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/LogHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
public interface LogHandler extends EventHandler<LogHandlerEvent> {
public void handle(LogHandlerEvent event);
}
| 1,137 | 42.769231 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.RejectedExecutionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLogDeleterState;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Log Handler which schedules deletion of log files based on the configured log
* retention time.
*/
public class NonAggregatingLogHandler extends AbstractService implements
LogHandler {
private static final Log LOG = LogFactory
.getLog(NonAggregatingLogHandler.class);
private final Dispatcher dispatcher;
private final DeletionService delService;
private final Map<ApplicationId, String> appOwners;
private final LocalDirsHandlerService dirsHandler;
private final NMStateStoreService stateStore;
private long deleteDelaySeconds;
private ScheduledThreadPoolExecutor sched;
public NonAggregatingLogHandler(Dispatcher dispatcher,
DeletionService delService, LocalDirsHandlerService dirsHandler,
NMStateStoreService stateStore) {
super(NonAggregatingLogHandler.class.getName());
this.dispatcher = dispatcher;
this.delService = delService;
this.dirsHandler = dirsHandler;
this.stateStore = stateStore;
this.appOwners = new ConcurrentHashMap<ApplicationId, String>();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
// Default 3 hours.
this.deleteDelaySeconds =
conf.getLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS,
YarnConfiguration.DEFAULT_NM_LOG_RETAIN_SECONDS);
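// Illustrative configuration sketch, not part of the original file: the
// retention window read above maps to yarn-site.xml, e.g.
//
//   <property>
//     <name>yarn.nodemanager.log.retain-seconds</name>
//     <value>10800</value>  <!-- the default, i.e. 3 hours -->
//   </property>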
sched = createScheduledThreadPoolExecutor(conf);
super.serviceInit(conf);
recover();
}
@Override
protected void serviceStop() throws Exception {
if (sched != null) {
sched.shutdown();
boolean isShutdown = false;
try {
isShutdown = sched.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
sched.shutdownNow();
isShutdown = true;
}
if (!isShutdown) {
sched.shutdownNow();
}
}
super.serviceStop();
}
FileContext getLocalFileContext(Configuration conf) {
try {
return FileContext.getLocalFSFileContext(conf);
} catch (IOException e) {
throw new YarnRuntimeException("Failed to access local fs", e);
}
}
private void recover() throws IOException {
if (stateStore.canRecover()) {
RecoveredLogDeleterState state = stateStore.loadLogDeleterState();
long now = System.currentTimeMillis();
for (Map.Entry<ApplicationId, LogDeleterProto> entry :
state.getLogDeleterMap().entrySet()) {
ApplicationId appId = entry.getKey();
LogDeleterProto proto = entry.getValue();
long deleteDelayMsec = proto.getDeletionTime() - now;
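// Note: deleteDelayMsec can be negative if the deletion time already
// passed while the NM was down; ScheduledThreadPoolExecutor treats a
// negative delay as "run immediately", which is the desired behavior.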
if (LOG.isDebugEnabled()) {
LOG.debug("Scheduling deletion of " + appId + " logs in "
+ deleteDelayMsec + " msec");
}
LogDeleterRunnable logDeleter =
new LogDeleterRunnable(proto.getUser(), appId);
try {
sched.schedule(logDeleter, deleteDelayMsec, TimeUnit.MILLISECONDS);
} catch (RejectedExecutionException e) {
// Handling this event in local thread before starting threads
// or after calling sched.shutdownNow().
logDeleter.run();
}
}
}
}
@SuppressWarnings("unchecked")
@Override
public void handle(LogHandlerEvent event) {
switch (event.getType()) {
case APPLICATION_STARTED:
LogHandlerAppStartedEvent appStartedEvent =
(LogHandlerAppStartedEvent) event;
this.appOwners.put(appStartedEvent.getApplicationId(),
appStartedEvent.getUser());
this.dispatcher.getEventHandler().handle(
new ApplicationEvent(appStartedEvent.getApplicationId(),
ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
break;
case CONTAINER_FINISHED:
// Ignore
break;
case APPLICATION_FINISHED:
LogHandlerAppFinishedEvent appFinishedEvent =
(LogHandlerAppFinishedEvent) event;
ApplicationId appId = appFinishedEvent.getApplicationId();
// Schedule - so that logs are available on the UI till they're deleted.
LOG.info("Scheduling Log Deletion for application: "
+ appId + ", with delay of "
+ this.deleteDelaySeconds + " seconds");
String user = appOwners.remove(appId);
if (user == null) {
LOG.error("Unable to locate user for " + appId);
break;
}
LogDeleterRunnable logDeleter = new LogDeleterRunnable(user, appId);
long deletionTimestamp = System.currentTimeMillis()
+ this.deleteDelaySeconds * 1000;
LogDeleterProto deleterProto = LogDeleterProto.newBuilder()
.setUser(user)
.setDeletionTime(deletionTimestamp)
.build();
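// The deletion time is persisted as an absolute timestamp so that, after
// an NM restart, recover() can compute the remaining delay rather than
// restarting the full retention window.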
try {
stateStore.storeLogDeleter(appId, deleterProto);
} catch (IOException e) {
LOG.error("Unable to record log deleter state", e);
}
try {
sched.schedule(logDeleter, this.deleteDelaySeconds,
TimeUnit.SECONDS);
} catch (RejectedExecutionException e) {
// Handling this event in local thread before starting threads
// or after calling sched.shutdownNow().
logDeleter.run();
}
break;
default:
; // Ignore
}
}
ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor(
Configuration conf) {
ThreadFactory tf =
new ThreadFactoryBuilder().setNameFormat("LogDeleter #%d").build();
sched =
new ScheduledThreadPoolExecutor(conf.getInt(
YarnConfiguration.NM_LOG_DELETION_THREADS_COUNT,
YarnConfiguration.DEFAULT_NM_LOG_DELETE_THREAD_COUNT), tf);
return sched;
}
class LogDeleterRunnable implements Runnable {
private String user;
private ApplicationId applicationId;
public LogDeleterRunnable(String user, ApplicationId applicationId) {
this.user = user;
this.applicationId = applicationId;
}
@Override
@SuppressWarnings("unchecked")
public void run() {
List<Path> localAppLogDirs = new ArrayList<Path>();
FileContext lfs = getLocalFileContext(getConfig());
for (String rootLogDir : dirsHandler.getLogDirsForCleanup()) {
Path logDir = new Path(rootLogDir, applicationId.toString());
try {
lfs.getFileStatus(logDir);
localAppLogDirs.add(logDir);
} catch (UnsupportedFileSystemException ue) {
LOG.warn("Unsupported file system used for log dir " + logDir, ue);
continue;
} catch (IOException ie) {
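// An IOException here typically means this app never wrote logs under
// this root dir, so the dir is simply skipped.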
continue;
}
}
// Inform the application before the actual delete itself, so that links
// to logs will no longer be there on NM web-UI.
NonAggregatingLogHandler.this.dispatcher.getEventHandler().handle(
new ApplicationEvent(this.applicationId,
ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
if (localAppLogDirs.size() > 0) {
NonAggregatingLogHandler.this.delService.delete(user, null,
localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
}
try {
NonAggregatingLogHandler.this.stateStore.removeLogDeleter(
this.applicationId);
} catch (IOException e) {
LOG.error("Error removing log deletion state", e);
}
}
@Override
public String toString() {
return "LogDeleter for AppId " + this.applicationId.toString()
+ ", owned by " + user;
}
}
}
| 10,219 | 38.007634 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class LogHandlerEvent extends AbstractEvent<LogHandlerEventType> {
public LogHandlerEvent(LogHandlerEventType type) {
super(type);
}
}
| 1,080 | 36.275862 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
import org.apache.hadoop.yarn.api.records.ContainerId;
public class LogHandlerContainerFinishedEvent extends LogHandlerEvent {
private final ContainerId containerId;
private final int exitCode;
public LogHandlerContainerFinishedEvent(ContainerId containerId,
int exitCode) {
super(LogHandlerEventType.CONTAINER_FINISHED);
this.containerId = containerId;
this.exitCode = exitCode;
}
public ContainerId getContainerId() {
return this.containerId;
}
public int getExitCode() {
return this.exitCode;
}
}
| 1,426 | 31.431818 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEventType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
public enum LogHandlerEventType {
APPLICATION_STARTED, CONTAINER_FINISHED, APPLICATION_FINISHED
}
| 978 | 39.791667 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerAppStartedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
import java.util.Map;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
public class LogHandlerAppStartedEvent extends LogHandlerEvent {
private final ApplicationId applicationId;
private final ContainerLogsRetentionPolicy retentionPolicy;
private final String user;
private final Credentials credentials;
private final Map<ApplicationAccessType, String> appAcls;
private final LogAggregationContext logAggregationContext;
public LogHandlerAppStartedEvent(ApplicationId appId, String user,
Credentials credentials, ContainerLogsRetentionPolicy retentionPolicy,
Map<ApplicationAccessType, String> appAcls) {
this(appId, user, credentials, retentionPolicy, appAcls, null);
}
public LogHandlerAppStartedEvent(ApplicationId appId, String user,
Credentials credentials, ContainerLogsRetentionPolicy retentionPolicy,
Map<ApplicationAccessType, String> appAcls,
LogAggregationContext logAggregationContext) {
super(LogHandlerEventType.APPLICATION_STARTED);
this.applicationId = appId;
this.user = user;
this.credentials = credentials;
this.retentionPolicy = retentionPolicy;
this.appAcls = appAcls;
this.logAggregationContext = logAggregationContext;
}
public ApplicationId getApplicationId() {
return this.applicationId;
}
public Credentials getCredentials() {
return this.credentials;
}
public ContainerLogsRetentionPolicy getLogRetentionPolicy() {
return this.retentionPolicy;
}
public String getUser() {
return this.user;
}
public Map<ApplicationAccessType, String> getApplicationAcls() {
return this.appAcls;
}
public LogAggregationContext getLogAggregationContext() {
return this.logAggregationContext;
}
}
| 2,900 | 34.814815 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerAppFinishedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
import org.apache.hadoop.yarn.api.records.ApplicationId;
public class LogHandlerAppFinishedEvent extends LogHandlerEvent {
private final ApplicationId applicationId;
public LogHandlerAppFinishedEvent(ApplicationId appId) {
super(LogHandlerEventType.APPLICATION_FINISHED);
this.applicationId = appId;
}
public ApplicationId getApplicationId() {
return this.applicationId;
}
}
| 1,280 | 33.621622 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
public interface ContainersMonitor extends Service,
EventHandler<ContainersMonitorEvent>, ResourceView {
public ResourceUtilization getContainersUtilization();
}
| 1,263 | 41.133333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import com.google.common.base.Preconditions;
public class ContainersMonitorImpl extends AbstractService implements
ContainersMonitor {
final static Log LOG = LogFactory
.getLog(ContainersMonitorImpl.class);
private long monitoringInterval;
private MonitoringThread monitoringThread;
private boolean containerMetricsEnabled;
private long containerMetricsPeriodMs;
final List<ContainerId> containersToBeRemoved;
final Map<ContainerId, ProcessTreeInfo> containersToBeAdded;
Map<ContainerId, ProcessTreeInfo> trackingContainers =
new HashMap<ContainerId, ProcessTreeInfo>();
final ContainerExecutor containerExecutor;
private final Dispatcher eventDispatcher;
private final Context context;
private ResourceCalculatorPlugin resourceCalculatorPlugin;
private Configuration conf;
private Class<? extends ResourceCalculatorProcessTree> processTreeClass;
private long maxVmemAllottedForContainers = UNKNOWN_MEMORY_LIMIT;
private long maxPmemAllottedForContainers = UNKNOWN_MEMORY_LIMIT;
private boolean pmemCheckEnabled;
private boolean vmemCheckEnabled;
private long maxVCoresAllottedForContainers;
private static final long UNKNOWN_MEMORY_LIMIT = -1L;
private int nodeCpuPercentageForYARN;
private ResourceUtilization containersUtilization;
public ContainersMonitorImpl(ContainerExecutor exec,
AsyncDispatcher dispatcher, Context context) {
super("containers-monitor");
this.containerExecutor = exec;
this.eventDispatcher = dispatcher;
this.context = context;
this.containersToBeAdded = new HashMap<ContainerId, ProcessTreeInfo>();
this.containersToBeRemoved = new ArrayList<ContainerId>();
this.monitoringThread = new MonitoringThread();
this.containersUtilization = ResourceUtilization.newInstance(0, 0, 0.0f);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.monitoringInterval =
conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);
Class<? extends ResourceCalculatorPlugin> clazz =
conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
ResourceCalculatorPlugin.class);
this.resourceCalculatorPlugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
LOG.info(" Using ResourceCalculatorPlugin : "
+ this.resourceCalculatorPlugin);
processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
ResourceCalculatorProcessTree.class);
this.conf = conf;
LOG.info(" Using ResourceCalculatorProcessTree : "
+ this.processTreeClass);
this.containerMetricsEnabled =
conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
this.containerMetricsPeriodMs =
conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);
long configuredPMemForContainers =
NodeManagerHardwareUtils.getContainerMemoryMB(conf) * 1024 * 1024L;
long configuredVCoresForContainers =
NodeManagerHardwareUtils.getVCores(conf);
// Setting these irrespective of whether checks are enabled. Required in
// the UI.
// ///////// Physical memory configuration //////
this.maxPmemAllottedForContainers = configuredPMemForContainers;
this.maxVCoresAllottedForContainers = configuredVCoresForContainers;
// ///////// Virtual memory configuration //////
float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
Preconditions.checkArgument(vmemRatio > 0.99f,
YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
this.maxVmemAllottedForContainers =
(long) (vmemRatio * configuredPMemForContainers);
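// Worked example (illustrative): with 8 GB configured for containers and
// the default ratio of 2.1, containers may use up to ~16.8 GB of virtual
// memory in aggregate.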
pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
nodeCpuPercentageForYARN =
NodeManagerHardwareUtils.getNodeCpuPercentage(conf);
if (pmemCheckEnabled) {
// Logging if actual pmem cannot be determined.
long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
if (this.resourceCalculatorPlugin != null) {
totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
.getPhysicalMemorySize();
if (totalPhysicalMemoryOnNM <= 0) {
LOG.warn("NodeManager's totalPmem could not be calculated. "
+ "Setting it to " + UNKNOWN_MEMORY_LIMIT);
totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
}
}
if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
LOG.warn("NodeManager configured with "
+ TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
"", 1)
+ " physical memory allocated to containers, which is more than "
+ "80% of the total physical memory available ("
+ TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
1) + "). Thrashing might happen.");
}
}
super.serviceInit(conf);
}
private boolean isEnabled() {
if (resourceCalculatorPlugin == null) {
LOG.info("ResourceCalculatorPlugin is unavailable on this system. "
+ this.getClass().getName() + " is disabled.");
return false;
}
if (ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("0", processTreeClass, conf) == null) {
LOG.info("ResourceCalculatorProcessTree is unavailable on this system. "
+ this.getClass().getName() + " is disabled.");
return false;
}
if (!(isPmemCheckEnabled() || isVmemCheckEnabled())) {
LOG.info("Neither virutal-memory nor physical-memory monitoring is " +
"needed. Not running the monitor-thread");
return false;
}
return true;
}
@Override
protected void serviceStart() throws Exception {
if (this.isEnabled()) {
this.monitoringThread.start();
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (this.isEnabled()) {
this.monitoringThread.interrupt();
try {
this.monitoringThread.join();
} catch (InterruptedException e) {
;
}
}
super.serviceStop();
}
private static class ProcessTreeInfo {
private ContainerId containerId;
private String pid;
private ResourceCalculatorProcessTree pTree;
private long vmemLimit;
private long pmemLimit;
private int cpuVcores;
public ProcessTreeInfo(ContainerId containerId, String pid,
ResourceCalculatorProcessTree pTree, long vmemLimit, long pmemLimit,
int cpuVcores) {
this.containerId = containerId;
this.pid = pid;
this.pTree = pTree;
this.vmemLimit = vmemLimit;
this.pmemLimit = pmemLimit;
this.cpuVcores = cpuVcores;
}
public ContainerId getContainerId() {
return this.containerId;
}
public String getPID() {
return this.pid;
}
public void setPid(String pid) {
this.pid = pid;
}
public ResourceCalculatorProcessTree getProcessTree() {
return this.pTree;
}
public void setProcessTree(ResourceCalculatorProcessTree pTree) {
this.pTree = pTree;
}
public long getVmemLimit() {
return this.vmemLimit;
}
/**
* @return Physical memory limit for the process tree in bytes
*/
public long getPmemLimit() {
return this.pmemLimit;
}
/**
* Return the number of CPU vcores assigned.
* @return the number of CPU vcores assigned to the container
*/
public int getCpuVcores() {
return this.cpuVcores;
}
}
/**
* Check whether a container's process tree's current memory usage is over
* limit.
*
* When a java process exec's a program, it could momentarily account for
* double the size of its memory, because the JVM does a fork()+exec()
* which at fork time creates a copy of the parent's memory. If the
* monitoring thread detects the memory used by the container tree at that
* same instant, it could wrongly assume the tree is over limit and kill
* it, for no fault of the process itself.
*
* We counter this problem by employing a heuristic check:
* - if a process tree exceeds the memory limit by more than a factor of
*   two, it is killed immediately;
* - if processes in the tree older than one monitoring interval exceed the
*   memory limit at all, the tree is killed. Otherwise it is given the
*   benefit of the doubt for one more iteration.
*
* @param containerId
* Container Id for the container tree
* @param currentMemUsage
* Memory usage of a container tree
* @param curMemUsageOfAgedProcesses
* Memory usage of processes older than an iteration in a container
* tree
* @param vmemLimit
* The limit specified for the container
* @return true if the memory usage is more than twice the specified limit,
* or if processes in the tree, older than this thread's monitoring
* interval, exceed the memory limit. False, otherwise.
*/
boolean isProcessTreeOverLimit(String containerId,
long currentMemUsage,
long curMemUsageOfAgedProcesses,
long vmemLimit) {
boolean isOverLimit = false;
if (currentMemUsage > (2 * vmemLimit)) {
LOG.warn("Process tree for container: " + containerId
+ " running over twice " + "the configured limit. Limit=" + vmemLimit
+ ", current usage = " + currentMemUsage);
isOverLimit = true;
} else if (curMemUsageOfAgedProcesses > vmemLimit) {
LOG.warn("Process tree for container: " + containerId
+ " has processes older than 1 "
+ "iteration running over the configured limit. Limit=" + vmemLimit
+ ", current usage = " + curMemUsageOfAgedProcesses);
isOverLimit = true;
}
return isOverLimit;
}
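// Worked example (illustrative, not in the original source): with
// vmemLimit = 2 GB, a tree currently using 5 GB is killed at once
// (5 GB > 2 * 2 GB). A tree using 3 GB whose aged processes (older than
// one monitoring interval) use 2.5 GB is also killed (2.5 GB > 2 GB).
// A tree using 3 GB whose aged processes use only 1 GB survives this
// iteration, on the assumption that the spike is a transient
// fork()+exec() artifact.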
// method provided just for easy testing purposes
boolean isProcessTreeOverLimit(ResourceCalculatorProcessTree pTree,
String containerId, long limit) {
long currentMemUsage = pTree.getVirtualMemorySize();
// as processes begin with an age 1, we want to see if there are processes
// more than 1 iteration old.
long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
return isProcessTreeOverLimit(containerId, currentMemUsage,
curMemUsageOfAgedProcesses, limit);
}
private class MonitoringThread extends Thread {
public MonitoringThread() {
super("Container Monitor");
}
@Override
public void run() {
while (true) {
// Print the processTrees for debugging.
if (LOG.isDebugEnabled()) {
StringBuilder tmp = new StringBuilder("[ ");
for (ProcessTreeInfo p : trackingContainers.values()) {
tmp.append(p.getPID());
tmp.append(" ");
}
LOG.debug("Current ProcessTree list : "
+ tmp.substring(0, tmp.length()) + "]");
}
// Add new containers
synchronized (containersToBeAdded) {
for (Entry<ContainerId, ProcessTreeInfo> entry : containersToBeAdded
.entrySet()) {
ContainerId containerId = entry.getKey();
ProcessTreeInfo processTreeInfo = entry.getValue();
LOG.info("Starting resource-monitoring for " + containerId);
trackingContainers.put(containerId, processTreeInfo);
}
containersToBeAdded.clear();
}
// Remove finished containers
synchronized (containersToBeRemoved) {
for (ContainerId containerId : containersToBeRemoved) {
if (containerMetricsEnabled) {
ContainerMetrics.forContainer(
containerId, containerMetricsPeriodMs).finished();
}
trackingContainers.remove(containerId);
LOG.info("Stopping resource-monitoring for " + containerId);
}
containersToBeRemoved.clear();
}
// Temporary structure to calculate the total resource utilization of
// the containers
ResourceUtilization trackedContainersUtilization =
ResourceUtilization.newInstance(0, 0, 0.0f);
// Now do the monitoring for the trackingContainers
// Check memory usage and kill any overflowing containers
long vmemUsageByAllContainers = 0;
long pmemByAllContainers = 0;
long cpuUsagePercentPerCoreByAllContainers = 0;
long cpuUsageTotalCoresByAllContainers = 0;
for (Iterator<Map.Entry<ContainerId, ProcessTreeInfo>> it =
trackingContainers.entrySet().iterator(); it.hasNext();) {
Map.Entry<ContainerId, ProcessTreeInfo> entry = it.next();
ContainerId containerId = entry.getKey();
ProcessTreeInfo ptInfo = entry.getValue();
try {
String pId = ptInfo.getPID();
// Initialize any uninitialized processTrees
if (pId == null) {
// get pid from ContainerId
pId = containerExecutor.getProcessId(ptInfo.getContainerId());
// pId stays null if the container is not spawned yet or if its pid
// was removed from the ContainerExecutor; tracking then begins in a
// later monitoring iteration.
if (pId != null) {
LOG.debug("Tracking ProcessTree " + pId
+ " for the first time");
ResourceCalculatorProcessTree pt =
ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(pId, processTreeClass, conf);
ptInfo.setPid(pId);
ptInfo.setProcessTree(pt);
if (containerMetricsEnabled) {
ContainerMetrics usageMetrics = ContainerMetrics
.forContainer(containerId, containerMetricsPeriodMs);
int cpuVcores = ptInfo.getCpuVcores();
final int vmemLimit = (int) (ptInfo.getVmemLimit() >> 20);
final int pmemLimit = (int) (ptInfo.getPmemLimit() >> 20);
usageMetrics.recordResourceLimit(
vmemLimit, pmemLimit, cpuVcores);
usageMetrics.recordProcessId(pId);
}
}
}
// End of initializing any uninitialized processTrees
if (pId == null) {
continue; // processTree cannot be tracked
}
LOG.debug("Constructing ProcessTree for : PID = " + pId
+ " ContainerId = " + containerId);
ResourceCalculatorProcessTree pTree = ptInfo.getProcessTree();
pTree.updateProcessTree(); // update process-tree
long currentVmemUsage = pTree.getVirtualMemorySize();
long currentPmemUsage = pTree.getRssMemorySize();
// if machine has 6 cores and 3 are used,
// cpuUsagePercentPerCore should be 300% and
// cpuUsageTotalCoresPercentage should be 50%
float cpuUsagePercentPerCore = pTree.getCpuUsagePercent();
float cpuUsageTotalCoresPercentage = cpuUsagePercentPerCore /
resourceCalculatorPlugin.getNumProcessors();
// Multiply by 1000 to avoid losing data when converting to int
int milliVcoresUsed = (int) (cpuUsageTotalCoresPercentage * 1000
    * maxVCoresAllottedForContainers / nodeCpuPercentageForYARN);
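// Worked example (illustrative): on an 8-core node with 100% of its CPU
// given to YARN and 8 vcores allotted to containers, a tree burning two
// full cores has cpuUsagePercentPerCore = 200 and
// cpuUsageTotalCoresPercentage = 25, so
// milliVcoresUsed = 25 * 1000 * 8 / 100 = 2000, i.e. 2 vcores.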
// as processes begin with an age 1, we want to see if there
// are processes more than 1 iteration old.
long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
long vmemLimit = ptInfo.getVmemLimit();
long pmemLimit = ptInfo.getPmemLimit();
if (LOG.isDebugEnabled()) {
LOG.debug(String.format(
"Memory usage of ProcessTree %s for container-id %s: ",
pId, containerId.toString()) +
formatUsageString(
currentVmemUsage, vmemLimit,
currentPmemUsage, pmemLimit));
}
// Add resource utilization for this container
trackedContainersUtilization.addTo(
(int) (currentPmemUsage >> 20),
(int) (currentVmemUsage >> 20),
milliVcoresUsed / 1000.0f);
// Add usage to container metrics
if (containerMetricsEnabled) {
ContainerMetrics.forContainer(
containerId, containerMetricsPeriodMs).recordMemoryUsage(
(int) (currentPmemUsage >> 20));
ContainerMetrics.forContainer(
containerId, containerMetricsPeriodMs).recordCpuUsage
((int)cpuUsagePercentPerCore, milliVcoresUsed);
}
boolean isMemoryOverLimit = false;
String msg = "";
int containerExitStatus = ContainerExitStatus.INVALID;
if (isVmemCheckEnabled()
&& isProcessTreeOverLimit(containerId.toString(),
currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
// Container (the root process) is still alive and overflowing
// memory.
// Dump the process-tree and then clean it up.
msg = formatErrorMessage("virtual",
currentVmemUsage, vmemLimit,
currentPmemUsage, pmemLimit,
pId, containerId, pTree);
isMemoryOverLimit = true;
containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
} else if (isPmemCheckEnabled()
&& isProcessTreeOverLimit(containerId.toString(),
currentPmemUsage, curRssMemUsageOfAgedProcesses,
pmemLimit)) {
// Container (the root process) is still alive and overflowing
// memory.
// Dump the process-tree and then clean it up.
msg = formatErrorMessage("physical",
currentVmemUsage, vmemLimit,
currentPmemUsage, pmemLimit,
pId, containerId, pTree);
isMemoryOverLimit = true;
containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
}
// Accounting the total memory in usage for all containers
vmemUsageByAllContainers += currentVmemUsage;
pmemByAllContainers += currentPmemUsage;
// Accounting the total cpu usage for all containers
cpuUsagePercentPerCoreByAllContainers += cpuUsagePercentPerCore;
// accumulate the whole-node percentage, not the per-core figure
cpuUsageTotalCoresByAllContainers += cpuUsageTotalCoresPercentage;
if (isMemoryOverLimit) {
// Virtual or physical memory over limit. Fail the container and
// remove the corresponding process tree.
LOG.warn(msg);
// warn if not a leader
if (!pTree.checkPidPgrpidForMatch()) {
LOG.error("Killed container process with PID " + pId
+ " but it is not a process group leader.");
}
// kill the container
eventDispatcher.getEventHandler().handle(
new ContainerKillEvent(containerId,
containerExitStatus, msg));
it.remove();
LOG.info("Removed ProcessTree with root " + pId);
}
} catch (Exception e) {
// Log the exception and proceed to the next container.
LOG.warn("Uncaught exception in ContainerMemoryManager "
+ "while managing memory of " + containerId, e);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Total Resource Usage stats in NM by all containers : "
+ "Virtual Memory= " + vmemUsageByAllContainers
+ ", Physical Memory= " + pmemByAllContainers
+ ", Total CPU usage= " + cpuUsageTotalCoresByAllContainers
+ ", Total CPU(% per core) usage"
+ cpuUsagePercentPerCoreByAllContainers);
}
// Save the aggregated utilization of the containers
setContainersUtilization(trackedContainersUtilization);
try {
Thread.sleep(monitoringInterval);
} catch (InterruptedException e) {
LOG.warn(ContainersMonitorImpl.class.getName()
+ " is interrupted. Exiting.");
break;
}
}
}
private String formatErrorMessage(String memTypeExceeded,
long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit,
String pId, ContainerId containerId, ResourceCalculatorProcessTree pTree) {
return
String.format("Container [pid=%s,containerID=%s] is running beyond %s memory limits. ",
pId, containerId, memTypeExceeded) +
"Current usage: " +
formatUsageString(currentVmemUsage, vmemLimit,
currentPmemUsage, pmemLimit) +
". Killing container.\n" +
"Dump of the process-tree for " + containerId + " :\n" +
pTree.getProcessTreeDump();
}
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
return String.format("%sB of %sB physical memory used; " +
"%sB of %sB virtual memory used",
TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
}
@Override
public long getVmemAllocatedForContainers() {
return this.maxVmemAllottedForContainers;
}
/**
* Is the total physical memory check enabled?
*
* @return true if total physical memory check is enabled.
*/
@Override
public boolean isPmemCheckEnabled() {
return this.pmemCheckEnabled;
}
@Override
public long getPmemAllocatedForContainers() {
return this.maxPmemAllottedForContainers;
}
@Override
public long getVCoresAllocatedForContainers() {
return this.maxVCoresAllottedForContainers;
}
/**
* Is the total virtual memory check enabled?
*
* @return true if total virtual memory check is enabled.
*/
@Override
public boolean isVmemCheckEnabled() {
return this.vmemCheckEnabled;
}
@Override
public ResourceUtilization getContainersUtilization() {
return this.containersUtilization;
}
public void setContainersUtilization(ResourceUtilization utilization) {
this.containersUtilization = utilization;
}
@Override
public void handle(ContainersMonitorEvent monitoringEvent) {
if (!isEnabled()) {
return;
}
ContainerId containerId = monitoringEvent.getContainerId();
switch (monitoringEvent.getType()) {
case START_MONITORING_CONTAINER:
ContainerStartMonitoringEvent startEvent =
(ContainerStartMonitoringEvent) monitoringEvent;
if (containerMetricsEnabled) {
ContainerMetrics usageMetrics = ContainerMetrics
.forContainer(containerId, containerMetricsPeriodMs);
usageMetrics.recordStateChangeDurations(
startEvent.getLaunchDuration(),
startEvent.getLocalizationDuration());
}
synchronized (this.containersToBeAdded) {
ProcessTreeInfo processTreeInfo =
new ProcessTreeInfo(containerId, null, null,
startEvent.getVmemLimit(), startEvent.getPmemLimit(),
startEvent.getCpuVcores());
this.containersToBeAdded.put(containerId, processTreeInfo);
}
break;
case STOP_MONITORING_CONTAINER:
synchronized (this.containersToBeRemoved) {
this.containersToBeRemoved.add(containerId);
}
break;
default:
// TODO: Wrong event.
}
}
}
| 26,829 | 38.282577 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEventType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
public enum ContainersMonitorEventType {
START_MONITORING_CONTAINER,
STOP_MONITORING_CONTAINER
}
| 970 | 37.84 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class ContainersMonitorEvent extends
AbstractEvent<ContainersMonitorEventType> {
private final ContainerId containerId;
public ContainersMonitorEvent(ContainerId containerId,
ContainersMonitorEventType eventType) {
super(eventType);
this.containerId = containerId;
}
public ContainerId getContainerId() {
return this.containerId;
}
}
| 1,353 | 32.85 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStartMonitoringEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.yarn.api.records.ContainerId;
public class ContainerStartMonitoringEvent extends ContainersMonitorEvent {
private final long vmemLimit;
private final long pmemLimit;
private final int cpuVcores;
private final long launchDuration;
private final long localizationDuration;
public ContainerStartMonitoringEvent(ContainerId containerId,
long vmemLimit, long pmemLimit, int cpuVcores, long launchDuration,
long localizationDuration) {
super(containerId, ContainersMonitorEventType.START_MONITORING_CONTAINER);
this.vmemLimit = vmemLimit;
this.pmemLimit = pmemLimit;
this.cpuVcores = cpuVcores;
this.launchDuration = launchDuration;
this.localizationDuration = localizationDuration;
}
public long getVmemLimit() {
return this.vmemLimit;
}
public long getPmemLimit() {
return this.pmemLimit;
}
public int getCpuVcores() {
return this.cpuVcores;
}
public long getLaunchDuration() {
return this.launchDuration;
}
public long getLocalizationDuration() {
return this.localizationDuration;
}
}
| 1,990 | 31.112903 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableStat;
import org.apache.hadoop.yarn.api.records.ContainerId;
import java.util.HashMap;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import static org.apache.hadoop.metrics2.lib.Interns.info;
@InterfaceAudience.Private
@Metrics(context="container")
public class ContainerMetrics implements MetricsSource {
public static final String PMEM_LIMIT_METRIC_NAME = "pMemLimitMBs";
public static final String VMEM_LIMIT_METRIC_NAME = "vMemLimitMBs";
public static final String VCORE_LIMIT_METRIC_NAME = "vCoreLimit";
public static final String PMEM_USAGE_METRIC_NAME = "pMemUsageMBs";
public static final String LAUNCH_DURATION_METRIC_NAME = "launchDurationMs";
public static final String LOCALIZATION_DURATION_METRIC_NAME =
"localizationDurationMs";
private static final String PHY_CPU_USAGE_METRIC_NAME = "pCpuUsagePercent";
// Use a multiplier of 1000 to avoid losing too much precision when
// converting to integers
private static final String VCORE_USAGE_METRIC_NAME = "milliVcoreUsage";
@Metric
public MutableStat pMemMBsStat;
// This tracks overall CPU percentage of the machine in terms of percentage
// of 1 core similar to top
// Thus if you use 2 cores completely out of 4 available cores this value
// will be 200
@Metric
public MutableStat cpuCoreUsagePercent;
@Metric
public MutableStat milliVcoresUsed;
@Metric
public MutableGaugeInt pMemLimitMbs;
@Metric
public MutableGaugeInt vMemLimitMbs;
@Metric
public MutableGaugeInt cpuVcoreLimit;
@Metric
public MutableGaugeLong launchDurationMs;
@Metric
public MutableGaugeLong localizationDurationMs;
static final MetricsInfo RECORD_INFO =
info("ContainerResource", "Resource limit and usage by container");
public static final MetricsInfo PROCESSID_INFO =
info("ContainerPid", "Container Process Id");
final MetricsInfo recordInfo;
final MetricsRegistry registry;
final ContainerId containerId;
final MetricsSystem metricsSystem;
// Metrics publishing status
private long flushPeriodMs;
private boolean flushOnPeriod = false; // true if period elapsed
private boolean finished = false; // true if container finished
private boolean unregister = false; // unregister
private Timer timer; // lazily initialized
/**
* Simple metrics cache to help prevent re-registrations.
*/
protected final static Map<ContainerId, ContainerMetrics>
usageMetrics = new HashMap<>();
ContainerMetrics(
MetricsSystem ms, ContainerId containerId, long flushPeriodMs) {
this.recordInfo =
info(sourceName(containerId), RECORD_INFO.description());
this.registry = new MetricsRegistry(recordInfo);
this.metricsSystem = ms;
this.containerId = containerId;
this.flushPeriodMs = flushPeriodMs;
scheduleTimerTaskIfRequired();
this.pMemMBsStat = registry.newStat(
PMEM_USAGE_METRIC_NAME, "Physical memory stats", "Usage", "MBs", true);
this.cpuCoreUsagePercent = registry.newStat(
PHY_CPU_USAGE_METRIC_NAME, "Physical Cpu core percent usage stats",
"Usage", "Percents", true);
this.milliVcoresUsed = registry.newStat(
VCORE_USAGE_METRIC_NAME, "1000 times Vcore usage", "Usage",
"MilliVcores", true);
this.pMemLimitMbs = registry.newGauge(
PMEM_LIMIT_METRIC_NAME, "Physical memory limit in MBs", 0);
this.vMemLimitMbs = registry.newGauge(
VMEM_LIMIT_METRIC_NAME, "Virtual memory limit in MBs", 0);
this.cpuVcoreLimit = registry.newGauge(
VCORE_LIMIT_METRIC_NAME, "CPU limit in number of vcores", 0);
this.launchDurationMs = registry.newGauge(
LAUNCH_DURATION_METRIC_NAME, "Launch duration in MS", 0L);
this.localizationDurationMs = registry.newGauge(
LOCALIZATION_DURATION_METRIC_NAME, "Localization duration in MS", 0L);
}
ContainerMetrics tag(MetricsInfo info, ContainerId containerId) {
registry.tag(info, containerId.toString());
return this;
}
static String sourceName(ContainerId containerId) {
return RECORD_INFO.name() + "_" + containerId.toString();
}
public static ContainerMetrics forContainer(
ContainerId containerId, long flushPeriodMs) {
return forContainer(
DefaultMetricsSystem.instance(), containerId, flushPeriodMs);
}
synchronized static ContainerMetrics forContainer(
MetricsSystem ms, ContainerId containerId, long flushPeriodMs) {
ContainerMetrics metrics = usageMetrics.get(containerId);
if (metrics == null) {
metrics = new ContainerMetrics(
ms, containerId, flushPeriodMs).tag(RECORD_INFO, containerId);
// Register with the MetricsSystems
if (ms != null) {
metrics =
ms.register(sourceName(containerId),
"Metrics for container: " + containerId, metrics);
}
usageMetrics.put(containerId, metrics);
}
return metrics;
}
@Override
public synchronized void getMetrics(MetricsCollector collector, boolean all) {
    // Container goes through registered -> finished -> unregistered.
if (unregister) {
metricsSystem.unregisterSource(recordInfo.name());
usageMetrics.remove(containerId);
return;
}
if (finished || flushOnPeriod) {
registry.snapshot(collector.addRecord(registry.info()), all);
}
if (finished) {
this.unregister = true;
} else if (flushOnPeriod) {
flushOnPeriod = false;
scheduleTimerTaskIfRequired();
}
}
public synchronized void finished() {
this.finished = true;
if (timer != null) {
timer.cancel();
timer = null;
}
}
public void recordMemoryUsage(int memoryMBs) {
if (memoryMBs >= 0) {
this.pMemMBsStat.add(memoryMBs);
}
}
public void recordCpuUsage(
int totalPhysicalCpuPercent, int milliVcoresUsed) {
    if (totalPhysicalCpuPercent >= 0) {
this.cpuCoreUsagePercent.add(totalPhysicalCpuPercent);
}
if (milliVcoresUsed >= 0) {
this.milliVcoresUsed.add(milliVcoresUsed);
}
}
public void recordProcessId(String processId) {
registry.tag(PROCESSID_INFO, processId);
}
public void recordResourceLimit(int vmemLimit, int pmemLimit, int cpuVcores) {
this.vMemLimitMbs.set(vmemLimit);
this.pMemLimitMbs.set(pmemLimit);
this.cpuVcoreLimit.set(cpuVcores);
}
public void recordStateChangeDurations(long launchDuration,
long localizationDuration) {
this.launchDurationMs.set(launchDuration);
this.localizationDurationMs.set(localizationDuration);
}
private synchronized void scheduleTimerTaskIfRequired() {
if (flushPeriodMs > 0) {
// Lazily initialize timer
if (timer == null) {
this.timer = new Timer("Metrics flush checker", true);
}
TimerTask timerTask = new TimerTask() {
@Override
public void run() {
synchronized (ContainerMetrics.this) {
if (!finished) {
flushOnPeriod = true;
}
}
}
};
timer.schedule(timerTask, flushPeriodMs);
}
}
}
| 8,622 | 32.683594 | 80 |
java
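A minimal, hypothetical sketch of how a monitor might drive the ContainerMetrics lifecycle above. The flush period, container id, and resource numbers are made up, and the class is assumed to sit in the same package as ContainerMetrics so the calls resolve.

import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class ContainerMetricsUsageSketch {
  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("NodeManager");
    // Hypothetical container id built from made-up application values.
    ContainerId cid = ContainerId.newContainerId(
        ApplicationAttemptId.newInstance(
            ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1L);
    // Register (or fetch the cached) source; snapshot every 10 seconds.
    ContainerMetrics metrics = ContainerMetrics.forContainer(cid, 10000L);
    metrics.recordResourceLimit(2048, 1024, 2); // vmem MB, pmem MB, vcores
    metrics.recordCpuUsage(150, 1500);          // 150% of one core, 1.5 vcores
    metrics.recordMemoryUsage(512);             // 512 MB physical memory
    // The next getMetrics() pass snapshots once more; the pass after that
    // unregisters the source and drops it from the cache.
    metrics.finished();
  }
}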
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerStopMonitoringEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.yarn.api.records.ContainerId;
public class ContainerStopMonitoringEvent extends ContainersMonitorEvent {
public ContainerStopMonitoringEvent(ContainerId containerId) {
super(containerId, ContainersMonitorEventType.STOP_MONITORING_CONTAINER);
}
}
| 1,167 | 37.933333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
import org.apache.hadoop.yarn.api.records.ContainerId;
public interface AppLogAggregator extends Runnable {
void startContainerLogAggregation(ContainerId containerId,
boolean wasContainerSuccessful);
void abortLogAggregation();
void finishLogAggregation();
}
| 1,151 | 35 | 82 |
java
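For orientation, a hypothetical skeletal implementation of this interface (not part of Hadoop): containers are queued by startContainerLogAggregation() and run() loops until finishLogAggregation() or abortLogAggregation() is signalled, which is the same shape the real AppLogAggregatorImpl further below has.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class NoOpLogAggregator implements AppLogAggregator {
  private final BlockingQueue<ContainerId> pending =
      new LinkedBlockingQueue<ContainerId>();
  private final AtomicBoolean done = new AtomicBoolean(false);

  @Override
  public void startContainerLogAggregation(ContainerId containerId,
      boolean wasContainerSuccessful) {
    pending.add(containerId); // a real impl filters via a retention policy
  }

  @Override
  public void run() {
    while (!done.get()) {
      try {
        Thread.sleep(100); // a real aggregator uploads pending logs here
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  @Override
  public void finishLogAggregation() { done.set(true); }

  @Override
  public void abortLogAggregation() { done.set(true); }
}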
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
public class LogAggregationService extends AbstractService implements
LogHandler {
private static final Log LOG = LogFactory
.getLog(LogAggregationService.class);
/*
* Expected deployment TLD will be 1777, owner=<NMOwner>, group=<NMGroup -
   * Group to which NMOwner belongs>. App dirs will be created as 770,
* owner=<AppOwner>, group=<NMGroup>: so that the owner and <NMOwner> can
* access / modify the files.
* <NMGroup> should obviously be a limited access group.
*/
/**
* Permissions for the top level directory under which app directories will be
* created.
*/
private static final FsPermission TLDIR_PERMISSIONS = FsPermission
.createImmutable((short) 01777);
/**
* Permissions for the Application directory.
*/
private static final FsPermission APP_DIR_PERMISSIONS = FsPermission
.createImmutable((short) 0770);
private final Context context;
private final DeletionService deletionService;
private final Dispatcher dispatcher;
private LocalDirsHandlerService dirsHandler;
Path remoteRootLogDir;
String remoteRootLogDirSuffix;
private NodeId nodeId;
private final ConcurrentMap<ApplicationId, AppLogAggregator> appLogAggregators;
private final ExecutorService threadPool;
public LogAggregationService(Dispatcher dispatcher, Context context,
DeletionService deletionService, LocalDirsHandlerService dirsHandler) {
super(LogAggregationService.class.getName());
this.dispatcher = dispatcher;
this.context = context;
this.deletionService = deletionService;
this.dirsHandler = dirsHandler;
this.appLogAggregators =
new ConcurrentHashMap<ApplicationId, AppLogAggregator>();
this.threadPool = Executors.newCachedThreadPool(
new ThreadFactoryBuilder()
.setNameFormat("LogAggregationService #%d")
.build());
}
protected void serviceInit(Configuration conf) throws Exception {
this.remoteRootLogDir =
new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
this.remoteRootLogDirSuffix =
conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
    // NodeId is only available during start, so the following cannot be
    // moved anywhere else.
this.nodeId = this.context.getNodeId();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
LOG.info(this.getName() + " waiting for pending aggregation during exit");
stopAggregators();
super.serviceStop();
}
private void stopAggregators() {
threadPool.shutdown();
boolean supervised = getConfig().getBoolean(
YarnConfiguration.NM_RECOVERY_SUPERVISED,
YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED);
// if recovery on restart is supported then leave outstanding aggregations
// to the next restart
boolean shouldAbort = context.getNMStateStore().canRecover()
&& !context.getDecommissioned() && supervised;
// politely ask to finish
for (AppLogAggregator aggregator : appLogAggregators.values()) {
if (shouldAbort) {
aggregator.abortLogAggregation();
} else {
aggregator.finishLogAggregation();
}
}
while (!threadPool.isTerminated()) { // wait for all threads to finish
for (ApplicationId appId : appLogAggregators.keySet()) {
LOG.info("Waiting for aggregation to complete for " + appId);
}
try {
if (!threadPool.awaitTermination(30, TimeUnit.SECONDS)) {
threadPool.shutdownNow(); // send interrupt to hurry them along
}
} catch (InterruptedException e) {
LOG.warn("Aggregation stop interrupted!");
break;
}
}
for (ApplicationId appId : appLogAggregators.keySet()) {
LOG.warn("Some logs may not have been aggregated for " + appId);
}
}
protected FileSystem getFileSystem(Configuration conf) throws IOException {
return this.remoteRootLogDir.getFileSystem(conf);
}
void verifyAndCreateRemoteLogDir(Configuration conf) {
// Checking the existence of the TLD
FileSystem remoteFS = null;
try {
remoteFS = getFileSystem(conf);
} catch (IOException e) {
throw new YarnRuntimeException("Unable to get Remote FileSystem instance", e);
}
boolean remoteExists = true;
try {
FsPermission perms =
remoteFS.getFileStatus(this.remoteRootLogDir).getPermission();
if (!perms.equals(TLDIR_PERMISSIONS)) {
LOG.warn("Remote Root Log Dir [" + this.remoteRootLogDir
+ "] already exist, but with incorrect permissions. "
+ "Expected: [" + TLDIR_PERMISSIONS + "], Found: [" + perms
+ "]." + " The cluster may have problems with multiple users.");
}
} catch (FileNotFoundException e) {
remoteExists = false;
} catch (IOException e) {
throw new YarnRuntimeException(
"Failed to check permissions for dir ["
+ this.remoteRootLogDir + "]", e);
}
if (!remoteExists) {
LOG.warn("Remote Root Log Dir [" + this.remoteRootLogDir
+ "] does not exist. Attempting to create it.");
try {
Path qualified =
this.remoteRootLogDir.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory());
remoteFS.mkdirs(qualified, new FsPermission(TLDIR_PERMISSIONS));
remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
} catch (IOException e) {
throw new YarnRuntimeException("Failed to create remoteLogDir ["
+ this.remoteRootLogDir + "]", e);
}
}
}
Path getRemoteNodeLogFileForApp(ApplicationId appId, String user) {
return LogAggregationUtils.getRemoteNodeLogFileForApp(
this.remoteRootLogDir, appId, user, this.nodeId,
this.remoteRootLogDirSuffix);
}
Path getRemoteAppLogDir(ApplicationId appId, String user) {
return LogAggregationUtils.getRemoteAppLogDir(this.remoteRootLogDir, appId,
user, this.remoteRootLogDirSuffix);
}
private void createDir(FileSystem fs, Path path, FsPermission fsPerm)
throws IOException {
FsPermission dirPerm = new FsPermission(fsPerm);
fs.mkdirs(path, dirPerm);
FsPermission umask = FsPermission.getUMask(fs.getConf());
if (!dirPerm.equals(dirPerm.applyUMask(umask))) {
fs.setPermission(path, new FsPermission(fsPerm));
}
}
private boolean checkExists(FileSystem fs, Path path, FsPermission fsPerm)
throws IOException {
boolean exists = true;
try {
FileStatus appDirStatus = fs.getFileStatus(path);
if (!APP_DIR_PERMISSIONS.equals(appDirStatus.getPermission())) {
fs.setPermission(path, APP_DIR_PERMISSIONS);
}
} catch (FileNotFoundException fnfe) {
exists = false;
}
return exists;
}
protected void createAppDir(final String user, final ApplicationId appId,
UserGroupInformation userUgi) {
try {
userUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
try {
// TODO: Reuse FS for user?
FileSystem remoteFS = getFileSystem(getConfig());
// Only creating directories if they are missing to avoid
// unnecessary load on the filesystem from all of the nodes
Path appDir = LogAggregationUtils.getRemoteAppLogDir(
LogAggregationService.this.remoteRootLogDir, appId, user,
LogAggregationService.this.remoteRootLogDirSuffix);
appDir = appDir.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory());
if (!checkExists(remoteFS, appDir, APP_DIR_PERMISSIONS)) {
Path suffixDir = LogAggregationUtils.getRemoteLogSuffixedDir(
LogAggregationService.this.remoteRootLogDir, user,
LogAggregationService.this.remoteRootLogDirSuffix);
suffixDir = suffixDir.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory());
if (!checkExists(remoteFS, suffixDir, APP_DIR_PERMISSIONS)) {
Path userDir = LogAggregationUtils.getRemoteLogUserDir(
LogAggregationService.this.remoteRootLogDir, user);
userDir = userDir.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory());
if (!checkExists(remoteFS, userDir, APP_DIR_PERMISSIONS)) {
createDir(remoteFS, userDir, APP_DIR_PERMISSIONS);
}
createDir(remoteFS, suffixDir, APP_DIR_PERMISSIONS);
}
createDir(remoteFS, appDir, APP_DIR_PERMISSIONS);
}
} catch (IOException e) {
LOG.error("Failed to setup application log directory for "
+ appId, e);
throw e;
}
return null;
}
});
} catch (Exception e) {
throw new YarnRuntimeException(e);
}
}
@SuppressWarnings("unchecked")
private void initApp(final ApplicationId appId, String user,
Credentials credentials, ContainerLogsRetentionPolicy logRetentionPolicy,
Map<ApplicationAccessType, String> appAcls,
LogAggregationContext logAggregationContext) {
ApplicationEvent eventResponse;
try {
verifyAndCreateRemoteLogDir(getConfig());
initAppAggregator(appId, user, credentials, logRetentionPolicy, appAcls,
logAggregationContext);
eventResponse = new ApplicationEvent(appId,
ApplicationEventType.APPLICATION_LOG_HANDLING_INITED);
} catch (YarnRuntimeException e) {
LOG.warn("Application failed to init aggregation", e);
eventResponse = new ApplicationEvent(appId,
ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED);
}
this.dispatcher.getEventHandler().handle(eventResponse);
}
FileContext getLocalFileContext(Configuration conf) {
try {
return FileContext.getLocalFSFileContext(conf);
} catch (IOException e) {
throw new YarnRuntimeException("Failed to access local fs");
}
}
protected void initAppAggregator(final ApplicationId appId, String user,
Credentials credentials, ContainerLogsRetentionPolicy logRetentionPolicy,
Map<ApplicationAccessType, String> appAcls,
LogAggregationContext logAggregationContext) {
// Get user's FileSystem credentials
final UserGroupInformation userUgi =
UserGroupInformation.createRemoteUser(user);
if (credentials != null) {
userUgi.addCredentials(credentials);
}
// New application
final AppLogAggregator appLogAggregator =
new AppLogAggregatorImpl(this.dispatcher, this.deletionService,
getConfig(), appId, userUgi, this.nodeId, dirsHandler,
getRemoteNodeLogFileForApp(appId, user), logRetentionPolicy,
appAcls, logAggregationContext, this.context,
getLocalFileContext(getConfig()));
if (this.appLogAggregators.putIfAbsent(appId, appLogAggregator) != null) {
throw new YarnRuntimeException("Duplicate initApp for " + appId);
}
    // Create dirs only after the duplicate-aggregator check above has passed.
try {
// Create the app dir
createAppDir(user, appId, userUgi);
} catch (Exception e) {
appLogAggregators.remove(appId);
closeFileSystems(userUgi);
if (!(e instanceof YarnRuntimeException)) {
e = new YarnRuntimeException(e);
}
throw (YarnRuntimeException)e;
}
// TODO Get the user configuration for the list of containers that need log
// aggregation.
// Schedule the aggregator.
Runnable aggregatorWrapper = new Runnable() {
public void run() {
try {
appLogAggregator.run();
} finally {
appLogAggregators.remove(appId);
closeFileSystems(userUgi);
}
}
};
this.threadPool.execute(aggregatorWrapper);
}
protected void closeFileSystems(final UserGroupInformation userUgi) {
try {
FileSystem.closeAllForUGI(userUgi);
} catch (IOException e) {
LOG.warn("Failed to close filesystems: ", e);
}
}
// for testing only
@Private
int getNumAggregators() {
return this.appLogAggregators.size();
}
private void stopContainer(ContainerId containerId, int exitCode) {
    // A container is complete. Put this container's logs up for aggregation
    // if they are needed.
AppLogAggregator aggregator = this.appLogAggregators.get(
containerId.getApplicationAttemptId().getApplicationId());
if (aggregator == null) {
LOG.warn("Log aggregation is not initialized for " + containerId
+ ", did it fail to start?");
return;
}
aggregator.startContainerLogAggregation(containerId, exitCode == 0);
}
private void stopApp(ApplicationId appId) {
// App is complete. Finish up any containers' pending log aggregation and
// close the application specific logFile.
AppLogAggregator aggregator = this.appLogAggregators.get(appId);
if (aggregator == null) {
LOG.warn("Log aggregation is not initialized for " + appId
+ ", did it fail to start?");
return;
}
aggregator.finishLogAggregation();
}
@Override
public void handle(LogHandlerEvent event) {
switch (event.getType()) {
case APPLICATION_STARTED:
LogHandlerAppStartedEvent appStartEvent =
(LogHandlerAppStartedEvent) event;
initApp(appStartEvent.getApplicationId(), appStartEvent.getUser(),
appStartEvent.getCredentials(),
appStartEvent.getLogRetentionPolicy(),
appStartEvent.getApplicationAcls(),
appStartEvent.getLogAggregationContext());
break;
case CONTAINER_FINISHED:
LogHandlerContainerFinishedEvent containerFinishEvent =
(LogHandlerContainerFinishedEvent) event;
stopContainer(containerFinishEvent.getContainerId(),
containerFinishEvent.getExitCode());
break;
case APPLICATION_FINISHED:
LogHandlerAppFinishedEvent appFinishedEvent =
(LogHandlerAppFinishedEvent) event;
stopApp(appFinishedEvent.getApplicationId());
break;
default:
; // Ignore
}
}
@VisibleForTesting
public ConcurrentMap<ApplicationId, AppLogAggregator> getAppLogAggregators() {
return this.appLogAggregators;
}
@VisibleForTesting
public NodeId getNodeId() {
return this.nodeId;
}
}
| 18,213 | 37.025052 | 116 |
java
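The permission scheme from the comments above (a 1777 sticky top-level dir so every user can create a subtree, 770 app dirs readable only by the owner and the NM group) comes down to a few FileSystem calls. A minimal sketch against the local filesystem; the paths are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class RemoteLogDirSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Top-level dir: world-writable with the sticky bit set.
    Path tld = new Path("/tmp/app-logs");
    FsPermission tldPerm = new FsPermission((short) 01777);
    fs.mkdirs(tld, tldPerm);
    fs.setPermission(tld, tldPerm); // mkdirs applies the umask; reset it
    // Per-app dir: only the app owner and the NM group may read it.
    Path appDir = new Path(tld, "someuser/logs/application_0_0001");
    FsPermission appPerm = new FsPermission((short) 0770);
    fs.mkdirs(appDir, appPerm);
    fs.setPermission(appDir, appPerm);
  }
}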
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.Times;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
public class AppLogAggregatorImpl implements AppLogAggregator {
private static final Log LOG = LogFactory
.getLog(AppLogAggregatorImpl.class);
private static final int THREAD_SLEEP_TIME = 1000;
  // This is a temporary solution. The configuration will be deleted once
// we find a more scalable method to only write a single log file per LRS.
private static final String NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP
= YarnConfiguration.NM_PREFIX + "log-aggregation.num-log-files-per-app";
private static final int
DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP = 30;
  // This configuration is for debug and test purposes. By setting it to
  // true, we can break the lower bound of
  // NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS.
private static final String NM_LOG_AGGREGATION_DEBUG_ENABLED
= YarnConfiguration.NM_PREFIX + "log-aggregation.debug-enabled";
private static final boolean
DEFAULT_NM_LOG_AGGREGATION_DEBUG_ENABLED = false;
private static final long
NM_LOG_AGGREGATION_MIN_ROLL_MONITORING_INTERVAL_SECONDS = 3600;
private final LocalDirsHandlerService dirsHandler;
private final Dispatcher dispatcher;
private final ApplicationId appId;
private final String applicationId;
private boolean logAggregationDisabled = false;
private final Configuration conf;
private final DeletionService delService;
private final UserGroupInformation userUgi;
private final Path remoteNodeLogFileForApp;
private final Path remoteNodeTmpLogFileForApp;
private final ContainerLogsRetentionPolicy retentionPolicy;
private final BlockingQueue<ContainerId> pendingContainers;
private final AtomicBoolean appFinishing = new AtomicBoolean();
private final AtomicBoolean appAggregationFinished = new AtomicBoolean();
private final AtomicBoolean aborted = new AtomicBoolean();
private final Map<ApplicationAccessType, String> appAcls;
private final FileContext lfs;
private final LogAggregationContext logAggregationContext;
private final Context context;
private final int retentionSize;
private final long rollingMonitorInterval;
private final boolean logAggregationInRolling;
private final NodeId nodeId;
// This variable is only for testing
private final AtomicBoolean waiting = new AtomicBoolean(false);
private boolean renameTemporaryLogFileFailed = false;
private final Map<ContainerId, ContainerLogAggregator> containerLogAggregators =
new HashMap<ContainerId, ContainerLogAggregator>();
public AppLogAggregatorImpl(Dispatcher dispatcher,
DeletionService deletionService, Configuration conf,
ApplicationId appId, UserGroupInformation userUgi, NodeId nodeId,
LocalDirsHandlerService dirsHandler, Path remoteNodeLogFileForApp,
ContainerLogsRetentionPolicy retentionPolicy,
Map<ApplicationAccessType, String> appAcls,
LogAggregationContext logAggregationContext, Context context,
FileContext lfs) {
this.dispatcher = dispatcher;
this.conf = conf;
this.delService = deletionService;
this.appId = appId;
this.applicationId = ConverterUtils.toString(appId);
this.userUgi = userUgi;
this.dirsHandler = dirsHandler;
this.remoteNodeLogFileForApp = remoteNodeLogFileForApp;
this.remoteNodeTmpLogFileForApp = getRemoteNodeTmpLogFileForApp();
this.retentionPolicy = retentionPolicy;
this.pendingContainers = new LinkedBlockingQueue<ContainerId>();
this.appAcls = appAcls;
this.lfs = lfs;
this.logAggregationContext = logAggregationContext;
this.context = context;
this.nodeId = nodeId;
    int configuredRetentionSize =
        conf.getInt(NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
            DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
    if (configuredRetentionSize <= 0) {
      this.retentionSize =
          DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
    } else {
      this.retentionSize = configuredRetentionSize;
    }
long configuredRollingMonitorInterval = conf.getLong(
YarnConfiguration
.NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS,
YarnConfiguration
.DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS);
    boolean debugMode =
        conf.getBoolean(NM_LOG_AGGREGATION_DEBUG_ENABLED,
            DEFAULT_NM_LOG_AGGREGATION_DEBUG_ENABLED);
    if (configuredRollingMonitorInterval > 0
        && configuredRollingMonitorInterval <
            NM_LOG_AGGREGATION_MIN_ROLL_MONITORING_INTERVAL_SECONDS) {
      if (debugMode) {
this.rollingMonitorInterval = configuredRollingMonitorInterval;
} else {
LOG.warn(
"rollingMonitorIntervall should be more than or equal to "
+ NM_LOG_AGGREGATION_MIN_ROLL_MONITORING_INTERVAL_SECONDS
+ " seconds. Using "
+ NM_LOG_AGGREGATION_MIN_ROLL_MONITORING_INTERVAL_SECONDS
+ " seconds instead.");
this.rollingMonitorInterval =
NM_LOG_AGGREGATION_MIN_ROLL_MONITORING_INTERVAL_SECONDS;
}
} else {
if (configuredRollingMonitorInterval <= 0) {
LOG.warn("rollingMonitorInterval is set as "
+ configuredRollingMonitorInterval + ". "
+ "The log rolling mornitoring interval is disabled. "
+ "The logs will be aggregated after this application is finished.");
} else {
LOG.warn("rollingMonitorInterval is set as "
+ configuredRollingMonitorInterval + ". "
+ "The logs will be aggregated every "
+ configuredRollingMonitorInterval + " seconds");
}
this.rollingMonitorInterval = configuredRollingMonitorInterval;
}
    this.logAggregationInRolling =
        this.rollingMonitorInterval > 0 && this.logAggregationContext != null
            && this.logAggregationContext.getRolledLogsIncludePattern() != null
            && !this.logAggregationContext.getRolledLogsIncludePattern()
                .isEmpty();
}
private void uploadLogsForContainers(boolean appFinished) {
if (this.logAggregationDisabled) {
return;
}
if (UserGroupInformation.isSecurityEnabled()) {
Credentials systemCredentials =
context.getSystemCredentialsForApps().get(appId);
if (systemCredentials != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding new framework-token for " + appId
+ " for log-aggregation: " + systemCredentials.getAllTokens()
+ "; userUgi=" + userUgi);
}
// this will replace old token
userUgi.addCredentials(systemCredentials);
}
}
// Create a set of Containers whose logs will be uploaded in this cycle.
// It includes:
// a) all containers in pendingContainers: those containers are finished
// and satisfy the retentionPolicy.
// b) some set of running containers: For all the Running containers,
// we have ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,
// so simply set wasContainerSuccessful as true to
// bypass FAILED_CONTAINERS check and find the running containers
// which satisfy the retentionPolicy.
Set<ContainerId> pendingContainerInThisCycle = new HashSet<ContainerId>();
this.pendingContainers.drainTo(pendingContainerInThisCycle);
Set<ContainerId> finishedContainers =
new HashSet<ContainerId>(pendingContainerInThisCycle);
if (this.context.getApplications().get(this.appId) != null) {
for (ContainerId container : this.context.getApplications()
.get(this.appId).getContainers().keySet()) {
if (shouldUploadLogs(container, true)) {
pendingContainerInThisCycle.add(container);
}
}
}
LogWriter writer = null;
try {
try {
writer =
new LogWriter(this.conf, this.remoteNodeTmpLogFileForApp,
this.userUgi);
// Write ACLs once when the writer is created.
writer.writeApplicationACLs(appAcls);
writer.writeApplicationOwner(this.userUgi.getShortUserName());
} catch (IOException e1) {
LOG.error("Cannot create writer for app " + this.applicationId
+ ". Skip log upload this time. ", e1);
return;
}
boolean uploadedLogsInThisCycle = false;
for (ContainerId container : pendingContainerInThisCycle) {
ContainerLogAggregator aggregator = null;
if (containerLogAggregators.containsKey(container)) {
aggregator = containerLogAggregators.get(container);
} else {
aggregator = new ContainerLogAggregator(container);
containerLogAggregators.put(container, aggregator);
}
Set<Path> uploadedFilePathsInThisCycle =
aggregator.doContainerLogAggregation(writer, appFinished);
if (uploadedFilePathsInThisCycle.size() > 0) {
uploadedLogsInThisCycle = true;
this.delService.delete(this.userUgi.getShortUserName(), null,
uploadedFilePathsInThisCycle
.toArray(new Path[uploadedFilePathsInThisCycle.size()]));
}
// This container is finished, and all its logs have been uploaded,
// remove it from containerLogAggregators.
if (finishedContainers.contains(container)) {
containerLogAggregators.remove(container);
}
}
      // Before the uploaded file is moved into its final place, make sure
      // the number of existing rolled logs stays below the configured NM
      // log aggregation retention size.
if (uploadedLogsInThisCycle) {
cleanOldLogs();
}
if (writer != null) {
writer.close();
}
long currentTime = System.currentTimeMillis();
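      // In rolling mode each upload cycle gets its own file, suffixed with
      // the upload timestamp; otherwise the single per-app file is used.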
final Path renamedPath = this.rollingMonitorInterval <= 0
? remoteNodeLogFileForApp : new Path(
remoteNodeLogFileForApp.getParent(),
remoteNodeLogFileForApp.getName() + "_"
+ currentTime);
String diagnosticMessage = "";
boolean logAggregationSucceedInThisCycle = true;
final boolean rename = uploadedLogsInThisCycle;
try {
userUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
FileSystem remoteFS = remoteNodeLogFileForApp.getFileSystem(conf);
if (remoteFS.exists(remoteNodeTmpLogFileForApp)) {
if (rename) {
remoteFS.rename(remoteNodeTmpLogFileForApp, renamedPath);
} else {
remoteFS.delete(remoteNodeTmpLogFileForApp, false);
}
}
return null;
}
});
diagnosticMessage =
"Log uploaded successfully for Application: " + appId
+ " in NodeManager: "
+ LogAggregationUtils.getNodeString(nodeId) + " at "
+ Times.format(currentTime) + "\n";
} catch (Exception e) {
LOG.error(
"Failed to move temporary log file to final location: ["
+ remoteNodeTmpLogFileForApp + "] to ["
+ renamedPath + "]", e);
diagnosticMessage =
"Log uploaded failed for Application: " + appId
+ " in NodeManager: "
+ LogAggregationUtils.getNodeString(nodeId) + " at "
+ Times.format(currentTime) + "\n";
renameTemporaryLogFileFailed = true;
logAggregationSucceedInThisCycle = false;
}
LogAggregationReport report =
Records.newRecord(LogAggregationReport.class);
report.setApplicationId(appId);
report.setDiagnosticMessage(diagnosticMessage);
report.setLogAggregationStatus(logAggregationSucceedInThisCycle
? LogAggregationStatus.RUNNING
: LogAggregationStatus.RUNNING_WITH_FAILURE);
this.context.getLogAggregationStatusForApps().add(report);
if (appFinished) {
// If the app is finished, one extra final report with log aggregation
// status SUCCEEDED/FAILED will be sent to RM to inform the RM
// that the log aggregation in this NM is completed.
LogAggregationReport finalReport =
Records.newRecord(LogAggregationReport.class);
finalReport.setApplicationId(appId);
finalReport.setLogAggregationStatus(renameTemporaryLogFileFailed
? LogAggregationStatus.FAILED : LogAggregationStatus.SUCCEEDED);
this.context.getLogAggregationStatusForApps().add(finalReport);
}
} finally {
if (writer != null) {
writer.close();
}
}
}
private void cleanOldLogs() {
try {
final FileSystem remoteFS =
this.remoteNodeLogFileForApp.getFileSystem(conf);
Path appDir =
this.remoteNodeLogFileForApp.getParent().makeQualified(
remoteFS.getUri(), remoteFS.getWorkingDirectory());
Set<FileStatus> status =
new HashSet<FileStatus>(Arrays.asList(remoteFS.listStatus(appDir)));
Iterable<FileStatus> mask =
Iterables.filter(status, new Predicate<FileStatus>() {
@Override
public boolean apply(FileStatus next) {
return next.getPath().getName()
.contains(LogAggregationUtils.getNodeString(nodeId))
&& !next.getPath().getName().endsWith(
LogAggregationUtils.TMP_FILE_SUFFIX);
}
});
status = Sets.newHashSet(mask);
      // Normally we only need to delete the single oldest log before
      // uploading a new one. If we cannot delete the older logs in this
      // cycle, we will delete them in the next cycle.
if (status.size() >= this.retentionSize) {
// sort by the lastModificationTime ascending
List<FileStatus> statusList = new ArrayList<FileStatus>(status);
Collections.sort(statusList, new Comparator<FileStatus>() {
public int compare(FileStatus s1, FileStatus s2) {
return s1.getModificationTime() < s2.getModificationTime() ? -1
: s1.getModificationTime() > s2.getModificationTime() ? 1 : 0;
}
});
        for (int i = 0; i <= statusList.size() - this.retentionSize; i++) {
final FileStatus remove = statusList.get(i);
try {
userUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
remoteFS.delete(remove.getPath(), false);
return null;
}
});
} catch (Exception e) {
LOG.error("Failed to delete " + remove.getPath(), e);
}
}
}
} catch (Exception e) {
LOG.error("Failed to clean old logs", e);
}
}
@Override
public void run() {
try {
doAppLogAggregation();
} catch (Exception e) {
// do post clean up of log directories on any exception
LOG.error("Error occured while aggregating the log for the application "
+ appId, e);
doAppLogAggregationPostCleanUp();
} finally {
if (!this.appAggregationFinished.get()) {
LOG.warn("Aggregation did not complete for application " + appId);
}
this.appAggregationFinished.set(true);
}
}
@SuppressWarnings("unchecked")
private void doAppLogAggregation() {
while (!this.appFinishing.get() && !this.aborted.get()) {
synchronized(this) {
try {
waiting.set(true);
if (logAggregationInRolling) {
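          // Sleep one rolling interval (seconds -> ms); a notifyAll() from
          // doLogAggregationOutOfBand() or finish/abort can cut it short.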
wait(this.rollingMonitorInterval * 1000);
if (this.appFinishing.get() || this.aborted.get()) {
break;
}
uploadLogsForContainers(false);
} else {
wait(THREAD_SLEEP_TIME);
}
} catch (InterruptedException e) {
LOG.warn("PendingContainers queue is interrupted");
this.appFinishing.set(true);
}
}
}
if (this.aborted.get()) {
return;
}
// App is finished, upload the container logs.
uploadLogsForContainers(true);
doAppLogAggregationPostCleanUp();
this.dispatcher.getEventHandler().handle(
new ApplicationEvent(this.appId,
ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
this.appAggregationFinished.set(true);
}
private void doAppLogAggregationPostCleanUp() {
// Remove the local app-log-dirs
List<Path> localAppLogDirs = new ArrayList<Path>();
for (String rootLogDir : dirsHandler.getLogDirsForCleanup()) {
Path logPath = new Path(rootLogDir, applicationId);
try {
// check if log dir exists
lfs.getFileStatus(logPath);
localAppLogDirs.add(logPath);
} catch (UnsupportedFileSystemException ue) {
LOG.warn("Log dir " + rootLogDir + "is an unsupported file system", ue);
continue;
} catch (IOException fe) {
continue;
}
}
if (localAppLogDirs.size() > 0) {
this.delService.delete(this.userUgi.getShortUserName(), null,
localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
}
}
private Path getRemoteNodeTmpLogFileForApp() {
return new Path(remoteNodeLogFileForApp.getParent(),
(remoteNodeLogFileForApp.getName() + LogAggregationUtils.TMP_FILE_SUFFIX));
}
  // TODO: The assumption that sequence number 1 identifies the AM container
  // is not always true.
private boolean shouldUploadLogs(ContainerId containerId,
boolean wasContainerSuccessful) {
// All containers
if (this.retentionPolicy
.equals(ContainerLogsRetentionPolicy.ALL_CONTAINERS)) {
return true;
}
// AM Container only
if (this.retentionPolicy
.equals(ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY)) {
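      // The low bits of the container id are its sequence number within
      // the attempt; sequence number 1 is assumed to be the AM (see the
      // TODO above).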
if ((containerId.getContainerId()
          & ContainerId.CONTAINER_ID_BITMASK) == 1) {
return true;
}
return false;
}
// AM + Failing containers
if (this.retentionPolicy
.equals(ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY)) {
if ((containerId.getContainerId()
& ContainerId.CONTAINER_ID_BITMASK) == 1) {
return true;
      } else if (!wasContainerSuccessful) {
return true;
}
return false;
}
return false;
}
@Override
public void startContainerLogAggregation(ContainerId containerId,
boolean wasContainerSuccessful) {
if (shouldUploadLogs(containerId, wasContainerSuccessful)) {
LOG.info("Considering container " + containerId
+ " for log-aggregation");
this.pendingContainers.add(containerId);
}
}
@Override
public synchronized void finishLogAggregation() {
LOG.info("Application just finished : " + this.applicationId);
this.appFinishing.set(true);
this.notifyAll();
}
@Override
public synchronized void abortLogAggregation() {
LOG.info("Aborting log aggregation for " + this.applicationId);
this.aborted.set(true);
this.notifyAll();
}
@Private
@VisibleForTesting
// This is only used for testing.
// This will wake the log aggregation thread that is waiting for
// rollingMonitorInterval.
// To use this method, make sure the log aggregation thread is running
// and waiting for rollingMonitorInterval.
public synchronized void doLogAggregationOutOfBand() {
    while (!waiting.get()) {
try {
wait(200);
} catch (InterruptedException e) {
// Do Nothing
}
}
LOG.info("Do OutOfBand log aggregation");
this.notifyAll();
}
private class ContainerLogAggregator {
private final ContainerId containerId;
private Set<String> uploadedFileMeta =
new HashSet<String>();
public ContainerLogAggregator(ContainerId containerId) {
this.containerId = containerId;
}
public Set<Path> doContainerLogAggregation(LogWriter writer,
boolean appFinished) {
LOG.info("Uploading logs for container " + containerId
+ ". Current good log dirs are "
+ StringUtils.join(",", dirsHandler.getLogDirsForRead()));
final LogKey logKey = new LogKey(containerId);
final LogValue logValue =
new LogValue(dirsHandler.getLogDirsForRead(), containerId,
userUgi.getShortUserName(), logAggregationContext,
this.uploadedFileMeta, appFinished);
try {
writer.append(logKey, logValue);
} catch (Exception e) {
LOG.error("Couldn't upload logs for " + containerId
+ ". Skipping this container.", e);
return new HashSet<Path>();
}
this.uploadedFileMeta.addAll(logValue
.getCurrentUpLoadedFileMeta());
      // If any of the previously uploaded logs have been deleted,
      // we need to remove them from alreadyUploadedLogs.
Iterable<String> mask =
Iterables.filter(uploadedFileMeta, new Predicate<String>() {
@Override
public boolean apply(String next) {
return logValue.getAllExistingFilesMeta().contains(next);
}
});
this.uploadedFileMeta = Sets.newHashSet(mask);
return logValue.getCurrentUpLoadedFilesPath();
}
}
// only for test
@VisibleForTesting
public UserGroupInformation getUgi() {
return this.userUgi;
}
}
| 24,825 | 38.157729 | 99 |
java
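The cleanOldLogs() retention step above reduces to: list the rolled logs for this node, skip in-flight .tmp files, sort by modification time, and delete the oldest until the count is back under the cap. A standalone sketch of just that arithmetic; the FileSystem, directory, and suffix handling are simplified placeholders:

import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class LogRetentionSketch {
  // Keep at most `retention` rolled logs in appDir, deleting oldest first.
  static void pruneOldLogs(FileSystem fs, Path appDir, int retention)
      throws IOException {
    List<FileStatus> logs = new ArrayList<FileStatus>();
    for (FileStatus s : fs.listStatus(appDir)) {
      if (!s.getPath().getName().endsWith(".tmp")) { // skip in-flight uploads
        logs.add(s);
      }
    }
    logs.sort(Comparator.comparingLong(FileStatus::getModificationTime));
    // Same loop bound as cleanOldLogs(): after deleting, at most
    // retention - 1 old files remain, leaving room for the new upload.
    for (int i = 0; i <= logs.size() - retention; i++) {
      fs.delete(logs.get(i).getPath(), false);
    }
  }
}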
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.junit.Assert;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
import org.junit.Test;
public class TestYSCRecordFactory {
@Test
public void testPbRecordFactory() {
RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
try {
NodeHeartbeatRequest request = pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
Assert.assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete record");
}
}
}
| 1,718 | 36.369565 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl;
import org.junit.Assert;
import org.junit.Test;
/**
 * Simple tests for classes from org.apache.hadoop.yarn.server.api.
*/
public class TestYarnServerApiClasses {
private final static org.apache.hadoop.yarn.factories.RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
/**
   * Test RegisterNodeManagerResponsePBImpl: getters and setters. The
   * RegisterNodeManagerResponsePBImpl should generate a prototype and
   * restore its data from that prototype.
*/
@Test
public void testRegisterNodeManagerResponsePBImpl() {
RegisterNodeManagerResponsePBImpl original =
new RegisterNodeManagerResponsePBImpl();
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNodeAction(NodeAction.NORMAL);
original.setDiagnosticsMessage("testDiagnosticMessage");
RegisterNodeManagerResponsePBImpl copy =
new RegisterNodeManagerResponsePBImpl(
original.getProto());
assertEquals(1, copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1, copy.getNMTokenMasterKey().getKeyId());
assertEquals(NodeAction.NORMAL, copy.getNodeAction());
assertEquals("testDiagnosticMessage", copy.getDiagnosticsMessage());
assertFalse(copy.getAreNodeLabelsAcceptedByRM());
}
@Test
public void testRegisterNodeManagerResponsePBImplWithRMAcceptLbls() {
RegisterNodeManagerResponsePBImpl original =
new RegisterNodeManagerResponsePBImpl();
original.setAreNodeLabelsAcceptedByRM(true);
RegisterNodeManagerResponsePBImpl copy =
new RegisterNodeManagerResponsePBImpl(original.getProto());
assertTrue(copy.getAreNodeLabelsAcceptedByRM());
}
/**
* Test NodeHeartbeatRequestPBImpl.
*/
@Test
public void testNodeHeartbeatRequestPBImpl() {
NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
original.setLastKnownContainerTokenMasterKey(getMasterKey());
original.setLastKnownNMTokenMasterKey(getMasterKey());
original.setNodeStatus(getNodeStatus());
original.setNodeLabels(getValidNodeLabels());
NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
original.getProto());
assertEquals(1, copy.getLastKnownContainerTokenMasterKey().getKeyId());
assertEquals(1, copy.getLastKnownNMTokenMasterKey().getKeyId());
assertEquals("localhost", copy.getNodeStatus().getNodeId().getHost());
// check labels are coming with valid values
Assert.assertTrue(original.getNodeLabels()
.containsAll(copy.getNodeLabels()));
// check for empty labels
    original.setNodeLabels(new HashSet<NodeLabel>());
copy = new NodeHeartbeatRequestPBImpl(
original.getProto());
Assert.assertNotNull(copy.getNodeLabels());
Assert.assertEquals(0, copy.getNodeLabels().size());
}
/**
* Test NodeHeartbeatRequestPBImpl.
*/
@Test
public void testNodeHeartbeatRequestPBImplWithNullLabels() {
NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
NodeHeartbeatRequestPBImpl copy =
new NodeHeartbeatRequestPBImpl(original.getProto());
Assert.assertNull(copy.getNodeLabels());
}
/**
* Test NodeHeartbeatResponsePBImpl.
*/
@Test
public void testNodeHeartbeatResponsePBImpl() {
NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
original.setDiagnosticsMessage("testDiagnosticMessage");
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNextHeartBeatInterval(1000);
original.setNodeAction(NodeAction.NORMAL);
original.setResponseId(100);
NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
original.getProto());
assertEquals(100, copy.getResponseId());
assertEquals(NodeAction.NORMAL, copy.getNodeAction());
assertEquals(1000, copy.getNextHeartBeatInterval());
assertEquals(1, copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1, copy.getNMTokenMasterKey().getKeyId());
assertEquals("testDiagnosticMessage", copy.getDiagnosticsMessage());
    assertFalse(copy.getAreNodeLabelsAcceptedByRM());
}
@Test
public void testNodeHeartbeatResponsePBImplWithRMAcceptLbls() {
NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
original.setAreNodeLabelsAcceptedByRM(true);
NodeHeartbeatResponsePBImpl copy =
new NodeHeartbeatResponsePBImpl(original.getProto());
assertTrue(copy.getAreNodeLabelsAcceptedByRM());
}
/**
* Test RegisterNodeManagerRequestPBImpl.
*/
@Test
public void testRegisterNodeManagerRequestPBImpl() {
RegisterNodeManagerRequestPBImpl original = new RegisterNodeManagerRequestPBImpl();
original.setHttpPort(8080);
original.setNodeId(getNodeId());
Resource resource = recordFactory.newRecordInstance(Resource.class);
resource.setMemory(10000);
resource.setVirtualCores(2);
original.setResource(resource);
RegisterNodeManagerRequestPBImpl copy = new RegisterNodeManagerRequestPBImpl(
original.getProto());
assertEquals(8080, copy.getHttpPort());
assertEquals(9090, copy.getNodeId().getPort());
assertEquals(10000, copy.getResource().getMemory());
assertEquals(2, copy.getResource().getVirtualCores());
}
/**
* Test MasterKeyPBImpl.
*/
@Test
public void testMasterKeyPBImpl() {
MasterKeyPBImpl original = new MasterKeyPBImpl();
original.setBytes(ByteBuffer.allocate(0));
original.setKeyId(1);
MasterKeyPBImpl copy = new MasterKeyPBImpl(original.getProto());
assertEquals(1, copy.getKeyId());
assertTrue(original.equals(copy));
assertEquals(original.hashCode(), copy.hashCode());
}
/**
* Test SerializedExceptionPBImpl.
*/
@Test
public void testSerializedExceptionPBImpl() {
SerializedExceptionPBImpl original = new SerializedExceptionPBImpl();
original.init("testMessage");
SerializedExceptionPBImpl copy = new SerializedExceptionPBImpl(
original.getProto());
assertEquals("testMessage", copy.getMessage());
original = new SerializedExceptionPBImpl();
original.init("testMessage", new Throwable(new Throwable("parent")));
copy = new SerializedExceptionPBImpl(original.getProto());
assertEquals("testMessage", copy.getMessage());
assertEquals("parent", copy.getCause().getMessage());
    assertTrue(copy.getRemoteTrace().startsWith(
"java.lang.Throwable: java.lang.Throwable: parent"));
}
/**
* Test NodeStatusPBImpl.
*/
@Test
public void testNodeStatusPBImpl() {
NodeStatusPBImpl original = new NodeStatusPBImpl();
original.setContainersStatuses(Arrays.asList(getContainerStatus(1, 2, 1),
getContainerStatus(2, 3, 1)));
original.setKeepAliveApplications(Arrays.asList(getApplicationId(3),
getApplicationId(4)));
original.setNodeHealthStatus(getNodeHealthStatus());
original.setNodeId(getNodeId());
original.setResponseId(1);
NodeStatusPBImpl copy = new NodeStatusPBImpl(original.getProto());
assertEquals(3L, copy.getContainersStatuses().get(1).getContainerId()
.getContainerId());
assertEquals(3, copy.getKeepAliveApplications().get(0).getId());
assertEquals(1000, copy.getNodeHealthStatus().getLastHealthReportTime());
assertEquals(9090, copy.getNodeId().getPort());
assertEquals(1, copy.getResponseId());
}
@Test
public void testRegisterNodeManagerRequestWithNullLabels() {
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", null, null);
    // serialize to proto, and get request back from the proto
RegisterNodeManagerRequest request1 =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
// check labels are coming with no values
Assert.assertNull(request1.getNodeLabels());
}
@Test
public void testRegisterNodeManagerRequestWithValidLabels() {
HashSet<NodeLabel> nodeLabels = getValidNodeLabels();
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", null, null, nodeLabels);
    // serialize to proto, and get request back from the proto
RegisterNodeManagerRequest copy =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
// check labels are coming with valid values
    Assert.assertTrue(nodeLabels.containsAll(copy.getNodeLabels()));
// check for empty labels
    request.setNodeLabels(new HashSet<NodeLabel>());
copy = new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
Assert.assertNotNull(copy.getNodeLabels());
Assert.assertEquals(0, copy.getNodeLabels().size());
}
@Test
public void testUnRegisterNodeManagerRequestPBImpl() throws Exception {
UnRegisterNodeManagerRequestPBImpl request = new UnRegisterNodeManagerRequestPBImpl();
NodeId nodeId = NodeId.newInstance("host", 1234);
request.setNodeId(nodeId);
UnRegisterNodeManagerRequestPBImpl copy = new UnRegisterNodeManagerRequestPBImpl(
request.getProto());
Assert.assertEquals(nodeId, copy.getNodeId());
}
private HashSet<NodeLabel> getValidNodeLabels() {
HashSet<NodeLabel> nodeLabels = new HashSet<NodeLabel>();
nodeLabels.add(NodeLabel.newInstance("java"));
nodeLabels.add(NodeLabel.newInstance("windows"));
nodeLabels.add(NodeLabel.newInstance("gpu"));
nodeLabels.add(NodeLabel.newInstance("x86"));
return nodeLabels;
}
private ContainerStatus getContainerStatus(int applicationId,
int containerID, int appAttemptId) {
ContainerStatus status = recordFactory
.newRecordInstance(ContainerStatus.class);
status.setContainerId(getContainerId(containerID, appAttemptId));
return status;
}
private ApplicationAttemptId getApplicationAttemptId(int appAttemptId) {
ApplicationAttemptId result = ApplicationAttemptIdPBImpl.newInstance(
getApplicationId(appAttemptId), appAttemptId);
return result;
}
private ContainerId getContainerId(int containerID, int appAttemptId) {
ContainerId containerId = ContainerIdPBImpl.newContainerId(
getApplicationAttemptId(appAttemptId), containerID);
return containerId;
}
private ApplicationId getApplicationId(int applicationId) {
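    // Use an anonymous subclass to reach the protected setters/build() of
    // ApplicationIdPBImpl, then copy the built proto into a fresh instance.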
ApplicationIdPBImpl appId = new ApplicationIdPBImpl() {
public ApplicationIdPBImpl setParameters(int id, long timestamp) {
setClusterTimestamp(timestamp);
setId(id);
build();
return this;
}
}.setParameters(applicationId, 1000);
return new ApplicationIdPBImpl(appId.getProto());
}
private NodeStatus getNodeStatus() {
NodeStatus status = recordFactory.newRecordInstance(NodeStatus.class);
status.setContainersStatuses(new ArrayList<ContainerStatus>());
status.setKeepAliveApplications(new ArrayList<ApplicationId>());
status.setNodeHealthStatus(getNodeHealthStatus());
status.setNodeId(getNodeId());
status.setResponseId(1);
return status;
}
private NodeId getNodeId() {
return NodeId.newInstance("localhost", 9090);
}
  private NodeHealthStatus getNodeHealthStatus() {
    NodeHealthStatus healthStatus = recordFactory
        .newRecordInstance(NodeHealthStatus.class);
    healthStatus.setHealthReport("healthReport");
    healthStatus.setIsNodeHealthy(true);
    healthStatus.setLastHealthReportTime(1000);
    return healthStatus;
  }
private MasterKey getMasterKey() {
MasterKey key = recordFactory.newRecordInstance(MasterKey.class);
key.setBytes(ByteBuffer.allocate(0));
key.setKeyId(1);
return key;
}
}
| 14,761 | 37.543081 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRPCFactories.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.junit.Test;
public class TestYSCRPCFactories {
@Test
public void test() {
testPbServerFactory();
testPbClientFactory();
}
private void testPbServerFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
Configuration conf = new Configuration();
ResourceTracker instance = new ResourceTrackerTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
ResourceTracker.class, instance, addr, conf, null, 1);
server.start();
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create server");
    } finally {
      if (server != null) {
        server.stop();
      }
    }
}
private void testPbClientFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
    System.err.println(addr.getHostName() + ":" + addr.getPort());
Configuration conf = new Configuration();
ResourceTracker instance = new ResourceTrackerTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
ResourceTracker.class, instance, addr, conf, null, 1);
server.start();
System.err.println(server.getListenerAddress());
System.err.println(NetUtils.getConnectAddress(server));
ResourceTracker client = null;
try {
client = (ResourceTracker) RpcClientFactoryPBImpl.get().getClient(ResourceTracker.class, 1, NetUtils.getConnectAddress(server), conf);
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create client");
}
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create server");
    } finally {
      if (server != null) {
        server.stop();
      }
    }
}
public class ResourceTrackerTestImpl implements ResourceTracker {
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
}
}
| 4,395 | 33.077519 | 142 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestResourceTrackerPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Tests ResourceTrackerPBClientImpl: each protocol method should return a
 * non-null response and propagate server-side YarnExceptions to the caller.
 */
public class TestResourceTrackerPBClientImpl {
private static ResourceTracker client;
private static Server server;
private final static org.apache.hadoop.yarn.factories.RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@BeforeClass
public static void start() {
InetSocketAddress address = new InetSocketAddress(0);
Configuration configuration = new Configuration();
ResourceTracker instance = new ResourceTrackerTestImpl();
server = RpcServerFactoryPBImpl.get().getServer(ResourceTracker.class,
instance, address, configuration, null, 1);
server.start();
client = (ResourceTracker) RpcClientFactoryPBImpl.get().getClient(
ResourceTracker.class, 1, NetUtils.getConnectAddress(server),
configuration);
}
@AfterClass
public static void stop() {
if (server != null) {
server.stop();
}
}
  /**
   * Test the registerNodeManager method. It should return a non-null
   * result.
   */
@Test
public void testResourceTrackerPBClientImpl() throws Exception {
RegisterNodeManagerRequest request = recordFactory
.newRecordInstance(RegisterNodeManagerRequest.class);
assertNotNull(client.registerNodeManager(request));
ResourceTrackerTestImpl.exception = true;
try {
client.registerNodeManager(request);
fail("there should be YarnException");
} catch (YarnException e) {
assertTrue(e.getMessage().startsWith("testMessage"));
}finally{
ResourceTrackerTestImpl.exception = false;
}
}
  /**
   * Test the nodeHeartbeat method. It should return a non-null result.
   */
@Test
public void testNodeHeartbeat() throws Exception {
NodeHeartbeatRequest request = recordFactory
.newRecordInstance(NodeHeartbeatRequest.class);
assertNotNull(client.nodeHeartbeat(request));
ResourceTrackerTestImpl.exception = true;
try {
client.nodeHeartbeat(request);
fail("there should be YarnException");
} catch (YarnException e) {
assertTrue(e.getMessage().startsWith("testMessage"));
}finally{
ResourceTrackerTestImpl.exception = false;
}
}
  /**
   * Test the unRegisterNodeManager method. It should return a non-null
   * result.
   */
@Test
public void testUnRegisterNodeManager() throws Exception {
UnRegisterNodeManagerRequest request = UnRegisterNodeManagerRequest
.newInstance(NodeId.newInstance("host1", 1234));
assertNotNull(client.unRegisterNodeManager(request));
ResourceTrackerTestImpl.exception = true;
try {
client.unRegisterNodeManager(request);
fail("there should be YarnException");
} catch (YarnException e) {
assertTrue(e.getMessage().startsWith("testMessage"));
} finally {
ResourceTrackerTestImpl.exception = false;
}
}
public static class ResourceTrackerTestImpl implements ResourceTracker {
public static boolean exception = false;
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException, IOException {
if (exception) {
throw new YarnException("testMessage");
}
return recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
if (exception) {
throw new YarnException("testMessage");
}
return recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
if (exception) {
throw new YarnException("testMessage");
}
return UnRegisterNodeManagerResponse.newInstance();
}
}
}
| 6,070 | 33.299435 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/lib/TestZKClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.lib;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import org.junit.Assert;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZKDatabase;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnLog;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestZKClient {
public static int CONNECTION_TIMEOUT = 30000;
static final File BASETEST =
new File(System.getProperty("build.test.dir", "target/zookeeper-build"));
protected String hostPort = "127.0.0.1:2000";
protected int maxCnxns = 0;
protected NIOServerCnxnFactory factory = null;
protected ZooKeeperServer zks;
protected File tmpDir = null;
  public static String send4LetterWord(String host, int port, String cmd)
      throws IOException {
Socket sock = new Socket(host, port);
BufferedReader reader = null;
try {
OutputStream outstream = sock.getOutputStream();
outstream.write(cmd.getBytes());
outstream.flush();
// this replicates NC - close the output stream before reading
sock.shutdownOutput();
reader =
new BufferedReader(
new InputStreamReader(sock.getInputStream()));
StringBuilder sb = new StringBuilder();
String line;
while((line = reader.readLine()) != null) {
sb.append(line + "\n");
}
return sb.toString();
} finally {
sock.close();
if (reader != null) {
reader.close();
}
}
}
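  // Illustrative usage (a sketch with assumed host/port values): send the
  // "stat" four-letter admin command and read back the server's status text.
  //   String stats = send4LetterWord("127.0.0.1", 2000, "stat");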
public static boolean waitForServerDown(String hp, long timeout) {
long start = System.currentTimeMillis();
while (true) {
try {
String host = hp.split(":")[0];
int port = Integer.parseInt(hp.split(":")[1]);
send4LetterWord(host, port, "stat");
} catch (IOException e) {
return true;
}
if (System.currentTimeMillis() > start + timeout) {
break;
}
try {
Thread.sleep(250);
} catch (InterruptedException e) {
// ignore
}
}
return false;
}
public static boolean waitForServerUp(String hp, long timeout) {
long start = System.currentTimeMillis();
while (true) {
try {
String host = hp.split(":")[0];
int port = Integer.parseInt(hp.split(":")[1]);
// if there are multiple hostports, just take the first one
String result = send4LetterWord(host, port, "stat");
if (result.startsWith("Zookeeper version:")) {
return true;
}
} catch (IOException e) {
}
if (System.currentTimeMillis() > start + timeout) {
break;
}
try {
Thread.sleep(250);
} catch (InterruptedException e) {
// ignore
}
}
return false;
}
public static File createTmpDir(File parentDir) throws IOException {
File tmpFile = File.createTempFile("test", ".junit", parentDir);
// don't delete tmpFile - this ensures we don't attempt to create
// a tmpDir with a duplicate name
File tmpDir = new File(tmpFile + ".dir");
Assert.assertFalse(tmpDir.exists());
Assert.assertTrue(tmpDir.mkdirs());
return tmpDir;
}
@Before
public void setUp() throws IOException, InterruptedException {
System.setProperty("zookeeper.preAllocSize", "100");
FileTxnLog.setPreallocSize(100 * 1024);
if (!BASETEST.exists()) {
BASETEST.mkdirs();
}
File dataDir = createTmpDir(BASETEST);
zks = new ZooKeeperServer(dataDir, dataDir, 3000);
final int PORT = Integer.parseInt(hostPort.split(":")[1]);
if (factory == null) {
factory = new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress(PORT), maxCnxns);
}
factory.startup(zks);
Assert.assertTrue("waiting for server up",
waitForServerUp("127.0.0.1:" + PORT,
CONNECTION_TIMEOUT));
}
@After
public void tearDown() throws IOException, InterruptedException {
if (zks != null) {
ZKDatabase zkDb = zks.getZKDatabase();
factory.shutdown();
try {
zkDb.close();
} catch (IOException ie) {
}
final int PORT = Integer.parseInt(hostPort.split(":")[1]);
Assert.assertTrue("waiting for server down",
waitForServerDown("127.0.0.1:" + PORT,
CONNECTION_TIMEOUT));
}
}
  @Test
  public void testZKClient() throws Exception {
    test("/nodemanager");
  }
  private void test(String testPath) throws Exception {
    ZKClient client = new ZKClient(hostPort);
    client.registerService(testPath, "hostPort");
    client.unregisterService(testPath);
  }
}
| 5,702 | 29.015789 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NMContainerStatusPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
public class TestProtocolRecords {
@Test
public void testNMContainerStatus() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
Resource resource = Resource.newInstance(1000, 200);
NMContainerStatus report =
NMContainerStatus.newInstance(containerId,
ContainerState.COMPLETE, resource, "diagnostics",
ContainerExitStatus.ABORTED, Priority.newInstance(10), 1234);
NMContainerStatus reportProto =
new NMContainerStatusPBImpl(
((NMContainerStatusPBImpl) report).getProto());
Assert.assertEquals("diagnostics", reportProto.getDiagnostics());
Assert.assertEquals(resource, reportProto.getAllocatedResource());
Assert.assertEquals(ContainerExitStatus.ABORTED,
reportProto.getContainerExitStatus());
Assert.assertEquals(ContainerState.COMPLETE,
reportProto.getContainerState());
Assert.assertEquals(containerId, reportProto.getContainerId());
Assert.assertEquals(Priority.newInstance(10), reportProto.getPriority());
Assert.assertEquals(1234, reportProto.getCreationTime());
}
@Test
public void testRegisterNodeManagerRequest() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId,
ContainerState.RUNNING, Resource.newInstance(1024, 1), "diagnostics",
0, Priority.newInstance(10), 1234);
List<NMContainerStatus> reports = Arrays.asList(containerReport);
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("1.1.1.1", 1000), 8080,
Resource.newInstance(1024, 1), "NM-version-id", reports,
Arrays.asList(appId));
RegisterNodeManagerRequest requestProto =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
Assert.assertEquals(containerReport, requestProto
.getNMContainerStatuses().get(0));
Assert.assertEquals(8080, requestProto.getHttpPort());
Assert.assertEquals("NM-version-id", requestProto.getNMVersion());
Assert.assertEquals(NodeId.newInstance("1.1.1.1", 1000),
requestProto.getNodeId());
Assert.assertEquals(Resource.newInstance(1024, 1),
requestProto.getResource());
Assert.assertEquals(1, requestProto.getRunningApplications().size());
Assert.assertEquals(appId, requestProto.getRunningApplications().get(0));
}
@Test
public void testNodeHeartBeatResponse() throws IOException {
NodeHeartbeatResponse record =
Records.newRecord(NodeHeartbeatResponse.class);
Map<ApplicationId, ByteBuffer> appCredentials =
new HashMap<ApplicationId, ByteBuffer>();
Credentials app1Cred = new Credentials();
Token<DelegationTokenIdentifier> token1 =
new Token<DelegationTokenIdentifier>();
token1.setKind(new Text("kind1"));
app1Cred.addToken(new Text("token1"), token1);
Token<DelegationTokenIdentifier> token2 =
new Token<DelegationTokenIdentifier>();
token2.setKind(new Text("kind2"));
app1Cred.addToken(new Text("token2"), token2);
DataOutputBuffer dob = new DataOutputBuffer();
app1Cred.writeTokenStorageToStream(dob);
ByteBuffer byteBuffer1 = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
appCredentials.put(ApplicationId.newInstance(1234, 1), byteBuffer1);
record.setSystemCredentialsForApps(appCredentials);
NodeHeartbeatResponse proto =
new NodeHeartbeatResponsePBImpl(
((NodeHeartbeatResponsePBImpl) record).getProto());
Assert.assertEquals(appCredentials, proto.getSystemCredentialsForApps());
}
}
| 6,082 | 44.059259 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
import org.junit.Assert;
import org.junit.Test;
public class TestRegisterNodeManagerRequest {
@Test
public void testRegisterNodeManagerRequest() {
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", Arrays.asList(NMContainerStatus.newInstance(
ContainerId.newContainerId(
ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234L, 1), 1), 1),
ContainerState.RUNNING, Resource.newInstance(1024, 1), "good", -1,
Priority.newInstance(0), 1234)), Arrays.asList(
ApplicationId.newInstance(1234L, 1),
ApplicationId.newInstance(1234L, 2)));
    // serialize to proto, and get the request back from the proto
RegisterNodeManagerRequest request1 =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
// check values
Assert.assertEquals(request1.getNMContainerStatuses().size(), request
.getNMContainerStatuses().size());
Assert.assertEquals(request1.getNMContainerStatuses().get(0).getContainerId(),
request.getNMContainerStatuses().get(0).getContainerId());
Assert.assertEquals(request1.getRunningApplications().size(), request
.getRunningApplications().size());
Assert.assertEquals(request1.getRunningApplications().get(0), request
.getRunningApplications().get(0));
Assert.assertEquals(request1.getRunningApplications().get(1), request
.getRunningApplications().get(1));
}
@Test
public void testRegisterNodeManagerRequestWithNullArrays() {
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(NodeId.newInstance("host", 1234),
1234, Resource.newInstance(0, 0), "version", null, null);
    // serialize to proto, and get the request back from the proto
RegisterNodeManagerRequest request1 =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
// check values
Assert.assertEquals(0, request1.getNMContainerStatuses().size());
Assert.assertEquals(0, request1.getRunningApplications().size());
}
}
| 3,638 | 42.843373 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import static org.junit.Assert.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.junit.Test;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
public class TestRegisterNodeManagerResponse {
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@Test
public void testRoundTrip() throws Exception {
RegisterNodeManagerResponse resp = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
    byte[] b = {0, 1, 2, 3, 4, 5};
MasterKey containerTokenMK =
recordFactory.newRecordInstance(MasterKey.class);
containerTokenMK.setKeyId(54321);
containerTokenMK.setBytes(ByteBuffer.wrap(b));
resp.setContainerTokenMasterKey(containerTokenMK);
MasterKey nmTokenMK =
recordFactory.newRecordInstance(MasterKey.class);
nmTokenMK.setKeyId(12345);
nmTokenMK.setBytes(ByteBuffer.wrap(b));
resp.setNMTokenMasterKey(nmTokenMK);
resp.setNodeAction(NodeAction.NORMAL);
assertEquals(NodeAction.NORMAL, resp.getNodeAction());
// Verifying containerTokenMasterKey
assertNotNull(resp.getContainerTokenMasterKey());
assertEquals(54321, resp.getContainerTokenMasterKey().getKeyId());
assertArrayEquals(b, resp.getContainerTokenMasterKey().getBytes().array());
RegisterNodeManagerResponse respCopy = serDe(resp);
assertEquals(NodeAction.NORMAL, respCopy.getNodeAction());
assertNotNull(respCopy.getContainerTokenMasterKey());
assertEquals(54321, respCopy.getContainerTokenMasterKey().getKeyId());
assertArrayEquals(b, respCopy.getContainerTokenMasterKey().getBytes()
.array());
// Verifying nmTokenMasterKey
assertNotNull(resp.getNMTokenMasterKey());
assertEquals(12345, resp.getNMTokenMasterKey().getKeyId());
assertArrayEquals(b, resp.getNMTokenMasterKey().getBytes().array());
respCopy = serDe(resp);
assertEquals(NodeAction.NORMAL, respCopy.getNodeAction());
assertNotNull(respCopy.getNMTokenMasterKey());
assertEquals(12345, respCopy.getNMTokenMasterKey().getKeyId());
assertArrayEquals(b, respCopy.getNMTokenMasterKey().getBytes().array());
}
public static RegisterNodeManagerResponse serDe(RegisterNodeManagerResponse orig) throws Exception {
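    // Round-trip the response through its wire format: serialize the proto to
    // bytes, parse them back with a fresh builder, and wrap in a new PBImpl.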
RegisterNodeManagerResponsePBImpl asPB = (RegisterNodeManagerResponsePBImpl)orig;
RegisterNodeManagerResponseProto proto = asPB.getProto();
ByteArrayOutputStream out = new ByteArrayOutputStream();
proto.writeTo(out);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
RegisterNodeManagerResponseProto.Builder cp = RegisterNodeManagerResponseProto.newBuilder();
cp.mergeFrom(in);
return new RegisterNodeManagerResponsePBImpl(cp.build());
}
}
| 4,114 | 39.343137 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/utils/TestLeveldbIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.utils;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.DBIterator;
import org.junit.Test;
public class TestLeveldbIterator {
private static class CallInfo {
String methodName;
Object[] args;
Class<?>[] argTypes;
public CallInfo(String methodName, Object... args) {
this.methodName = methodName;
this.args = args;
argTypes = new Class[args.length];
for (int i = 0; i < args.length; ++i) {
argTypes[i] = args[i].getClass();
}
}
}
// array of methods that should throw DBException instead of raw
// runtime exceptions
private static CallInfo[] RTEXC_METHODS = new CallInfo[] {
new CallInfo("seek", new byte[0]),
new CallInfo("seekToFirst"),
new CallInfo("seekToLast"),
new CallInfo("hasNext"),
new CallInfo("next"),
new CallInfo("peekNext"),
new CallInfo("hasPrev"),
new CallInfo("prev"),
new CallInfo("peekPrev"),
new CallInfo("remove")
};
@Test
public void testExceptionHandling() throws Exception {
InvocationHandler rtExcHandler = new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
throw new RuntimeException("forced runtime error");
}
};
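    // Build a DBIterator proxy whose every method throws a raw
    // RuntimeException, then verify below that LeveldbIterator wraps each
    // failure in a single DBException (and close() in an IOException).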
DBIterator dbiter = (DBIterator) Proxy.newProxyInstance(
DBIterator.class.getClassLoader(), new Class[] { DBIterator.class },
rtExcHandler);
LeveldbIterator iter = new LeveldbIterator(dbiter);
for (CallInfo ci : RTEXC_METHODS) {
Method method = iter.getClass().getMethod(ci.methodName, ci.argTypes);
assertNotNull("unable to locate method " + ci.methodName, method);
try {
method.invoke(iter, ci.args);
fail("operation should have thrown");
} catch (InvocationTargetException ite) {
Throwable exc = ite.getTargetException();
assertTrue("Method " + ci.methodName + " threw non-DBException: "
+ exc, exc instanceof DBException);
assertFalse("Method " + ci.methodName + " double-wrapped DBException",
exc.getCause() instanceof DBException);
}
}
// check close() throws IOException
try {
iter.close();
fail("operation shoul have thrown");
} catch (IOException e) {
// expected
}
}
}
| 3,534 | 32.990385 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.lib;
import org.apache.hadoop.classification.InterfaceAudience;
| 929 | 41.272727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.lib;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
/**
 * ZK registration library for talking to ZooKeeper.
 * It currently does not use any authorization.
 */
public class ZKClient {
private ZooKeeper zkClient;
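  // Illustrative usage (a sketch; the connection string, paths, and data
  // below are assumed placeholder values, and parent znodes are assumed to
  // already exist):
  //
  //   ZKClient zk = new ZKClient("localhost:2181");
  //   zk.registerService("/services/nm1", "host1:8042"); // ephemeral znode
  //   List<String> names = zk.listServices("/services");
  //   String data = zk.getServiceData("/services/nm1");
  //   zk.unregisterService("/services/nm1");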
  /**
   * Creates the ZooKeeper client used to talk to ZooKeeper.
   * @param hostPort the host:port connection string of the ensemble
   * @throws IOException if the connection cannot be established
   */
  public ZKClient(String hostPort) throws IOException {
    zkClient = new ZooKeeper(hostPort, 30000, new ZKWatcher());
  }
/**
* register the service to a specific path
* @param path the path in zookeeper namespace to register to
* @param data the data that is part of this registration
* @throws IOException
* @throws InterruptedException
*/
public void registerService(String path, String data) throws
IOException, InterruptedException {
try {
zkClient.create(path, data.getBytes(Charset.forName("UTF-8")),
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch(KeeperException ke) {
throw new IOException(ke);
}
}
/**
* unregister the service.
* @param path the path at which the service was registered
* @throws IOException
* @throws InterruptedException
*/
public void unregisterService(String path) throws IOException,
InterruptedException {
try {
zkClient.delete(path, -1);
} catch(KeeperException ke) {
throw new IOException(ke);
}
}
/**
* list the services registered under a path
* @param path the path under which services are
* registered
* @return the list of names of services registered
* @throws IOException
* @throws InterruptedException
*/
public List<String> listServices(String path) throws IOException,
InterruptedException {
List<String> children = null;
try {
children = zkClient.getChildren(path, false);
} catch(KeeperException ke) {
throw new IOException(ke);
}
return children;
}
/**
* get data published by the service at the registration address
* @param path the path where the service is registered
* @return the data of the registered service
* @throws IOException
* @throws InterruptedException
*/
public String getServiceData(String path) throws IOException,
InterruptedException {
String data;
try {
Stat stat = new Stat();
byte[] byteData = zkClient.getData(path, false, stat);
data = new String(byteData, Charset.forName("UTF-8"));
} catch(KeeperException ke) {
throw new IOException(ke);
}
return data;
}
  /**
   * A watcher class that handles events from ZooKeeper.
   */
private static class ZKWatcher implements Watcher {
@Override
public void process(WatchedEvent arg0) {
}
}
}
| 3,888 | 27.807407 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import java.lang.annotation.Annotation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
public class RMNMSecurityInfoClass extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
if (!protocol.equals(ResourceTrackerPB.class)) {
return null;
}
return new KerberosInfo() {
@Override
public Class<? extends Annotation> annotationType() {
return null;
}
@Override
public String serverPrincipal() {
return YarnConfiguration.RM_PRINCIPAL;
}
@Override
public String clientPrincipal() {
return YarnConfiguration.NM_PRINCIPAL;
}
};
}
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
return null;
}
}
| 1,891 | 29.516129 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/sharedcache/SharedCacheUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcache;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
* A utility class that contains helper methods for dealing with the internal
* shared cache structure.
*/
@Private
@Unstable
public class SharedCacheUtil {
private static final Log LOG = LogFactory.getLog(SharedCacheUtil.class);
@Private
public static int getCacheDepth(Configuration conf) {
int cacheDepth =
conf.getInt(YarnConfiguration.SHARED_CACHE_NESTED_LEVEL,
YarnConfiguration.DEFAULT_SHARED_CACHE_NESTED_LEVEL);
if (cacheDepth <= 0) {
LOG.warn("Specified cache depth was less than or equal to zero."
+ " Using default value instead. Default: "
+ YarnConfiguration.DEFAULT_SHARED_CACHE_NESTED_LEVEL
+ ", Specified: " + cacheDepth);
cacheDepth = YarnConfiguration.DEFAULT_SHARED_CACHE_NESTED_LEVEL;
}
return cacheDepth;
}
@Private
public static String getCacheEntryPath(int cacheDepth, String cacheRoot,
String checksum) {
if (cacheDepth <= 0) {
throw new IllegalArgumentException(
"The cache depth must be greater than 0. Passed value: " + cacheDepth);
}
if (checksum.length() < cacheDepth) {
throw new IllegalArgumentException("The checksum passed was too short: "
+ checksum);
}
// Build the cache entry path to the specified depth. For example, if the
// depth is 3 and the checksum is 3c4f, the path would be:
// SHARED_CACHE_ROOT/3/c/4/3c4f
StringBuilder sb = new StringBuilder(cacheRoot);
for (int i = 0; i < cacheDepth; i++) {
sb.append(Path.SEPARATOR_CHAR);
sb.append(checksum.charAt(i));
}
sb.append(Path.SEPARATOR_CHAR).append(checksum);
return sb.toString();
}
@Private
public static String getCacheEntryGlobPattern(int depth) {
StringBuilder pattern = new StringBuilder();
for (int i = 0; i < depth; i++) {
pattern.append("*/");
}
pattern.append("*");
return pattern.toString();
}
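  // Illustrative values (assumed, not from the original source):
  //   getCacheEntryPath(3, "/sharedcache", "3c4f21")
  //       -> "/sharedcache/3/c/4/3c4f21"
  //   getCacheEntryGlobPattern(3) -> "*/*/*/*"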
}
| 3,155 | 33.304348 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security;
import java.security.SecureRandom;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
/**
* SecretManager for ContainerTokens. Extended by both RM and NM and hence is
* present in yarn-server-common package.
*
*/
public class BaseContainerTokenSecretManager extends
SecretManager<ContainerTokenIdentifier> {
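  // Illustrative flow (a sketch; subclasses normally do this): an RM-side
  // subclass assigns currentMasterKey via createNewMasterKey() and ships
  // getCurrentKey() to NodeManagers in heartbeats, whose subclass then uses
  // the same key to validate container tokens via retrievePassword().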
private static Log LOG = LogFactory
.getLog(BaseContainerTokenSecretManager.class);
protected int serialNo = new SecureRandom().nextInt();
protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
protected final Lock readLock = readWriteLock.readLock();
protected final Lock writeLock = readWriteLock.writeLock();
/**
* THE masterKey. ResourceManager should persist this and recover it on
* restart instead of generating a new key. The NodeManagers get it from the
* ResourceManager and use it for validating container-tokens.
*/
protected MasterKeyData currentMasterKey;
protected final long containerTokenExpiryInterval;
public BaseContainerTokenSecretManager(Configuration conf) {
this.containerTokenExpiryInterval =
conf.getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
}
// Need lock as we increment serialNo etc.
protected MasterKeyData createNewMasterKey() {
this.writeLock.lock();
try {
return new MasterKeyData(serialNo++, generateSecret());
} finally {
this.writeLock.unlock();
}
}
@Private
public MasterKey getCurrentKey() {
this.readLock.lock();
try {
return this.currentMasterKey.getMasterKey();
} finally {
this.readLock.unlock();
}
}
@Override
public byte[] createPassword(ContainerTokenIdentifier identifier) {
if (LOG.isDebugEnabled()) {
LOG.debug("Creating password for " + identifier.getContainerID()
+ " for user " + identifier.getUser() + " to be run on NM "
+ identifier.getNmHostAddress());
}
this.readLock.lock();
try {
return createPassword(identifier.getBytes(),
this.currentMasterKey.getSecretKey());
} finally {
this.readLock.unlock();
}
}
@Override
public byte[] retrievePassword(ContainerTokenIdentifier identifier)
throws SecretManager.InvalidToken {
this.readLock.lock();
try {
return retrievePasswordInternal(identifier, this.currentMasterKey);
} finally {
this.readLock.unlock();
}
}
protected byte[] retrievePasswordInternal(ContainerTokenIdentifier identifier,
MasterKeyData masterKey)
throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
if (LOG.isDebugEnabled()) {
LOG.debug("Retrieving password for " + identifier.getContainerID()
+ " for user " + identifier.getUser() + " to be run on NM "
+ identifier.getNmHostAddress());
}
return createPassword(identifier.getBytes(), masterKey.getSecretKey());
}
/**
* Used by the RPC layer.
*/
@Override
public ContainerTokenIdentifier createIdentifier() {
return new ContainerTokenIdentifier();
}
}
| 4,507 | 32.894737 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/MasterKeyData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security;
import java.nio.ByteBuffer;
import javax.crypto.SecretKey;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.util.Records;
public class MasterKeyData {
private final MasterKey masterKeyRecord;
// Underlying secret-key also stored to avoid repetitive encoding and
// decoding the masterKeyRecord bytes.
private final SecretKey generatedSecretKey;
public MasterKeyData(int serialNo, SecretKey secretKey) {
this.masterKeyRecord = Records.newRecord(MasterKey.class);
this.masterKeyRecord.setKeyId(serialNo);
this.generatedSecretKey = secretKey;
this.masterKeyRecord.setBytes(ByteBuffer.wrap(generatedSecretKey
.getEncoded()));
}
public MasterKeyData(MasterKey masterKeyRecord, SecretKey secretKey) {
this.masterKeyRecord = masterKeyRecord;
this.generatedSecretKey = secretKey;
}
public MasterKey getMasterKey() {
return this.masterKeyRecord;
}
public SecretKey getSecretKey() {
return this.generatedSecretKey;
}
}
| 1,880 | 30.881356 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseNMTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security;
import java.net.InetSocketAddress;
import java.security.SecureRandom;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
public class BaseNMTokenSecretManager extends
SecretManager<NMTokenIdentifier> {
private static Log LOG = LogFactory
.getLog(BaseNMTokenSecretManager.class);
protected int serialNo = new SecureRandom().nextInt();
protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
protected final Lock readLock = readWriteLock.readLock();
protected final Lock writeLock = readWriteLock.writeLock();
protected MasterKeyData currentMasterKey;
protected MasterKeyData createNewMasterKey() {
this.writeLock.lock();
try {
return new MasterKeyData(serialNo++, generateSecret());
} finally {
this.writeLock.unlock();
}
}
@Private
public MasterKey getCurrentKey() {
this.readLock.lock();
try {
return this.currentMasterKey.getMasterKey();
} finally {
this.readLock.unlock();
}
}
@Override
protected byte[] createPassword(NMTokenIdentifier identifier) {
if (LOG.isDebugEnabled()) {
LOG.debug("creating password for "
+ identifier.getApplicationAttemptId() + " for user "
+ identifier.getApplicationSubmitter() + " to run on NM "
+ identifier.getNodeId());
}
readLock.lock();
try {
return createPassword(identifier.getBytes(),
currentMasterKey.getSecretKey());
} finally {
readLock.unlock();
}
}
@Override
public byte[] retrievePassword(NMTokenIdentifier identifier)
throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
readLock.lock();
try {
      return retrievePasswordInternal(identifier, currentMasterKey);
} finally {
readLock.unlock();
}
}
  protected byte[] retrievePasswordInternal(NMTokenIdentifier identifier,
      MasterKeyData masterKey) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("retrieving password for "
          + identifier.getApplicationAttemptId() + " for user "
          + identifier.getApplicationSubmitter() + " to run on NM "
          + identifier.getNodeId());
}
return createPassword(identifier.getBytes(), masterKey.getSecretKey());
}
  /**
   * Used by the RPC layer.
   */
@Override
public NMTokenIdentifier createIdentifier() {
return new NMTokenIdentifier();
}
/**
* Helper function for creating NMTokens.
*/
public Token createNMToken(ApplicationAttemptId applicationAttemptId,
NodeId nodeId, String applicationSubmitter) {
byte[] password;
NMTokenIdentifier identifier;
this.readLock.lock();
try {
identifier =
new NMTokenIdentifier(applicationAttemptId, nodeId,
applicationSubmitter, this.currentMasterKey.getMasterKey()
.getKeyId());
password = this.createPassword(identifier);
} finally {
this.readLock.unlock();
}
return newInstance(password, identifier);
}
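  // Example invocation (a sketch; the IDs and names below are assumed
  // placeholder values):
  //   Token nmToken = secretManager.createNMToken(
  //       ApplicationAttemptId.newInstance(
  //           ApplicationId.newInstance(1234L, 1), 1),
  //       NodeId.newInstance("host", 1234), "user");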
public static Token newInstance(byte[] password,
NMTokenIdentifier identifier) {
NodeId nodeId = identifier.getNodeId();
// RPC layer client expects ip:port as service for tokens
InetSocketAddress addr =
NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
Token nmToken =
Token.newInstance(identifier.getBytes(),
NMTokenIdentifier.KIND.toString(), password, SecurityUtil
.buildTokenService(addr).toString());
return nmToken;
}
}
| 5,041 | 32.171053 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security.http;
import java.io.IOException;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
@Private
@Unstable
public class RMAuthenticationFilter extends
DelegationTokenAuthenticationFilter {
static private AbstractDelegationTokenSecretManager<?> manager;
private static final String OLD_HEADER = "Hadoop-YARN-Auth-Delegation-Token";
public RMAuthenticationFilter() {
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
filterConfig.getServletContext().setAttribute(
DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
manager);
super.init(filterConfig);
}
/**
* {@inheritDoc}
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain filterChain) throws IOException, ServletException {
HttpServletRequest req = (HttpServletRequest) request;
String newHeader =
req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
if (newHeader == null || newHeader.isEmpty()) {
// For backward compatibility, allow use of the old header field
// only when the new header doesn't exist
final String oldHeader = req.getHeader(OLD_HEADER);
if (oldHeader != null && !oldHeader.isEmpty()) {
request = new HttpServletRequestWrapper(req) {
@Override
public String getHeader(String name) {
if (name
.equals(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)) {
return oldHeader;
}
return super.getHeader(name);
}
};
}
}
super.doFilter(request, response, filterChain);
}
public static void setDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager<?> manager) {
RMAuthenticationFilter.manager = manager;
}
}
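// Hedged wiring sketch (added for illustration): how the RM might hand its
// delegation-token secret manager to this filter before HttpServer2 builds
// the filter chain. "rmDTSecretManager" is a placeholder name, not the
// actual RM field.
class RMAuthenticationFilterWiringSketch {
static void wire(AbstractDelegationTokenSecretManager<?> rmDTSecretManager) {
// Must run before init(), which publishes the manager into the servlet
// context for the delegation-token authentication handler to pick up.
RMAuthenticationFilter.setDelegationTokenSecretManager(rmDTSecretManager);
}
}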
| 3,337 | 36.088889 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security.http;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
@Unstable
public class RMAuthenticationFilterInitializer extends FilterInitializer {
String configPrefix;
String kerberosPrincipalProperty;
String cookiePath;
public RMAuthenticationFilterInitializer() {
this.configPrefix = "hadoop.http.authentication.";
this.kerberosPrincipalProperty = KerberosAuthenticationHandler.PRINCIPAL;
this.cookiePath = "/";
}
protected Map<String, String> createFilterConfig(Configuration conf) {
Map<String, String> filterConfig = new HashMap<String, String>();
// setting the cookie path to root '/' so it is used for all resources.
filterConfig.put(AuthenticationFilter.COOKIE_PATH, cookiePath);
// Before the conf object is passed in, the RM has already processed it and
// used RM-specific configs to overwrite hadoop-common ones. Hence we only
// need to source the hadoop.proxyuser configs here.
for (Map.Entry<String, String> entry : conf) {
String propName = entry.getKey();
if (propName.startsWith(configPrefix)) {
String value = conf.get(propName);
String name = propName.substring(configPrefix.length());
filterConfig.put(name, value);
} else if (propName.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
String value = conf.get(propName);
String name = propName.substring("hadoop.".length());
filterConfig.put(name, value);
}
}
// Resolve _HOST into bind address
String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
String principal = filterConfig.get(kerberosPrincipalProperty);
if (principal != null) {
try {
principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
} catch (IOException ex) {
throw new RuntimeException(
"Could not resolve Kerberos principal name: " + ex.toString(), ex);
}
filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
}
filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
RMDelegationTokenIdentifier.KIND_NAME.toString());
return filterConfig;
}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = createFilterConfig(conf);
container.addFilter("RMAuthenticationFilter",
RMAuthenticationFilter.class.getName(), filterConfig);
}
}
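// Hedged configuration sketch (added for illustration): the properties this
// initializer consumes. createFilterConfig is protected, so this sketch
// assumes same-package access; all values are illustrative.
class RMAuthenticationFilterInitializerSketch {
static Map<String, String> buildExampleFilterConfig() {
Configuration conf = new Configuration();
conf.set("hadoop.http.authentication.type", "kerberos");
conf.set("hadoop.http.authentication.kerberos.principal",
"HTTP/_HOST@EXAMPLE.COM");
conf.set(HttpServer2.BIND_ADDRESS, "rm.example.com");
// The "hadoop.http.authentication." prefix is stripped, _HOST in the
// principal is resolved against the bind address, and the RM delegation
// token kind is added under DelegationTokenAuthenticationHandler.TOKEN_KIND.
return new RMAuthenticationFilterInitializer().createFilterConfig(conf);
}
}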
| 4,157 | 38.6 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import com.google.inject.Inject;
public class AppBlock extends HtmlBlock {
private static final Log LOG = LogFactory.getLog(AppBlock.class);
protected ApplicationBaseProtocol appBaseProt;
protected Configuration conf;
protected ApplicationId appID = null;
@Inject
protected AppBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx,
Configuration conf) {
super(ctx);
this.appBaseProt = appBaseProt;
this.conf = conf;
}
@Override
protected void render(Block html) {
String webUiType = $(WEB_UI_TYPE);
String aid = $(APPLICATION_ID);
if (aid.isEmpty()) {
puts("Bad request: requires Application ID");
return;
}
try {
appID = Apps.toAppID(aid);
} catch (Exception e) {
puts("Invalid Application ID: " + aid);
return;
}
UserGroupInformation callerUGI = getCallerUGI();
ApplicationReport appReport;
try {
final GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(appID);
if (callerUGI == null) {
appReport =
appBaseProt.getApplicationReport(request).getApplicationReport();
} else {
appReport = callerUGI.doAs(
new PrivilegedExceptionAction<ApplicationReport> () {
@Override
public ApplicationReport run() throws Exception {
return appBaseProt.getApplicationReport(request)
.getApplicationReport();
}
});
}
} catch (Exception e) {
String message = "Failed to read the application " + appID + ".";
LOG.error(message, e);
html.p()._(message)._();
return;
}
if (appReport == null) {
puts("Application not found: " + aid);
return;
}
AppInfo app = new AppInfo(appReport);
setTitle(join("Application ", aid));
if (webUiType != null
&& webUiType.equals(YarnWebParams.RM_WEB_UI)
&& conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)) {
// Application Kill
html.div()
.button()
.$onclick("confirmAction()").b("Kill Application")._()
._();
StringBuilder script = new StringBuilder();
script.append("function confirmAction() {")
.append(" b = confirm(\"Are you sure?\");")
.append(" if (b == true) {")
.append(" $.ajax({")
.append(" type: 'PUT',")
.append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
.append(" contentType: 'application/json',")
.append(" data: '{\"state\":\"KILLED\"}',")
.append(" dataType: 'json'")
.append(" }).done(function(data){")
.append(" setTimeout(function(){")
.append(" location.href = '/cluster/app/").append(aid).append("';")
.append(" }, 1000);")
.append(" }).fail(function(data){")
.append(" console.log(data);")
.append(" });")
.append(" }")
.append("}");
html.script().$type("text/javascript")._(script.toString())._();
}
String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
"/cluster/scheduler?openQueues=" + app.getQueue();
ResponseInfo overviewTable = info("Application Overview")
._("User:", schedulerPath, app.getUser())
._("Name:", app.getName())
._("Application Type:", app.getType())
._("Application Tags:",
app.getApplicationTags() == null ? "" : app.getApplicationTags())
._("Application Priority:", clarifyAppPriority(app.getPriority()))
._(
"YarnApplicationState:",
app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
.getAppState()))
._("Queue:", schedulerPath, app.getQueue())
._("FinalStatus Reported by AM:",
clairfyAppFinalStatus(app.getFinalAppStatus()))
._("Started:", Times.format(app.getStartedTime()))
._(
"Elapsed:",
StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
app.getFinishedTime())))
._(
"Tracking URL:",
app.getTrackingUrl() == null
|| app.getTrackingUrl().equals(UNAVAILABLE) ? null : root_url(app
.getTrackingUrl()),
app.getTrackingUrl() == null
|| app.getTrackingUrl().equals(UNAVAILABLE) ? "Unassigned" : app
.getAppState() == YarnApplicationState.FINISHED
|| app.getAppState() == YarnApplicationState.FAILED
|| app.getAppState() == YarnApplicationState.KILLED ? "History"
: "ApplicationMaster");
if (webUiType != null
&& webUiType.equals(YarnWebParams.RM_WEB_UI)) {
LogAggregationStatus status = getLogAggregationStatus();
if (status == null) {
overviewTable._("Log Aggregation Status", "N/A");
} else if (status == LogAggregationStatus.DISABLED
|| status == LogAggregationStatus.NOT_START
|| status == LogAggregationStatus.SUCCEEDED) {
overviewTable._("Log Aggregation Status", status.name());
} else {
overviewTable._("Log Aggregation Status",
root_url("logaggregationstatus", app.getAppId()), status.name());
}
}
overviewTable._("Diagnostics:",
app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo());
overviewTable._("Unmanaged Application:", app.isUnmanagedApp());
Collection<ApplicationAttemptReport> attempts;
try {
final GetApplicationAttemptsRequest request =
GetApplicationAttemptsRequest.newInstance(appID);
if (callerUGI == null) {
attempts = appBaseProt.getApplicationAttempts(request)
.getApplicationAttemptList();
} else {
attempts = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
@Override
public Collection<ApplicationAttemptReport> run() throws Exception {
return appBaseProt.getApplicationAttempts(request)
.getApplicationAttemptList();
}
});
}
} catch (Exception e) {
String message =
"Failed to read the attempts of the application " + appID + ".";
LOG.error(message, e);
html.p()._(message)._();
return;
}
createApplicationMetricsTable(html);
html._(InfoBlock.class);
generateApplicationTable(html, callerUGI, attempts);
}
protected void generateApplicationTable(Block html,
UserGroupInformation callerUGI,
Collection<ApplicationAttemptReport> attempts) {
// Application Attempt Table
TBODY<TABLE<Hamlet>> tbody =
html.table("#attempts").thead().tr().th(".id", "Attempt ID")
.th(".started", "Started").th(".node", "Node").th(".logs", "Logs")
._()._().tbody();
StringBuilder attemptsTableData = new StringBuilder("[\n");
for (final ApplicationAttemptReport appAttemptReport : attempts) {
AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
ContainerReport containerReport;
try {
final GetContainerReportRequest request =
GetContainerReportRequest.newInstance(
appAttemptReport.getAMContainerId());
if (callerUGI == null) {
containerReport =
appBaseProt.getContainerReport(request).getContainerReport();
} else {
containerReport = callerUGI.doAs(
new PrivilegedExceptionAction<ContainerReport>() {
@Override
public ContainerReport run() throws Exception {
ContainerReport report = null;
if (request.getContainerId() != null) {
try {
report = appBaseProt.getContainerReport(request)
.getContainerReport();
} catch (ContainerNotFoundException ex) {
LOG.warn(ex.getMessage());
}
}
return report;
}
});
}
} catch (Exception e) {
String message =
"Failed to read the AM container of the application attempt "
+ appAttemptReport.getApplicationAttemptId() + ".";
LOG.error(message, e);
html.p()._(message)._();
return;
}
long startTime = 0L;
String logsLink = null;
String nodeLink = null;
if (containerReport != null) {
ContainerInfo container = new ContainerInfo(containerReport);
startTime = container.getStartedTime();
logsLink = containerReport.getLogUrl();
nodeLink = containerReport.getNodeHttpAddress();
}
attemptsTableData
.append("[\"<a href='")
.append(url("appattempt", appAttempt.getAppAttemptId()))
.append("'>")
.append(appAttempt.getAppAttemptId())
.append("</a>\",\"")
.append(startTime)
.append("\",\"<a ")
.append(nodeLink == null ? "#" : "href='" + nodeLink)
.append("'>")
.append(nodeLink == null ? "N/A" : StringEscapeUtils
.escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink)))
.append("</a>\",\"<a ")
.append(logsLink == null ? "#" : "href='" + logsLink).append("'>")
.append(logsLink == null ? "N/A" : "Logs").append("</a>\"],\n");
}
if (attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
attemptsTableData.delete(attemptsTableData.length() - 2,
attemptsTableData.length() - 1);
}
attemptsTableData.append("]");
html.script().$type("text/javascript")
._("var attemptsTableData=" + attemptsTableData)._();
tbody._()._();
}
private String clarifyAppState(YarnApplicationState state) {
String ret = state.toString();
switch (state) {
case NEW:
return ret + ": waiting for application to be initialized";
case NEW_SAVING:
return ret + ": waiting for application to be persisted in state-store.";
case SUBMITTED:
return ret + ": waiting for application to be accepted by scheduler.";
case ACCEPTED:
return ret + ": waiting for AM container to be allocated, launched and"
+ " register with RM.";
case RUNNING:
return ret + ": AM has registered with RM and started running.";
default:
return ret;
}
}
private String clarifyAppPriority(int priority) {
return priority + " (Higher Integer value indicates higher priority)";
}
private String clarifyAppFinalStatus(FinalApplicationStatus status) {
if (status == FinalApplicationStatus.UNDEFINED) {
return "Application has not completed yet.";
}
return status.toString();
}
// The preemption metrics only need to be shown in RM WebUI
protected void createApplicationMetricsTable(Block html) {
}
// This will be overridden in RMAppBlock
protected LogAggregationStatus getLogAggregationStatus() {
return null;
}
}
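// Hedged subclass sketch (added for illustration): the two hooks above,
// createApplicationMetricsTable() and getLogAggregationStatus(), exist so
// the RM web UI can extend this block. The class below is illustrative and
// is not the actual RMAppBlock implementation.
class AppBlockSubclassSketch extends AppBlock {
@Inject
AppBlockSubclassSketch(ApplicationBaseProtocol proto, ViewContext ctx,
Configuration conf) {
super(proto, ctx, conf);
}
@Override
protected LogAggregationStatus getLogAggregationStatus() {
// A real subclass would look this up from the RM's app state; returning
// a non-null value makes the overview table render the status row.
return LogAggregationStatus.RUNNING;
}
}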
| 13,985 | 37.108992 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_START_TIME_BEGIN;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_START_TIME_END;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPS_NUM;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.EnumSet;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang.math.LongRange;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.inject.Inject;
public class AppsBlock extends HtmlBlock {
private static final Log LOG = LogFactory.getLog(AppsBlock.class);
protected ApplicationBaseProtocol appBaseProt;
protected EnumSet<YarnApplicationState> reqAppStates;
protected UserGroupInformation callerUGI;
protected Collection<ApplicationReport> appReports;
@Inject
protected AppsBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
super(ctx);
this.appBaseProt = appBaseProt;
}
protected void fetchData() throws YarnException, IOException,
InterruptedException {
reqAppStates = EnumSet.noneOf(YarnApplicationState.class);
String reqStateString = $(APP_STATE);
if (reqStateString != null && !reqStateString.isEmpty()) {
String[] appStateStrings = reqStateString.split(",");
for (String stateString : appStateStrings) {
reqAppStates.add(YarnApplicationState.valueOf(stateString.trim()));
}
}
callerUGI = getCallerUGI();
final GetApplicationsRequest request =
GetApplicationsRequest.newInstance(reqAppStates);
String appsNumStr = $(APPS_NUM);
if (appsNumStr != null && !appsNumStr.isEmpty()) {
long appsNum = Long.parseLong(appsNumStr);
request.setLimit(appsNum);
}
String appStartedTimeBeginStr = $(APP_START_TIME_BEGIN);
long appStartedTimeBegin = 0;
if (appStartedTimeBeginStr != null && !appStartedTimeBeginStr.isEmpty()) {
appStartedTimeBegin = Long.parseLong(appStartedTimeBeginStr);
if (appStartedTimeBegin < 0) {
throw new BadRequestException(
"app.started-time.begin must not be negative");
}
}
String appStartedTimeEndStr = $(APP_START_TIME_END);
long appStartedTimeEnd = Long.MAX_VALUE;
if (appStartedTimeEndStr != null && !appStartedTimeEndStr.isEmpty()) {
appStartedTimeEnd = Long.parseLong(appStartedTimeEndStr);
if (appStartedTimeEnd < 0) {
throw new BadRequestException(
"app.started-time.end must not be negative");
}
}
if (appStartedTimeBegin > appStartedTimeEnd) {
throw new BadRequestException(
"app.started-time.end must not be less than app.started-time.begin");
}
request.setStartRange(
new LongRange(appStartedTimeBegin, appStartedTimeEnd));
if (callerUGI == null) {
appReports = appBaseProt.getApplications(request).getApplicationList();
} else {
appReports =
callerUGI
.doAs(new PrivilegedExceptionAction<Collection<ApplicationReport>>() {
@Override
public Collection<ApplicationReport> run() throws Exception {
return appBaseProt.getApplications(request)
.getApplicationList();
}
});
}
}
@Override
public void render(Block html) {
setTitle("Applications");
try {
fetchData();
}
catch( Exception e) {
String message = "Failed to read the applications.";
LOG.error(message, e);
html.p()._(message)._();
return;
}
renderData(html);
}
protected void renderData(Block html) {
TBODY<TABLE<Hamlet>> tbody =
html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User")
.th(".name", "Name").th(".type", "Application Type")
.th(".queue", "Queue").th(".starttime", "StartTime")
.th(".finishtime", "FinishTime").th(".state", "State")
.th(".finalstatus", "FinalStatus").th(".progress", "Progress")
.th(".ui", "Tracking UI")._()._().tbody();
StringBuilder appsTableData = new StringBuilder("[\n");
for (ApplicationReport appReport : appReports) {
// TODO: remove the following condition. It is still here because
// the history-side implementation of ApplicationBaseProtocol
// doesn't have filtering capability yet (YARN-1819).
if (!reqAppStates.isEmpty()
&& !reqAppStates.contains(appReport.getYarnApplicationState())) {
continue;
}
AppInfo app = new AppInfo(appReport);
String percent = StringUtils.format("%.1f", app.getProgress());
appsTableData
.append("[\"<a href='")
.append(url("app", app.getAppId()))
.append("'>")
.append(app.getAppId())
.append("</a>\",\"")
.append(
StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app
.getUser())))
.append("\",\"")
.append(
StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app
.getName())))
.append("\",\"")
.append(
StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app
.getType())))
.append("\",\"")
.append(
StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(app
.getQueue()))).append("\",\"").append(app.getStartedTime())
.append("\",\"").append(app.getFinishedTime())
.append("\",\"")
.append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())
.append("\",\"")
.append(app.getFinalAppStatus())
.append("\",\"")
// Progress bar
.append("<br title='").append(percent).append("'> <div class='")
.append(C_PROGRESSBAR).append("' title='").append(join(percent, '%'))
.append("'> ").append("<div class='").append(C_PROGRESSBAR_VALUE)
.append("' style='").append(join("width:", percent, '%'))
.append("'> </div> </div>").append("\",\"<a ");
String trackingURL =
app.getTrackingUrl() == null
|| app.getTrackingUrl().equals(UNAVAILABLE) ? null : app
.getTrackingUrl();
String trackingUI =
app.getTrackingUrl() == null || app.getTrackingUrl().equals(UNAVAILABLE)
? "Unassigned"
: app.getAppState() == YarnApplicationState.FINISHED
|| app.getAppState() == YarnApplicationState.FAILED
|| app.getAppState() == YarnApplicationState.KILLED
? "History" : "ApplicationMaster";
appsTableData.append(trackingURL == null ? "#" : "href='" + trackingURL)
.append("'>").append(trackingUI).append("</a>\"],\n");
}
if (appsTableData.charAt(appsTableData.length() - 2) == ',') {
appsTableData.delete(appsTableData.length() - 2,
appsTableData.length() - 1);
}
appsTableData.append("]");
html.script().$type("text/javascript")
._("var appsTableData=" + appsTableData)._();
tbody._()._();
}
}
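// Hedged request sketch (added for illustration): the query parameters
// fetchData() consumes, and the RPC request it builds from them. Parameter
// names come from YarnWebParams; the values are illustrative, e.g.
// /apps?app.state=RUNNING,FINISHED&apps.num=50
//      &app.started-time.begin=1400000000000&app.started-time.end=1500000000000
class AppsBlockRequestSketch {
static GetApplicationsRequest buildExampleRequest() {
GetApplicationsRequest request = GetApplicationsRequest.newInstance(
EnumSet.of(YarnApplicationState.RUNNING, YarnApplicationState.FINISHED));
request.setLimit(50L);
request.setStartRange(new LongRange(1400000000000L, 1500000000000L));
return request;
}
}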
| 9,004 | 39.200893 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import java.lang.reflect.UndeclaredThrowableException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import org.apache.commons.lang.math.LongRange;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.ForbiddenException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
public class WebServices {
protected ApplicationBaseProtocol appBaseProt;
public WebServices(ApplicationBaseProtocol appBaseProt) {
this.appBaseProt = appBaseProt;
}
public AppsInfo getApps(HttpServletRequest req, HttpServletResponse res,
String stateQuery, Set<String> statesQuery, String finalStatusQuery,
String userQuery, String queueQuery, String count, String startedBegin,
String startedEnd, String finishBegin, String finishEnd,
Set<String> applicationTypes) {
UserGroupInformation callerUGI = getUser(req);
boolean checkEnd = false;
boolean checkAppTypes = false;
boolean checkAppStates = false;
long countNum = Long.MAX_VALUE;
// Defaults used when begin/end are not specified.
long sBegin = 0;
long sEnd = Long.MAX_VALUE;
long fBegin = 0;
long fEnd = Long.MAX_VALUE;
if (count != null && !count.isEmpty()) {
countNum = Long.parseLong(count);
if (countNum <= 0) {
throw new BadRequestException("limit value must be greater then 0");
}
}
if (startedBegin != null && !startedBegin.isEmpty()) {
sBegin = Long.parseLong(startedBegin);
if (sBegin < 0) {
throw new BadRequestException("startedTimeBegin must be greater than 0");
}
}
if (startedEnd != null && !startedEnd.isEmpty()) {
sEnd = Long.parseLong(startedEnd);
if (sEnd < 0) {
throw new BadRequestException("startedTimeEnd must be greater than 0");
}
}
if (sBegin > sEnd) {
throw new BadRequestException(
"startedTimeEnd must be greater than startTimeBegin");
}
if (finishBegin != null && !finishBegin.isEmpty()) {
checkEnd = true;
fBegin = Long.parseLong(finishBegin);
if (fBegin < 0) {
throw new BadRequestException("finishTimeBegin must be greater than 0");
}
}
if (finishEnd != null && !finishEnd.isEmpty()) {
checkEnd = true;
fEnd = Long.parseLong(finishEnd);
if (fEnd < 0) {
throw new BadRequestException("finishTimeEnd must be greater than 0");
}
}
if (fBegin > fEnd) {
throw new BadRequestException(
"finishTimeEnd must be greater than finishTimeBegin");
}
Set<String> appTypes = parseQueries(applicationTypes, false);
if (!appTypes.isEmpty()) {
checkAppTypes = true;
}
// stateQuery is deprecated.
if (stateQuery != null && !stateQuery.isEmpty()) {
statesQuery.add(stateQuery);
}
Set<String> appStates = parseQueries(statesQuery, true);
if (!appStates.isEmpty()) {
checkAppStates = true;
}
AppsInfo allApps = new AppsInfo();
Collection<ApplicationReport> appReports = null;
final GetApplicationsRequest request =
GetApplicationsRequest.newInstance();
request.setLimit(countNum);
request.setStartRange(new LongRange(sBegin, sEnd));
try {
if (callerUGI == null) {
// TODO: the request should take the params like what RMWebServices does
// in YARN-1819.
appReports = appBaseProt.getApplications(request).getApplicationList();
} else {
appReports = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ApplicationReport>> () {
@Override
public Collection<ApplicationReport> run() throws Exception {
return appBaseProt.getApplications(request).getApplicationList();
}
});
}
} catch (Exception e) {
rewrapAndThrowException(e);
}
for (ApplicationReport appReport : appReports) {
if (checkAppStates &&
!appStates.contains(StringUtils.toLowerCase(
appReport.getYarnApplicationState().toString()))) {
continue;
}
if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) {
FinalApplicationStatus.valueOf(finalStatusQuery);
if (!appReport.getFinalApplicationStatus().toString()
.equalsIgnoreCase(finalStatusQuery)) {
continue;
}
}
if (userQuery != null && !userQuery.isEmpty()) {
if (!appReport.getUser().equals(userQuery)) {
continue;
}
}
if (queueQuery != null && !queueQuery.isEmpty()) {
if (!appReport.getQueue().equals(queueQuery)) {
continue;
}
}
if (checkAppTypes &&
!appTypes.contains(
StringUtils.toLowerCase(appReport.getApplicationType().trim()))) {
continue;
}
if (checkEnd
&& (appReport.getFinishTime() < fBegin || appReport.getFinishTime() > fEnd)) {
continue;
}
AppInfo app = new AppInfo(appReport);
allApps.add(app);
}
return allApps;
}
public AppInfo getApp(HttpServletRequest req, HttpServletResponse res,
String appId) {
UserGroupInformation callerUGI = getUser(req);
final ApplicationId id = parseApplicationId(appId);
ApplicationReport app = null;
try {
if (callerUGI == null) {
GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(id);
app = appBaseProt.getApplicationReport(request).getApplicationReport();
} else {
app = callerUGI.doAs(
new PrivilegedExceptionAction<ApplicationReport> () {
@Override
public ApplicationReport run() throws Exception {
GetApplicationReportRequest request =
GetApplicationReportRequest.newInstance(id);
return appBaseProt.getApplicationReport(request).getApplicationReport();
}
});
}
} catch (Exception e) {
rewrapAndThrowException(e);
}
if (app == null) {
throw new NotFoundException("app with id: " + appId + " not found");
}
return new AppInfo(app);
}
public AppAttemptsInfo getAppAttempts(HttpServletRequest req,
HttpServletResponse res, String appId) {
UserGroupInformation callerUGI = getUser(req);
final ApplicationId id = parseApplicationId(appId);
Collection<ApplicationAttemptReport> appAttemptReports = null;
try {
if (callerUGI == null) {
GetApplicationAttemptsRequest request =
GetApplicationAttemptsRequest.newInstance(id);
appAttemptReports =
appBaseProt.getApplicationAttempts(request)
.getApplicationAttemptList();
} else {
appAttemptReports = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
@Override
public Collection<ApplicationAttemptReport> run() throws Exception {
GetApplicationAttemptsRequest request =
GetApplicationAttemptsRequest.newInstance(id);
return appBaseProt.getApplicationAttempts(request)
.getApplicationAttemptList();
}
});
}
} catch (Exception e) {
rewrapAndThrowException(e);
}
AppAttemptsInfo appAttemptsInfo = new AppAttemptsInfo();
for (ApplicationAttemptReport appAttemptReport : appAttemptReports) {
AppAttemptInfo appAttemptInfo = new AppAttemptInfo(appAttemptReport);
appAttemptsInfo.add(appAttemptInfo);
}
return appAttemptsInfo;
}
public AppAttemptInfo getAppAttempt(HttpServletRequest req,
HttpServletResponse res, String appId, String appAttemptId) {
UserGroupInformation callerUGI = getUser(req);
ApplicationId aid = parseApplicationId(appId);
final ApplicationAttemptId aaid = parseApplicationAttemptId(appAttemptId);
validateIds(aid, aaid, null);
ApplicationAttemptReport appAttempt = null;
try {
if (callerUGI == null) {
GetApplicationAttemptReportRequest request =
GetApplicationAttemptReportRequest.newInstance(aaid);
appAttempt =
appBaseProt.getApplicationAttemptReport(request)
.getApplicationAttemptReport();
} else {
appAttempt = callerUGI.doAs(
new PrivilegedExceptionAction<ApplicationAttemptReport> () {
@Override
public ApplicationAttemptReport run() throws Exception {
GetApplicationAttemptReportRequest request =
GetApplicationAttemptReportRequest.newInstance(aaid);
return appBaseProt.getApplicationAttemptReport(request)
.getApplicationAttemptReport();
}
});
}
} catch (Exception e) {
rewrapAndThrowException(e);
}
if (appAttempt == null) {
throw new NotFoundException("app attempt with id: " + appAttemptId
+ " not found");
}
return new AppAttemptInfo(appAttempt);
}
public ContainersInfo getContainers(HttpServletRequest req,
HttpServletResponse res, String appId, String appAttemptId) {
UserGroupInformation callerUGI = getUser(req);
ApplicationId aid = parseApplicationId(appId);
final ApplicationAttemptId aaid = parseApplicationAttemptId(appAttemptId);
validateIds(aid, aaid, null);
Collection<ContainerReport> containerReports = null;
try {
if (callerUGI == null) {
GetContainersRequest request = GetContainersRequest.newInstance(aaid);
containerReports =
appBaseProt.getContainers(request).getContainerList();
} else {
containerReports = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ContainerReport>> () {
@Override
public Collection<ContainerReport> run() throws Exception {
GetContainersRequest request = GetContainersRequest.newInstance(aaid);
return appBaseProt.getContainers(request).getContainerList();
}
});
}
} catch (Exception e) {
rewrapAndThrowException(e);
}
ContainersInfo containersInfo = new ContainersInfo();
for (ContainerReport containerReport : containerReports) {
ContainerInfo containerInfo = new ContainerInfo(containerReport);
containersInfo.add(containerInfo);
}
return containersInfo;
}
public ContainerInfo getContainer(HttpServletRequest req,
HttpServletResponse res, String appId, String appAttemptId,
String containerId) {
UserGroupInformation callerUGI = getUser(req);
ApplicationId aid = parseApplicationId(appId);
ApplicationAttemptId aaid = parseApplicationAttemptId(appAttemptId);
final ContainerId cid = parseContainerId(containerId);
validateIds(aid, aaid, cid);
ContainerReport container = null;
try {
if (callerUGI == null) {
GetContainerReportRequest request =
GetContainerReportRequest.newInstance(cid);
container =
appBaseProt.getContainerReport(request).getContainerReport();
} else {
container = callerUGI.doAs(
new PrivilegedExceptionAction<ContainerReport> () {
@Override
public ContainerReport run() throws Exception {
GetContainerReportRequest request =
GetContainerReportRequest.newInstance(cid);
return appBaseProt.getContainerReport(request).getContainerReport();
}
});
}
} catch (Exception e) {
rewrapAndThrowException(e);
}
if (container == null) {
throw new NotFoundException("container with id: " + containerId
+ " not found");
}
return new ContainerInfo(container);
}
protected void init(HttpServletResponse response) {
// clear content type
response.setContentType(null);
}
protected static Set<String>
parseQueries(Set<String> queries, boolean isState) {
Set<String> params = new HashSet<String>();
if (!queries.isEmpty()) {
for (String query : queries) {
if (query != null && !query.trim().isEmpty()) {
String[] paramStrs = query.split(",");
for (String paramStr : paramStrs) {
if (paramStr != null && !paramStr.trim().isEmpty()) {
if (isState) {
try {
// enum values are uppercase
YarnApplicationState.valueOf(
StringUtils.toUpperCase(paramStr.trim()));
} catch (RuntimeException e) {
YarnApplicationState[] stateArray =
YarnApplicationState.values();
String allAppStates = Arrays.toString(stateArray);
throw new BadRequestException("Invalid application-state "
+ paramStr.trim() + " specified. It should be one of "
+ allAppStates);
}
}
params.add(StringUtils.toLowerCase(paramStr.trim()));
}
}
}
}
}
return params;
}
protected static ApplicationId parseApplicationId(String appId) {
if (appId == null || appId.isEmpty()) {
throw new NotFoundException("appId, " + appId + ", is empty or null");
}
ApplicationId aid = ConverterUtils.toApplicationId(appId);
if (aid == null) {
throw new NotFoundException("appId is null");
}
return aid;
}
protected static ApplicationAttemptId parseApplicationAttemptId(
String appAttemptId) {
if (appAttemptId == null || appAttemptId.isEmpty()) {
throw new NotFoundException("appAttemptId, " + appAttemptId
+ ", is empty or null");
}
ApplicationAttemptId aaid =
ConverterUtils.toApplicationAttemptId(appAttemptId);
if (aaid == null) {
throw new NotFoundException("appAttemptId is null");
}
return aaid;
}
protected static ContainerId parseContainerId(String containerId) {
if (containerId == null || containerId.isEmpty()) {
throw new NotFoundException("containerId, " + containerId
+ ", is empty or null");
}
ContainerId cid = ConverterUtils.toContainerId(containerId);
if (cid == null) {
throw new NotFoundException("containerId is null");
}
return cid;
}
protected void validateIds(ApplicationId appId,
ApplicationAttemptId appAttemptId, ContainerId containerId) {
if (!appAttemptId.getApplicationId().equals(appId)) {
throw new NotFoundException("appId and appAttemptId don't match");
}
if (containerId != null
&& !containerId.getApplicationAttemptId().equals(appAttemptId)) {
throw new NotFoundException("appAttemptId and containerId don't match");
}
}
protected static UserGroupInformation getUser(HttpServletRequest req) {
String remoteUser = req.getRemoteUser();
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
return callerUGI;
}
private static void rewrapAndThrowException(Exception e) {
if (e instanceof UndeclaredThrowableException) {
rewrapAndThrowThrowable(e.getCause());
} else {
rewrapAndThrowThrowable(e);
}
}
private static void rewrapAndThrowThrowable(Throwable t) {
if (t instanceof AuthorizationException) {
throw new ForbiddenException(t);
} else if (t instanceof ApplicationNotFoundException ||
t instanceof ApplicationAttemptNotFoundException ||
t instanceof ContainerNotFoundException) {
throw new NotFoundException(t);
} else {
throw new WebApplicationException(t);
}
}
}
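// Hedged behavior sketch (added for illustration): parseQueries splits each
// entry on commas, trims, optionally validates against YarnApplicationState,
// and lower-cases the results. parseQueries is protected static, so this
// sketch assumes same-package access; inputs are illustrative.
class ParseQueriesSketch {
static Set<String> example() {
Set<String> queries =
new HashSet<String>(Arrays.asList("RUNNING,finished", " KILLED "));
// Yields {"running", "finished", "killed"}; an unknown state such as
// "BOGUS" would raise a BadRequestException listing all valid states.
return WebServices.parseQueries(queries, true);
}
}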
| 18,602 | 36.430584 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import com.google.inject.Inject;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ErrorsAndWarningsBlock extends HtmlBlock {
long cutoffPeriodSeconds;
final private AdminACLsManager adminAclsManager;
@Inject
ErrorsAndWarningsBlock(ViewContext ctx, Configuration conf) {
super(ctx);
// default is to show all errors and warnings
cutoffPeriodSeconds = Time.now() / 1000;
String value = ctx.requestContext().get("cutoff", "");
try {
cutoffPeriodSeconds = Integer.parseInt(value);
if (cutoffPeriodSeconds <= 0) {
cutoffPeriodSeconds = Time.now() / 1000;
}
} catch (NumberFormatException ne) {
cutoffPeriodSeconds = Time.now() / 1000;
}
adminAclsManager = new AdminACLsManager(conf);
}
@Override
protected void render(Block html) {
Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
boolean isAdmin = false;
UserGroupInformation callerUGI = this.getCallerUGI();
if (adminAclsManager.areACLsEnabled()) {
if (callerUGI != null && adminAclsManager.isAdmin(callerUGI)) {
isAdmin = true;
}
} else {
isAdmin = true;
}
if (!isAdmin) {
html.div().p()._("This page is for admins only.")._()._();
return;
}
if (log instanceof Log4JLogger) {
html._(ErrorMetrics.class);
html._(WarningMetrics.class);
html.div().button().$onclick("reloadPage()").b("View data for the last ")
._().select().$id("cutoff").option().$value("60")._("1 min")._()
.option().$value("300")._("5 min")._().option().$value("900")
._("15 min")._().option().$value("3600")._("1 hour")._().option()
.$value("21600")._("6 hours")._().option().$value("43200")
._("12 hours")._().option().$value("86400")._("24 hours")._()._()._();
String script = "function reloadPage() {"
+ " var timePeriod = $(\"#cutoff\").val();"
+ " document.location.href = '/cluster/errors-and-warnings?cutoff=' + timePeriod"
+ "}";
script = script
+ "; function toggleContent(element) {"
+ " $(element).parent().siblings('.toggle-content').fadeToggle();"
+ "}";
html.script().$type("text/javascript")._(script)._();
html.style(".toggle-content { display: none; }");
Log4jWarningErrorMetricsAppender appender =
Log4jWarningErrorMetricsAppender.findAppender();
if (appender == null) {
return;
}
List<Long> cutoff = new ArrayList<>();
Hamlet.TBODY<Hamlet.TABLE<Hamlet>> errorsTable =
html.table("#messages").thead().tr().th(".message", "Message")
.th(".type", "Type").th(".count", "Count")
.th(".lasttime", "Latest Message Time")._()._().tbody();
// cutoff has to be in seconds
cutoff.add((Time.now() - cutoffPeriodSeconds * 1000) / 1000);
List<Map<String, Log4jWarningErrorMetricsAppender.Element>> errorsData =
appender.getErrorMessagesAndCounts(cutoff);
List<Map<String, Log4jWarningErrorMetricsAppender.Element>> warningsData =
appender.getWarningMessagesAndCounts(cutoff);
Map<String, List<Map<String, Log4jWarningErrorMetricsAppender.Element>>> sources =
new HashMap<>();
sources.put("Error", errorsData);
sources.put("Warning", warningsData);
int maxDisplayLength = 80;
for (Map.Entry<String, List<Map<String, Log4jWarningErrorMetricsAppender.Element>>> source : sources
.entrySet()) {
String type = source.getKey();
List<Map<String, Log4jWarningErrorMetricsAppender.Element>> data =
source.getValue();
if (data.size() > 0) {
Map<String, Log4jWarningErrorMetricsAppender.Element> map = data.get(0);
for (Map.Entry<String, Log4jWarningErrorMetricsAppender.Element> entry : map
.entrySet()) {
String message = entry.getKey();
Hamlet.TR<Hamlet.TBODY<Hamlet.TABLE<Hamlet>>> row =
errorsTable.tr();
Hamlet.TD<Hamlet.TR<Hamlet.TBODY<Hamlet.TABLE<Hamlet>>>> cell =
row.td();
if (message.length() > maxDisplayLength || message.contains("\n")) {
String displayMessage = message.split("\n")[0];
if (displayMessage.length() > maxDisplayLength) {
displayMessage = displayMessage.substring(0, maxDisplayLength);
}
cell.pre().a().$href("#").$onclick("toggleContent(this);")
.$style("white-space: pre")._(displayMessage)._()._().div()
.$class("toggle-content").pre()._(message)._()._()._();
} else {
cell.pre()._(message)._()._();
}
Log4jWarningErrorMetricsAppender.Element ele = entry.getValue();
row.td(type).td(String.valueOf(ele.count))
.td(Times.format(ele.timestampSeconds * 1000))._();
}
}
}
errorsTable._()._();
}
}
public static class MetricsBase extends HtmlBlock {
List<Long> cutoffs;
List<Integer> values;
String tableHeading;
Log4jWarningErrorMetricsAppender appender;
MetricsBase(ViewContext ctx) {
super(ctx);
cutoffs = new ArrayList<>();
// cutoff has to be in seconds
long now = Time.now();
cutoffs.add((now - 60 * 1000) / 1000); // last 1 minute
cutoffs.add((now - 300 * 1000) / 1000); // last 5 minutes
cutoffs.add((now - 900 * 1000) / 1000); // last 15 minutes
cutoffs.add((now - 3600 * 1000) / 1000); // last 1 hour
cutoffs.add((now - 21600 * 1000) / 1000); // last 6 hours
cutoffs.add((now - 43200 * 1000) / 1000); // last 12 hours
cutoffs.add((now - 86400 * 1000) / 1000); // last 24 hours
Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
if (log instanceof Log4JLogger) {
appender =
Log4jWarningErrorMetricsAppender.findAppender();
}
}
List<Long> getCutoffs() {
return this.cutoffs;
}
@Override
protected void render(Block html) {
Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
if (log instanceof Log4JLogger) {
Hamlet.DIV<Hamlet> div =
html.div().$class("metrics").$style("padding-bottom: 20px");
div.h3(tableHeading).table("#metricsoverview").thead()
.$class("ui-widget-header").tr().th().$class("ui-state-default")
._("Last 1 minute")._().th().$class("ui-state-default")
._("Last 5 minutes")._().th().$class("ui-state-default")
._("Last 15 minutes")._().th().$class("ui-state-default")
._("Last 1 hour")._().th().$class("ui-state-default")
._("Last 6 hours")._().th().$class("ui-state-default")
._("Last 12 hours")._().th().$class("ui-state-default")
._("Last 24 hours")._()._()._().tbody().$class("ui-widget-content")
.tr().td(String.valueOf(values.get(0)))
.td(String.valueOf(values.get(1))).td(String.valueOf(values.get(2)))
.td(String.valueOf(values.get(3))).td(String.valueOf(values.get(4)))
.td(String.valueOf(values.get(5))).td(String.valueOf(values.get(6)))
._()._()._();
div._();
}
}
}
public static class ErrorMetrics extends MetricsBase {
@Inject
ErrorMetrics(ViewContext ctx) {
super(ctx);
tableHeading = "Error Metrics";
}
@Override
protected void render(Block html) {
if (appender == null) {
return;
}
values = appender.getErrorCounts(getCutoffs());
super.render(html);
}
}
public static class WarningMetrics extends MetricsBase {
@Inject
WarningMetrics(ViewContext ctx) {
super(ctx);
tableHeading = "Warning Metrics";
}
@Override
protected void render(Block html) {
if (appender == null) {
return;
}
values = appender.getWarningCounts(getCutoffs());
super.render(html);
}
}
}
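// Hedged arithmetic sketch (added for illustration): the page accepts a
// ?cutoff=<seconds> parameter, and the appender is queried with an absolute
// timestamp in seconds. For example, cutoff=300 keeps the last five minutes.
class CutoffSketch {
static long exampleCutoffTimestampSeconds() {
long cutoffPeriodSeconds = 300;
// Time.now() is in milliseconds; subtract the window, then convert to
// seconds, as the appender's query methods expect.
return (Time.now() - cutoffPeriodSeconds * 1000) / 1000;
}
}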
| 9,361 | 35.858268 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
public class WebPageUtils {
public static String appsTableInit() {
return appsTableInit(false, true);
}
public static String appsTableInit(boolean isResourceManager) {
return appsTableInit(false, isResourceManager);
}
public static String appsTableInit(
boolean isFairSchedulerPage, boolean isResourceManager) {
// id, user, name, type, queue, starttime, finishtime, state, finalstatus,
// progress, ui; FairSchedulerPage's table is a bit different
return tableInit()
.append(", 'aaData': appsTableData")
.append(", bDeferRender: true")
.append(", bProcessing: true")
.append("\n, aoColumnDefs: ")
.append(getAppsTableColumnDefs(isFairSchedulerPage, isResourceManager))
// Sort by id upon page load
.append(", aaSorting: [[0, 'desc']]}").toString();
}
private static String getAppsTableColumnDefs(
boolean isFairSchedulerPage, boolean isResourceManager) {
StringBuilder sb = new StringBuilder();
sb.append("[\n")
.append("{'sType':'string', 'aTargets': [0]")
.append(", 'mRender': parseHadoopID }")
.append("\n, {'sType':'numeric', 'aTargets': " +
(isFairSchedulerPage ? "[6, 7]": "[6, 7]"))
.append(", 'mRender': renderHadoopDate }")
.append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
if (isFairSchedulerPage || isResourceManager) {
// The FairScheduler and RM pages carry extra columns, so the progress
// column is at index 13 there; other pages have it at index 9.
sb.append("[13]");
} else {
sb.append("[9]");
}
sb.append(", 'mRender': parseHadoopProgress }]");
return sb.toString();
}
public static String attemptsTableInit() {
return tableInit().append(", 'aaData': attemptsTableData")
.append(", bDeferRender: true").append(", bProcessing: true")
.append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
// Sort by id upon page load
.append(", aaSorting: [[0, 'desc']]}").toString();
}
private static String getAttemptsTableColumnDefs() {
StringBuilder sb = new StringBuilder();
return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
.append(", 'mRender': parseHadoopID }")
.append("\n, {'sType':'numeric', 'aTargets': [1]")
.append(", 'mRender': renderHadoopDate }]").toString();
}
public static String containersTableInit() {
return tableInit().append(", 'aaData': containersTableData")
.append(", bDeferRender: true").append(", bProcessing: true")
.append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
// Sort by id upon page load
.append(", aaSorting: [[0, 'desc']]}").toString();
}
private static String getContainersTableColumnDefs() {
StringBuilder sb = new StringBuilder();
return sb.append("[\n").append("{'sType':'string', 'aTargets': [0]")
.append(", 'mRender': parseHadoopID }]").toString();
}
}
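// Hedged usage sketch (added for illustration): the strings built above are
// jQuery DataTables option literals that a page embeds in its JavaScript.
// The method and variable names here are illustrative.
class WebPageUtilsSketch {
static String exampleAppsInit() {
// Binds the table to the "appsTableData" array emitted by AppsBlock and
// sorts by the ID column on page load.
return WebPageUtils.appsTableInit(false /* isFairSchedulerPage */,
true /* isResourceManager */);
}
}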
| 3,780 | 37.191919 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import com.google.inject.Inject;
public class ContainerBlock extends HtmlBlock {
private static final Log LOG = LogFactory.getLog(ContainerBlock.class);
protected ApplicationBaseProtocol appBaseProt;
@Inject
public ContainerBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
super(ctx);
this.appBaseProt = appBaseProt;
}
@Override
protected void render(Block html) {
String containerid = $(CONTAINER_ID);
if (containerid.isEmpty()) {
puts("Bad request: requires container ID");
return;
}
ContainerId containerId = null;
try {
containerId = ConverterUtils.toContainerId(containerid);
} catch (IllegalArgumentException e) {
puts("Invalid container ID: " + containerid);
return;
}
UserGroupInformation callerUGI = getCallerUGI();
ContainerReport containerReport = null;
try {
final GetContainerReportRequest request =
GetContainerReportRequest.newInstance(containerId);
if (callerUGI == null) {
containerReport = appBaseProt.getContainerReport(request)
.getContainerReport();
} else {
containerReport = callerUGI.doAs(
new PrivilegedExceptionAction<ContainerReport> () {
@Override
public ContainerReport run() throws Exception {
return appBaseProt.getContainerReport(request)
.getContainerReport();
}
});
}
} catch (Exception e) {
String message = "Failed to read the container " + containerid + ".";
LOG.error(message, e);
html.p()._(message)._();
return;
}
if (containerReport == null) {
puts("Container not found: " + containerid);
return;
}
ContainerInfo container = new ContainerInfo(containerReport);
setTitle(join("Container ", containerid));
info("Container Overview")
._(
"Container State:",
container.getContainerState() == null ? UNAVAILABLE : container
.getContainerState())
._("Exit Status:", container.getContainerExitStatus())
._(
"Node:",
container.getNodeHttpAddress() == null ? "#" : container
.getNodeHttpAddress(),
container.getNodeHttpAddress() == null ? "N/A" : container
.getNodeHttpAddress())
._("Priority:", container.getPriority())
._("Started:", Times.format(container.getStartedTime()))
._(
"Elapsed:",
StringUtils.formatTime(Times.elapsed(container.getStartedTime(),
container.getFinishedTime())))
._(
"Resource:",
container.getAllocatedMB() + " Memory, "
+ container.getAllocatedVCores() + " VCores")
._("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(),
container.getLogUrl() == null ? "N/A" : "Logs")
._("Diagnostics:", container.getDiagnosticsInfo() == null ?
"" : container.getDiagnosticsInfo());
html._(InfoBlock.class);
}
}
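// Hedged sketch (not part of the original class): the privileged-action
// pattern used by render() above, extracted for illustration. The client
// ("protocol"), the remote user name and the container id string are all
// assumptions for this example.
class ContainerReportFetchSketch {
  static ContainerReport fetchAs(final ApplicationBaseProtocol protocol,
      String remoteUser, String containerIdString) throws Exception {
    final GetContainerReportRequest request =
        GetContainerReportRequest.newInstance(
            ConverterUtils.toContainerId(containerIdString));
    // doAs makes the RPC carry the caller's identity, so server-side ACL
    // checks apply to the web user rather than the daemon principal.
    return UserGroupInformation.createRemoteUser(remoteUser).doAs(
        new PrivilegedExceptionAction<ContainerReport>() {
          @Override
          public ContainerReport run() throws Exception {
            return protocol.getContainerReport(request).getContainerReport();
          }
        });
  }
}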
| 4,730 | 35.392308 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ATTEMPT_ID;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
import com.google.inject.Inject;
public class AppAttemptBlock extends HtmlBlock {
private static final Log LOG = LogFactory.getLog(AppAttemptBlock.class);
protected ApplicationBaseProtocol appBaseProt;
protected ApplicationAttemptId appAttemptId = null;
@Inject
public AppAttemptBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
super(ctx);
this.appBaseProt = appBaseProt;
}
@Override
protected void render(Block html) {
String attemptid = $(APPLICATION_ATTEMPT_ID);
if (attemptid.isEmpty()) {
puts("Bad request: requires application attempt ID");
return;
}
try {
appAttemptId = ConverterUtils.toApplicationAttemptId(attemptid);
} catch (IllegalArgumentException e) {
puts("Invalid application attempt ID: " + attemptid);
return;
}
UserGroupInformation callerUGI = getCallerUGI();
ApplicationAttemptReport appAttemptReport;
try {
final GetApplicationAttemptReportRequest request =
GetApplicationAttemptReportRequest.newInstance(appAttemptId);
if (callerUGI == null) {
appAttemptReport =
appBaseProt.getApplicationAttemptReport(request)
.getApplicationAttemptReport();
} else {
appAttemptReport = callerUGI.doAs(
new PrivilegedExceptionAction<ApplicationAttemptReport> () {
@Override
public ApplicationAttemptReport run() throws Exception {
return appBaseProt.getApplicationAttemptReport(request)
.getApplicationAttemptReport();
}
});
}
} catch (Exception e) {
String message =
"Failed to read the application attempt " + appAttemptId + ".";
LOG.error(message, e);
html.p()._(message)._();
return;
}
if (appAttemptReport == null) {
puts("Application Attempt not found: " + attemptid);
return;
}
boolean exceptionWhenGetContainerReports = false;
Collection<ContainerReport> containers = null;
try {
final GetContainersRequest request =
GetContainersRequest.newInstance(appAttemptId);
if (callerUGI == null) {
containers = appBaseProt.getContainers(request).getContainerList();
} else {
containers = callerUGI.doAs(
new PrivilegedExceptionAction<Collection<ContainerReport>> () {
@Override
public Collection<ContainerReport> run() throws Exception {
return appBaseProt.getContainers(request).getContainerList();
}
});
}
} catch (RuntimeException e) {
// have this block to suppress the findbugs warning
exceptionWhenGetContainerReports = true;
} catch (Exception e) {
exceptionWhenGetContainerReports = true;
}
AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
setTitle(join("Application Attempt ", attemptid));
String node = "N/A";
if (appAttempt.getHost() != null && appAttempt.getRpcPort() >= 0
&& appAttempt.getRpcPort() < 65536) {
node = appAttempt.getHost() + ":" + appAttempt.getRpcPort();
}
generateOverview(appAttemptReport, containers, appAttempt, node);
if (exceptionWhenGetContainerReports) {
html
.p()
._(
"Sorry, Failed to get containers for application attempt" + attemptid
+ ".")._();
return;
}
createAttemptHeadRoomTable(html);
html._(InfoBlock.class);
createTablesForAttemptMetrics(html);
// Container Table
TBODY<TABLE<Hamlet>> tbody =
html.table("#containers").thead().tr().th(".id", "Container ID")
.th(".node", "Node").th(".exitstatus", "Container Exit Status")
.th(".logs", "Logs")._()._().tbody();
StringBuilder containersTableData = new StringBuilder("[\n");
for (ContainerReport containerReport : containers) {
ContainerInfo container = new ContainerInfo(containerReport);
containersTableData
.append("[\"<a href='")
.append(url("container", container.getContainerId()))
.append("'>")
.append(container.getContainerId())
.append("</a>\",\"<a ")
.append(
container.getNodeHttpAddress() == null ? "#" : "href='"
+ container.getNodeHttpAddress())
.append("'>")
.append(container.getNodeHttpAddress() == null ? "N/A" :
StringEscapeUtils.escapeJavaScript(StringEscapeUtils
.escapeHtml(container.getNodeHttpAddress())))
.append("</a>\",\"")
.append(container.getContainerExitStatus()).append("\",\"<a href='")
.append(container.getLogUrl() == null ?
"#" : container.getLogUrl()).append("'>")
.append(container.getLogUrl() == null ?
"N/A" : "Logs").append("</a>\"],\n");
}
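    // Trim the trailing comma emitted by the last row before closing the
    // JavaScript array literal.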
if (containersTableData.charAt(containersTableData.length() - 2) == ',') {
containersTableData.delete(containersTableData.length() - 2,
containersTableData.length() - 1);
}
containersTableData.append("]");
html.script().$type("text/javascript")
._("var containersTableData=" + containersTableData)._();
tbody._()._();
}
protected void generateOverview(ApplicationAttemptReport appAttemptReport,
Collection<ContainerReport> containers, AppAttemptInfo appAttempt,
String node) {
String amContainerId = appAttempt.getAmContainerId();
info("Application Attempt Overview")
._(
"Application Attempt State:",
appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
.getAppAttemptState())
._("AM Container:",
amContainerId == null
|| containers == null
|| !hasAMContainer(appAttemptReport.getAMContainerId(),
containers) ? null : root_url("container", amContainerId),
amContainerId == null ? "N/A" : amContainerId)
._("Node:", node)
._(
"Tracking URL:",
appAttempt.getTrackingUrl() == null
|| appAttempt.getTrackingUrl().equals(UNAVAILABLE) ? null
: root_url(appAttempt.getTrackingUrl()),
appAttempt.getTrackingUrl() == null
|| appAttempt.getTrackingUrl().equals(UNAVAILABLE)
? "Unassigned"
: appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FINISHED
|| appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED
|| appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED
? "History" : "ApplicationMaster")
._(
"Diagnostics Info:",
appAttempt.getDiagnosticsInfo() == null ? "" : appAttempt
.getDiagnosticsInfo());
}
protected boolean hasAMContainer(ContainerId containerId,
Collection<ContainerReport> containers) {
for (ContainerReport container : containers) {
if (containerId.equals(container.getContainerId())) {
return true;
}
}
return false;
}
protected void createAttemptHeadRoomTable(Block html) {
}
protected void createTablesForAttemptMetrics(Block html) {
}
}
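// Hedged sketch (illustrative only): the row-escaping and trailing-comma
// trimming technique used when building "containersTableData" above, reduced
// to a helper. Cell values are assumed to be untrusted HTML.
class TableDataSketch {
  static String toJsArray(java.util.List<String> cells) {
    StringBuilder data = new StringBuilder("[\n");
    for (String cell : cells) {
      data.append("[\"")
          .append(StringEscapeUtils.escapeJavaScript(
              StringEscapeUtils.escapeHtml(cell)))
          .append("\"],\n");
    }
    // Drop the comma left behind by the final row before closing the array.
    if (data.charAt(data.length() - 2) == ',') {
      data.delete(data.length() - 2, data.length() - 1);
    }
    return data.append("]").toString();
  }
}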
| 9,408 | 37.247967 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptsInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp.dao;
import java.util.ArrayList;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
@XmlRootElement(name = "appAttempts")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppAttemptsInfo {
@XmlElement(name = "appAttempt")
protected ArrayList<AppAttemptInfo> attempt = new ArrayList<AppAttemptInfo>();
public AppAttemptsInfo() {
// JAXB needs this
}
public void add(AppAttemptInfo info) {
this.attempt.add(info);
}
public ArrayList<AppAttemptInfo> getAttempts() {
return this.attempt;
}
}
| 1,673 | 31.192308 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp.dao;
import static org.apache.hadoop.yarn.util.StringHelper.CSV_JOINER;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Times;
@Public
@Evolving
@XmlRootElement(name = "app")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppInfo {
protected String appId;
protected String currentAppAttemptId;
protected String user;
protected String name;
protected String queue;
protected String type;
protected String host;
protected int rpcPort;
protected YarnApplicationState appState;
protected int runningContainers;
protected float progress;
protected String diagnosticsInfo;
protected String originalTrackingUrl;
protected String trackingUrl;
protected FinalApplicationStatus finalAppStatus;
protected long submittedTime;
protected long startedTime;
protected long finishedTime;
protected long elapsedTime;
protected String applicationTags;
protected int priority;
private int allocatedCpuVcores;
private int allocatedMemoryMB;
protected boolean unmanagedApplication;
public AppInfo() {
// JAXB needs this
}
public AppInfo(ApplicationReport app) {
appId = app.getApplicationId().toString();
if (app.getCurrentApplicationAttemptId() != null) {
currentAppAttemptId = app.getCurrentApplicationAttemptId().toString();
}
user = app.getUser();
queue = app.getQueue();
name = app.getName();
type = app.getApplicationType();
host = app.getHost();
rpcPort = app.getRpcPort();
appState = app.getYarnApplicationState();
diagnosticsInfo = app.getDiagnostics();
trackingUrl = app.getTrackingUrl();
originalTrackingUrl = app.getOriginalTrackingUrl();
submittedTime = app.getStartTime();
startedTime = app.getStartTime();
finishedTime = app.getFinishTime();
elapsedTime = Times.elapsed(startedTime, finishedTime);
finalAppStatus = app.getFinalApplicationStatus();
priority = 0;
if (app.getPriority() != null) {
priority = app.getPriority().getPriority();
}
if (app.getApplicationResourceUsageReport() != null) {
runningContainers = app.getApplicationResourceUsageReport()
.getNumUsedContainers();
if (app.getApplicationResourceUsageReport().getUsedResources() != null) {
allocatedCpuVcores = app.getApplicationResourceUsageReport()
.getUsedResources().getVirtualCores();
allocatedMemoryMB = app.getApplicationResourceUsageReport()
.getUsedResources().getMemory();
}
}
progress = app.getProgress() * 100; // in percent
if (app.getApplicationTags() != null && !app.getApplicationTags().isEmpty()) {
this.applicationTags = CSV_JOINER.join(app.getApplicationTags());
}
unmanagedApplication = app.isUnmanagedApp();
}
public String getAppId() {
return appId;
}
public String getCurrentAppAttemptId() {
return currentAppAttemptId;
}
public String getUser() {
return user;
}
public String getName() {
return name;
}
public String getQueue() {
return queue;
}
public String getType() {
return type;
}
public String getHost() {
return host;
}
public int getRpcPort() {
return rpcPort;
}
public YarnApplicationState getAppState() {
return appState;
}
public int getRunningContainers() {
return runningContainers;
}
public int getAllocatedCpuVcores() {
return allocatedCpuVcores;
}
public int getAllocatedMemoryMB() {
return allocatedMemoryMB;
}
public float getProgress() {
return progress;
}
public String getDiagnosticsInfo() {
return diagnosticsInfo;
}
public String getOriginalTrackingUrl() {
return originalTrackingUrl;
}
public String getTrackingUrl() {
return trackingUrl;
}
public FinalApplicationStatus getFinalAppStatus() {
return finalAppStatus;
}
public long getSubmittedTime() {
return submittedTime;
}
public long getStartedTime() {
return startedTime;
}
public long getFinishedTime() {
return finishedTime;
}
public long getElapsedTime() {
return elapsedTime;
}
public String getApplicationTags() {
return applicationTags;
}
public boolean isUnmanagedApp() {
return unmanagedApplication;
}
public int getPriority() {
return priority;
}
}
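// Hedged sketch: the JAXB annotations above are what make this DAO directly
// marshallable in a REST response. A hypothetical manual round-trip looks
// like this; JAXBContext comes from javax.xml.bind, the same package as the
// annotations already used here.
class AppInfoMarshalSketch {
  static String toXml(AppInfo info) throws javax.xml.bind.JAXBException {
    java.io.StringWriter out = new java.io.StringWriter();
    javax.xml.bind.JAXBContext.newInstance(AppInfo.class)
        .createMarshaller().marshal(info, out);
    return out.toString();
  }
}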
| 5,614 | 26.125604 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppsInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp.dao;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import java.util.ArrayList;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
@Public
@Evolving
@XmlRootElement(name = "apps")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppsInfo {
protected ArrayList<AppInfo> app = new ArrayList<>();
public AppsInfo() {
// JAXB needs this
}
public void add(AppInfo appinfo) {
app.add(appinfo);
}
public ArrayList<AppInfo> getApps() {
return app;
}
}
| 1,516 | 29.959184 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppAttemptInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp.dao;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
@Public
@Evolving
@XmlRootElement(name = "appAttempt")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppAttemptInfo {
protected String appAttemptId;
protected String host;
protected int rpcPort;
protected String trackingUrl;
protected String originalTrackingUrl;
protected String diagnosticsInfo;
protected YarnApplicationAttemptState appAttemptState;
protected String amContainerId;
protected long startedTime;
protected long finishedTime;
public AppAttemptInfo() {
// JAXB needs this
}
public AppAttemptInfo(ApplicationAttemptReport appAttempt) {
appAttemptId = appAttempt.getApplicationAttemptId().toString();
host = appAttempt.getHost();
rpcPort = appAttempt.getRpcPort();
trackingUrl = appAttempt.getTrackingUrl();
originalTrackingUrl = appAttempt.getOriginalTrackingUrl();
diagnosticsInfo = appAttempt.getDiagnostics();
appAttemptState = appAttempt.getYarnApplicationAttemptState();
if (appAttempt.getAMContainerId() != null) {
amContainerId = appAttempt.getAMContainerId().toString();
}
startedTime = appAttempt.getStartTime();
finishedTime = appAttempt.getFinishTime();
}
public String getAppAttemptId() {
return appAttemptId;
}
public String getHost() {
return host;
}
public int getRpcPort() {
return rpcPort;
}
public String getTrackingUrl() {
return trackingUrl;
}
public String getOriginalTrackingUrl() {
return originalTrackingUrl;
}
public String getDiagnosticsInfo() {
return diagnosticsInfo;
}
public YarnApplicationAttemptState getAppAttemptState() {
return appAttemptState;
}
public String getAmContainerId() {
return amContainerId;
}
public long getStartedTime() {
return startedTime;
}
public long getFinishedTime() {
return finishedTime;
}
}
| 3,128 | 27.972222 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp.dao;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.util.Times;
@Public
@Evolving
@XmlRootElement(name = "container")
@XmlAccessorType(XmlAccessType.FIELD)
public class ContainerInfo {
protected String containerId;
protected int allocatedMB;
protected int allocatedVCores;
protected String assignedNodeId;
protected int priority;
protected long startedTime;
protected long finishedTime;
protected long elapsedTime;
protected String diagnosticsInfo;
protected String logUrl;
protected int containerExitStatus;
protected ContainerState containerState;
protected String nodeHttpAddress;
public ContainerInfo() {
// JAXB needs this
}
public ContainerInfo(ContainerReport container) {
containerId = container.getContainerId().toString();
if (container.getAllocatedResource() != null) {
allocatedMB = container.getAllocatedResource().getMemory();
allocatedVCores = container.getAllocatedResource().getVirtualCores();
}
if (container.getAssignedNode() != null) {
assignedNodeId = container.getAssignedNode().toString();
}
priority = container.getPriority().getPriority();
startedTime = container.getCreationTime();
finishedTime = container.getFinishTime();
elapsedTime = Times.elapsed(startedTime, finishedTime);
diagnosticsInfo = container.getDiagnosticsInfo();
logUrl = container.getLogUrl();
containerExitStatus = container.getContainerExitStatus();
containerState = container.getContainerState();
nodeHttpAddress = container.getNodeHttpAddress();
}
public String getContainerId() {
return containerId;
}
public int getAllocatedMB() {
return allocatedMB;
}
public int getAllocatedVCores() {
return allocatedVCores;
}
public String getAssignedNodeId() {
return assignedNodeId;
}
public int getPriority() {
return priority;
}
public long getStartedTime() {
return startedTime;
}
public long getFinishedTime() {
return finishedTime;
}
public long getElapsedTime() {
return elapsedTime;
}
public String getDiagnosticsInfo() {
return diagnosticsInfo;
}
public String getLogUrl() {
return logUrl;
}
public int getContainerExitStatus() {
return containerExitStatus;
}
public ContainerState getContainerState() {
return containerState;
}
public String getNodeHttpAddress() {
return nodeHttpAddress;
}
}
| 3,662 | 27.617188 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainersInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webapp.dao;
import java.util.ArrayList;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
@XmlRootElement(name = "containers")
@XmlAccessorType(XmlAccessType.FIELD)
public class ContainersInfo {
protected ArrayList<ContainerInfo> container = new ArrayList<ContainerInfo>();
public ContainersInfo() {
// JAXB needs this
}
public void add(ContainerInfo containerInfo) {
container.add(containerInfo);
}
public ArrayList<ContainerInfo> getContainers() {
return container;
}
}
| 1,601 | 31.693878 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/SCMUploaderProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse;
/**
* <p>
* The protocol between a <code>NodeManager's</code>
* <code>SharedCacheUploadService</code> and the
 * <code>SharedCacheManager</code>.
* </p>
*/
@Private
@Unstable
public interface SCMUploaderProtocol {
/**
* <p>
* The method used by the NodeManager's <code>SharedCacheUploadService</code>
* to notify the shared cache manager of a newly cached resource.
* </p>
*
* <p>
* The <code>SharedCacheManager</code> responds with whether or not the
* NodeManager should delete the uploaded file.
* </p>
*
* @param request notify the shared cache manager of a newly uploaded resource
* to the shared cache
* @return response indicating if the newly uploaded resource should be
* deleted
* @throws YarnException
* @throws IOException
*/
public SCMUploaderNotifyResponse
notify(SCMUploaderNotifyRequest request)
throws YarnException, IOException;
/**
* <p>
* The method used by the NodeManager's <code>SharedCacheUploadService</code>
* to request whether a resource can be uploaded.
* </p>
*
* <p>
* The <code>SharedCacheManager</code> responds with whether or not the
* NodeManager can upload the file.
* </p>
*
* @param request whether the resource can be uploaded to the shared cache
* @return response indicating if resource can be uploaded to the shared cache
* @throws YarnException
* @throws IOException
*/
public SCMUploaderCanUploadResponse
canUpload(SCMUploaderCanUploadRequest request)
throws YarnException, IOException;
}
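// Hedged sketch of the expected call order (not defined by the interface
// itself): an uploader first asks canUpload, uploads the file out of band,
// then calls notify and deletes the file if the SCM rejects it. "scm",
// "resourceKey" and "fileName" are illustrative assumptions.
class ScmUploadFlowSketch {
  static boolean uploadIfAllowed(SCMUploaderProtocol scm, String resourceKey,
      String fileName) throws YarnException, IOException {
    SCMUploaderCanUploadRequest canUpload = org.apache.hadoop.yarn.util.Records
        .newRecord(SCMUploaderCanUploadRequest.class);
    canUpload.setResourceKey(resourceKey);
    if (!scm.canUpload(canUpload).getUploadable()) {
      return false; // SCM declined; skip the upload entirely.
    }
    // ... upload the file to the shared cache directory out of band ...
    SCMUploaderNotifyRequest notifyRequest = org.apache.hadoop.yarn.util.Records
        .newRecord(SCMUploaderNotifyRequest.class);
    notifyRequest.setResourceKey(resourceKey);
    notifyRequest.setFilename(fileName);
    // If the SCM rejects the notification, the caller must delete the file.
    return scm.notify(notifyRequest).getAccepted();
  }
}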
| 3,000 | 34.72619 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.RMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import com.google.common.base.Preconditions;
public class ServerRMProxy<T> extends RMProxy<T> {
private static final Log LOG = LogFactory.getLog(ServerRMProxy.class);
private static final ServerRMProxy INSTANCE = new ServerRMProxy();
private ServerRMProxy() {
super();
}
/**
* Create a proxy to the ResourceManager for the specified protocol.
* @param configuration Configuration with all the required information.
* @param protocol Server protocol for which proxy is being requested.
* @param <T> Type of proxy.
* @return Proxy to the ResourceManager for the specified server protocol.
* @throws IOException
*/
public static <T> T createRMProxy(final Configuration configuration,
final Class<T> protocol) throws IOException {
return createRMProxy(configuration, protocol, INSTANCE);
}
@InterfaceAudience.Private
@Override
protected InetSocketAddress getRMAddress(YarnConfiguration conf,
Class<?> protocol) {
if (protocol == ResourceTracker.class) {
return conf.getSocketAddr(
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
} else {
String message = "Unsupported protocol found when creating the proxy " +
"connection to ResourceManager: " +
          ((protocol != null) ? protocol.getName() : "null");
LOG.error(message);
throw new IllegalStateException(message);
}
}
@InterfaceAudience.Private
@Override
protected void checkAllowedProtocols(Class<?> protocol) {
Preconditions.checkArgument(
protocol.isAssignableFrom(ResourceTracker.class),
"ResourceManager does not support this protocol");
}
}
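// Hedged usage sketch: the one supported entry point. The configuration is
// expected to carry yarn.resourcemanager.resource-tracker.address (or the
// default is used); nothing else is required of the caller.
class ServerRMProxyUsageSketch {
  static ResourceTracker connect(Configuration conf) throws IOException {
    return ServerRMProxy.createRMProxy(conf, ResourceTracker.class);
  }
}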
| 3,008 | 37.088608 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/SCMUploaderProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.SCMUploaderProtocol.SCMUploaderProtocolService;
@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB",
protocolVersion = 1)
public interface SCMUploaderProtocolPB extends
SCMUploaderProtocolService.BlockingInterface {
}
| 1,193 | 40.172414 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService;
@ProtocolInfo(
protocolName = "org.apache.hadoop.yarn.server.api.ResourceTrackerPB",
protocolVersion = 1)
public interface ResourceTrackerPB extends ResourceTrackerService.BlockingInterface {
}
| 1,174 | 39.517241 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
import java.io.IOException;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
/**
* This is used by the Node Manager to register/nodeHeartbeat/unregister with
* the ResourceManager.
*/
public interface ResourceTracker {
@Idempotent
RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException, IOException;
@AtMostOnce
NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException;
@Idempotent
UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException;
}
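// Hedged lifecycle sketch (not part of the protocol definition): register
// once, heartbeat repeatedly, unregister on clean shutdown. Request objects
// are assumed to be built via the factories in this package; "tracker" and
// "nodeId" are placeholders.
class ResourceTrackerLifecycleSketch {
  static void run(ResourceTracker tracker,
      RegisterNodeManagerRequest registration,
      NodeHeartbeatRequest heartbeat,
      org.apache.hadoop.yarn.api.records.NodeId nodeId)
      throws YarnException, IOException {
    // @Idempotent: safe for the RPC layer to retry across RM failover.
    tracker.registerNodeManager(registration);
    // @AtMostOnce: a failed heartbeat must not be blindly re-executed.
    tracker.nodeHeartbeat(heartbeat);
    tracker.unRegisterNodeManager(
        UnRegisterNodeManagerRequest.newInstance(nodeId));
  }
}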
| 2,099 | 41 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
public interface ResourceManagerConstants {
/**
   * The invalid identifier of the ResourceManager, used as the default value
   * when initializing the RM identifier. Currently the RM uses its start-up
   * timestamp as its identifier.
*/
public static final long RM_INVALID_IDENTIFIER = -1;
}
| 1,153 | 37.466667 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SCMUploaderNotifyRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* The request from the NodeManager to the <code>SharedCacheManager</code> that
* notifies that a resource has been uploaded to the shared cache. The
* <code>SharedCacheManager</code> may reject the resource for various reasons,
* in which case the NodeManager should remove it from the shared cache.
* </p>
*/
@Private
@Unstable
public abstract class SCMUploaderNotifyRequest {
/**
* Get the filename of the resource that was just uploaded to the shared
* cache.
*
* @return the filename
*/
public abstract String getFileName();
/**
* Set the filename of the resource that was just uploaded to the shared
* cache.
*
* @param filename the name of the file
*/
public abstract void setFilename(String filename);
/**
* Get the <code>key</code> of the resource that was just uploaded to the
* shared cache.
*
* @return <code>key</code>
*/
public abstract String getResourceKey();
/**
* Set the <code>key</code> of the resource that was just uploaded to the
* shared cache.
*
* @param key unique identifier for the resource
*/
public abstract void setResourceKey(String key);
}
| 2,174 | 30.985294 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SCMUploaderCanUploadRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* The request from the NodeManager to the <code>SharedCacheManager</code> that
* requests whether it can upload a resource in the shared cache.
* </p>
*/
@Private
@Unstable
public abstract class SCMUploaderCanUploadRequest {
/**
* Get the <code>key</code> of the resource that would be uploaded to the
* shared cache.
*
* @return <code>key</code>
*/
public abstract String getResourceKey();
/**
* Set the <code>key</code> of the resource that would be uploaded to the
* shared cache.
*
* @param key unique identifier for the resource
*/
public abstract void setResourceKey(String key);
}
| 1,648 | 31.98 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SCMUploaderNotifyResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* The response from the SharedCacheManager to the NodeManager that indicates
* whether the NodeManager needs to delete the cached resource it was sending
* the notification for.
* </p>
*/
@Private
@Unstable
public abstract class SCMUploaderNotifyResponse {
/**
* Get whether or not the shared cache manager has accepted the notified
* resource (i.e. the uploaded file should remain in the cache).
*
* @return boolean True if the resource has been accepted, false otherwise.
*/
public abstract boolean getAccepted();
/**
* Set whether or not the shared cache manager has accepted the notified
* resource (i.e. the uploaded file should remain in the cache).
*
* @param b True if the resource has been accepted, false otherwise.
*/
public abstract void setAccepted(boolean b);
}
| 1,838 | 34.365385 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
public interface NodeHeartbeatResponse {
int getResponseId();
NodeAction getNodeAction();
List<ContainerId> getContainersToCleanup();
List<ContainerId> getContainersToBeRemovedFromNM();
List<ApplicationId> getApplicationsToCleanup();
void setResponseId(int responseId);
void setNodeAction(NodeAction action);
MasterKey getContainerTokenMasterKey();
void setContainerTokenMasterKey(MasterKey secretKey);
MasterKey getNMTokenMasterKey();
void setNMTokenMasterKey(MasterKey secretKey);
void addAllContainersToCleanup(List<ContainerId> containers);
  // This tells the NM to remove finished containers from its context.
  // Currently, the NM removes finished containers from its context only after
  // the AM has actually received them in a previous allocate response.
void addContainersToBeRemovedFromNM(List<ContainerId> containers);
void addAllApplicationsToCleanup(List<ApplicationId> applications);
long getNextHeartBeatInterval();
void setNextHeartBeatInterval(long nextHeartBeatInterval);
String getDiagnosticsMessage();
void setDiagnosticsMessage(String diagnosticsMessage);
  // Credentials (i.e., HDFS tokens) needed by NodeManagers for application
  // localization and log aggregation.
Map<ApplicationId, ByteBuffer> getSystemCredentialsForApps();
void setSystemCredentialsForApps(
Map<ApplicationId, ByteBuffer> systemCredentials);
boolean getAreNodeLabelsAcceptedByRM();
void setAreNodeLabelsAcceptedByRM(boolean areNodeLabelsAcceptedByRM);
}
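// Hedged sketch of consumption order on the NM side: honor the NodeAction
// before applying any cleanup lists. The cleanup bodies are elided; only the
// control flow is illustrated.
class HeartbeatResponseHandlingSketch {
  static void apply(NodeHeartbeatResponse response) {
    NodeAction action = response.getNodeAction();
    if (action == NodeAction.RESYNC || action == NodeAction.SHUTDOWN) {
      return; // re-register with the RM, or stop, before anything else
    }
    for (ContainerId containerId : response.getContainersToCleanup()) {
      // kill the container and clean up its local resources (elided)
    }
    for (ApplicationId appId : response.getApplicationsToCleanup()) {
      // finish app-level work such as log aggregation (elided)
    }
  }
}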
| 2,712 | 35.662162 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
public interface RegisterNodeManagerResponse {
MasterKey getContainerTokenMasterKey();
void setContainerTokenMasterKey(MasterKey secretKey);
MasterKey getNMTokenMasterKey();
void setNMTokenMasterKey(MasterKey secretKey);
NodeAction getNodeAction();
void setNodeAction(NodeAction nodeAction);
long getRMIdentifier();
void setRMIdentifier(long rmIdentifier);
String getDiagnosticsMessage();
void setDiagnosticsMessage(String diagnosticsMessage);
void setRMVersion(String version);
String getRMVersion();
boolean getAreNodeLabelsAcceptedByRM();
void setAreNodeLabelsAcceptedByRM(boolean areNodeLabelsAcceptedByRM);
}
| 1,646 | 30.673077 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;
/**
* Node Manager's unregister request.
*/
public abstract class UnRegisterNodeManagerRequest {
public static UnRegisterNodeManagerRequest newInstance(NodeId nodeId) {
    UnRegisterNodeManagerRequest request = Records
        .newRecord(UnRegisterNodeManagerRequest.class);
    request.setNodeId(nodeId);
    return request;
}
public abstract NodeId getNodeId();
public abstract void setNodeId(NodeId nodeId);
}
| 1,428 | 35.641026 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;
public abstract class RegisterNodeManagerRequest {
public static RegisterNodeManagerRequest newInstance(NodeId nodeId,
int httpPort, Resource resource, String nodeManagerVersionId,
List<NMContainerStatus> containerStatuses,
List<ApplicationId> runningApplications) {
return newInstance(nodeId, httpPort, resource, nodeManagerVersionId,
containerStatuses, runningApplications, null);
}
public static RegisterNodeManagerRequest newInstance(NodeId nodeId,
int httpPort, Resource resource, String nodeManagerVersionId,
List<NMContainerStatus> containerStatuses,
List<ApplicationId> runningApplications, Set<NodeLabel> nodeLabels) {
RegisterNodeManagerRequest request =
Records.newRecord(RegisterNodeManagerRequest.class);
request.setHttpPort(httpPort);
request.setResource(resource);
request.setNodeId(nodeId);
request.setNMVersion(nodeManagerVersionId);
request.setContainerStatuses(containerStatuses);
request.setRunningApplications(runningApplications);
request.setNodeLabels(nodeLabels);
return request;
}
public abstract NodeId getNodeId();
public abstract int getHttpPort();
public abstract Resource getResource();
public abstract String getNMVersion();
public abstract List<NMContainerStatus> getNMContainerStatuses();
public abstract Set<NodeLabel> getNodeLabels();
public abstract void setNodeLabels(Set<NodeLabel> nodeLabels);
/**
   * We introduce this here because the YARN RM currently does not persist node
   * information for running applications. When an RM restart happens, we
   * cannot determine whether a node should perform application cleanup (such
   * as log aggregation, status updates, etc.) or not.
   * <p>
   * With this running-application list in the node manager register request,
   * we can recover node information for running applications and then take
   * actions accordingly.
   *
   * @return the list of applications running on this node
*/
public abstract List<ApplicationId> getRunningApplications();
public abstract void setNodeId(NodeId nodeId);
public abstract void setHttpPort(int port);
public abstract void setResource(Resource resource);
public abstract void setNMVersion(String version);
public abstract void setContainerStatuses(
List<NMContainerStatus> containerStatuses);
/**
* Setter for {@link RegisterNodeManagerRequest#getRunningApplications()}
   * @param runningApplications the applications running on this node
*/
public abstract void setRunningApplications(
List<ApplicationId> runningApplications);
}
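// Hedged construction sketch: a minimal registration request via the factory
// above. Host name, ports, version string and resource figures are
// illustrative only.
class RegisterNodeManagerRequestSketch {
  static RegisterNodeManagerRequest example() {
    NodeId nodeId = NodeId.newInstance("nm-host.example.com", 45454);
    Resource capability = Resource.newInstance(8192, 8); // 8 GB, 8 vcores
    return RegisterNodeManagerRequest.newInstance(nodeId, 8042, capability,
        "2.7.0", java.util.Collections.<NMContainerStatus>emptyList(),
        java.util.Collections.<ApplicationId>emptyList());
  }
}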
| 3,752 | 39.793478 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SCMUploaderCanUploadResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* <p>
* The response from the SharedCacheManager to the NodeManager that indicates
* whether the NodeManager can upload the resource to the shared cache. If it is
* not accepted by SCM, the NodeManager should not upload it to the shared
* cache.
* </p>
*/
@Private
@Unstable
public abstract class SCMUploaderCanUploadResponse {
/**
* Get whether or not the node manager can upload the resource to the shared
* cache.
*
   * @return true if the resource can be uploaded, false otherwise.
*/
public abstract boolean getUploadable();
/**
* Set whether or not the node manager can upload the resource to the shared
* cache.
*
   * @param b true if the resource can be uploaded, false otherwise.
*/
public abstract void setUploadable(boolean b);
}
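// Illustrative caller-side sketch (assumptions, not part of the original
// file): the NodeManager consults this response before copying a resource
// into the shared cache. "scmClient.canUpload(...)" and "upload(...)" are
// hypothetical placeholders for the actual protocol call and upload step.
//
//   SCMUploaderCanUploadResponse response = scmClient.canUpload(request);
//   if (response.getUploadable()) {
//     upload(resource); // accepted by the SCM
//   } // otherwise skip the upload entirely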
| 1,802 | 33.018868 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.util.Records;
/**
* NMContainerStatus includes the current information of a container. This
* record is used by YARN only, whereas {@link ContainerStatus} is used both
* inside YARN and by end-users.
*/
public abstract class NMContainerStatus {
// Used by tests only
public static NMContainerStatus newInstance(ContainerId containerId,
ContainerState containerState, Resource allocatedResource,
String diagnostics, int containerExitStatus, Priority priority,
long creationTime) {
return newInstance(containerId, containerState, allocatedResource,
diagnostics, containerExitStatus, priority, creationTime,
CommonNodeLabelsManager.NO_LABEL);
}
public static NMContainerStatus newInstance(ContainerId containerId,
ContainerState containerState, Resource allocatedResource,
String diagnostics, int containerExitStatus, Priority priority,
long creationTime, String nodeLabelExpression) {
NMContainerStatus status =
Records.newRecord(NMContainerStatus.class);
status.setContainerId(containerId);
status.setContainerState(containerState);
status.setAllocatedResource(allocatedResource);
status.setDiagnostics(diagnostics);
status.setContainerExitStatus(containerExitStatus);
status.setPriority(priority);
status.setCreationTime(creationTime);
status.setNodeLabelExpression(nodeLabelExpression);
return status;
}
/**
* Get the <code>ContainerId</code> of the container.
*
* @return <code>ContainerId</code> of the container.
*/
public abstract ContainerId getContainerId();
public abstract void setContainerId(ContainerId containerId);
/**
* Get the allocated <code>Resource</code> of the container.
*
* @return allocated <code>Resource</code> of the container.
*/
public abstract Resource getAllocatedResource();
public abstract void setAllocatedResource(Resource resource);
  /**
   * Get the diagnostics information of the container.
   *
   * @return diagnostics information of the container.
   */
public abstract String getDiagnostics();
public abstract void setDiagnostics(String diagnostics);
public abstract ContainerState getContainerState();
public abstract void setContainerState(ContainerState containerState);
/**
* Get the final <code>exit status</code> of the container.
*
* @return final <code>exit status</code> of the container.
*/
public abstract int getContainerExitStatus();
public abstract void setContainerExitStatus(int containerExitStatus);
/**
* Get the <code>Priority</code> of the request.
* @return <code>Priority</code> of the request
*/
public abstract Priority getPriority();
public abstract void setPriority(Priority priority);
  /**
   * Get the time at which the container was created.
   *
   * @return creation time of the container.
   */
public abstract long getCreationTime();
public abstract void setCreationTime(long creationTime);
  /**
   * Get the node-label-expression in the original ResourceRequest.
   *
   * @return node-label-expression of the original ResourceRequest.
   */
public abstract String getNodeLabelExpression();
public abstract void setNodeLabelExpression(
String nodeLabelExpression);
}
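// Illustrative sketch (not part of the original file): constructing a status
// record as a NodeManager might report it for a still-running container when
// registering with the ResourceManager. The resource size, priority, and
// empty diagnostics are assumed values.
class NMContainerStatusExample {
  static NMContainerStatus newRunningStatus(ContainerId containerId) {
    return NMContainerStatus.newInstance(containerId, ContainerState.RUNNING,
        Resource.newInstance(1024, 1), "" /* diagnostics */,
        0 /* exit status is meaningless while running */,
        Priority.newInstance(0), System.currentTimeMillis());
  }
}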
| 4,361 | 32.813953 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.yarn.util.Records;
/**
* Node Manager's unregister response.
*/
public abstract class UnRegisterNodeManagerResponse {
public static UnRegisterNodeManagerResponse newInstance() {
return Records.newRecord(UnRegisterNodeManagerResponse.class);
}
}
| 1,149 | 36.096774 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/LogAggregationReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.util.Records;
/**
* {@code LogAggregationReport} is a report for log aggregation status
* in one NodeManager of an application.
* <p>
* It includes details such as:
* <ul>
* <li>{@link ApplicationId} of the application.</li>
* <li>{@link LogAggregationStatus}</li>
* <li>Diagnostic information</li>
* </ul>
*
*/
@Public
@Unstable
public abstract class LogAggregationReport {
@Public
@Unstable
public static LogAggregationReport newInstance(ApplicationId appId,
LogAggregationStatus status, String diagnosticMessage) {
LogAggregationReport report = Records.newRecord(LogAggregationReport.class);
report.setApplicationId(appId);
report.setLogAggregationStatus(status);
report.setDiagnosticMessage(diagnosticMessage);
return report;
}
/**
* Get the <code>ApplicationId</code> of the application.
* @return <code>ApplicationId</code> of the application
*/
@Public
@Unstable
public abstract ApplicationId getApplicationId();
@Public
@Unstable
public abstract void setApplicationId(ApplicationId appId);
/**
* Get the <code>LogAggregationStatus</code>.
* @return <code>LogAggregationStatus</code>
*/
@Public
@Unstable
public abstract LogAggregationStatus getLogAggregationStatus();
@Public
@Unstable
public abstract void setLogAggregationStatus(
LogAggregationStatus logAggregationStatus);
  /**
   * Get the <em>diagnostic information</em> of this log aggregation.
   * @return <em>diagnostic information</em> of this log aggregation.
   */
@Public
@Unstable
public abstract String getDiagnosticMessage();
@Public
@Unstable
public abstract void setDiagnosticMessage(String diagnosticMessage);
}
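// Illustrative sketch (not part of the original file): building a report as a
// NodeManager might once log aggregation for an application has finished on
// this node. The diagnostic message is an assumed value.
class LogAggregationReportExample {
  static LogAggregationReport newSucceededReport(ApplicationId appId) {
    return LogAggregationReport.newInstance(appId,
        LogAggregationStatus.SUCCEEDED, "all container logs aggregated");
  }
}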
| 2,868 | 30.527473 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.util.Records;
public abstract class NodeHeartbeatRequest {
public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
MasterKey lastKnownContainerTokenMasterKey,
MasterKey lastKnownNMTokenMasterKey, Set<NodeLabel> nodeLabels) {
NodeHeartbeatRequest nodeHeartbeatRequest =
Records.newRecord(NodeHeartbeatRequest.class);
nodeHeartbeatRequest.setNodeStatus(nodeStatus);
nodeHeartbeatRequest
.setLastKnownContainerTokenMasterKey(lastKnownContainerTokenMasterKey);
nodeHeartbeatRequest
.setLastKnownNMTokenMasterKey(lastKnownNMTokenMasterKey);
nodeHeartbeatRequest.setNodeLabels(nodeLabels);
return nodeHeartbeatRequest;
}
public abstract NodeStatus getNodeStatus();
public abstract void setNodeStatus(NodeStatus status);
public abstract MasterKey getLastKnownContainerTokenMasterKey();
public abstract void setLastKnownContainerTokenMasterKey(MasterKey secretKey);
public abstract MasterKey getLastKnownNMTokenMasterKey();
public abstract void setLastKnownNMTokenMasterKey(MasterKey secretKey);
public abstract Set<NodeLabel> getNodeLabels();
public abstract void setNodeLabels(Set<NodeLabel> nodeLabels);
public abstract List<LogAggregationReport>
getLogAggregationReportsForApps();
public abstract void setLogAggregationReportsForApps(
List<LogAggregationReport> logAggregationReportsForApps);
}
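// Illustrative sketch (assumptions, not part of the original file): a
// NodeManager assembling a heartbeat from its current status and the master
// keys it last received; the local variables below are placeholders for the
// NM's own state.
//
//   NodeHeartbeatRequest heartbeat = NodeHeartbeatRequest.newInstance(
//       currentNodeStatus, lastContainerTokenMasterKey, lastNMTokenMasterKey,
//       null /* no node-label update in this heartbeat */);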
| 2,533 | 39.222222 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto.Builder;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest {
RegisterNodeManagerRequestProto proto = RegisterNodeManagerRequestProto.getDefaultInstance();
RegisterNodeManagerRequestProto.Builder builder = null;
boolean viaProto = false;
private Resource resource = null;
private NodeId nodeId = null;
private List<NMContainerStatus> containerStatuses = null;
private List<ApplicationId> runningApplications = null;
private Set<NodeLabel> labels = null;
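  // PBImpl pattern used throughout these records: "proto" holds the last
  // serialized view, "builder" accumulates pending changes, and "viaProto"
  // marks which of the two is authoritative. The fields above lazily cache
  // deserialized values so repeated getters avoid re-converting from protobuf.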
public RegisterNodeManagerRequestPBImpl() {
builder = RegisterNodeManagerRequestProto.newBuilder();
}
public RegisterNodeManagerRequestPBImpl(RegisterNodeManagerRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RegisterNodeManagerRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.containerStatuses != null) {
addNMContainerStatusesToProto();
}
if (this.runningApplications != null) {
addRunningApplicationsToProto();
}
if (this.resource != null) {
builder.setResource(convertToProtoFormat(this.resource));
}
if (this.nodeId != null) {
builder.setNodeId(convertToProtoFormat(this.nodeId));
}
if (this.labels != null) {
builder.clearNodeLabels();
Builder newBuilder = NodeLabelsProto.newBuilder();
for (NodeLabel label : labels) {
newBuilder.addNodeLabels(convertToProtoFormat(label));
}
builder.setNodeLabels(newBuilder.build());
}
}
private synchronized void addNMContainerStatusesToProto() {
maybeInitBuilder();
builder.clearContainerStatuses();
List<NMContainerStatusProto> list =
new ArrayList<NMContainerStatusProto>();
for (NMContainerStatus status : this.containerStatuses) {
list.add(convertToProtoFormat(status));
}
builder.addAllContainerStatuses(list);
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RegisterNodeManagerRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public Resource getResource() {
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.resource != null) {
return this.resource;
}
if (!p.hasResource()) {
return null;
}
this.resource = convertFromProtoFormat(p.getResource());
return this.resource;
}
@Override
public void setResource(Resource resource) {
maybeInitBuilder();
if (resource == null)
builder.clearResource();
this.resource = resource;
}
@Override
public NodeId getNodeId() {
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.nodeId != null) {
return this.nodeId;
}
if (!p.hasNodeId()) {
return null;
}
this.nodeId = convertFromProtoFormat(p.getNodeId());
return this.nodeId;
}
@Override
public void setNodeId(NodeId nodeId) {
maybeInitBuilder();
if (nodeId == null)
builder.clearNodeId();
this.nodeId = nodeId;
}
@Override
public int getHttpPort() {
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasHttpPort()) {
return 0;
}
return (p.getHttpPort());
}
@Override
public void setHttpPort(int httpPort) {
maybeInitBuilder();
builder.setHttpPort(httpPort);
}
@Override
public List<ApplicationId> getRunningApplications() {
initRunningApplications();
return runningApplications;
}
private void initRunningApplications() {
if (this.runningApplications != null) {
return;
}
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
List<ApplicationIdProto> list = p.getRunningApplicationsList();
this.runningApplications = new ArrayList<ApplicationId>();
for (ApplicationIdProto c : list) {
this.runningApplications.add(convertFromProtoFormat(c));
}
}
  @Override
  public void setRunningApplications(List<ApplicationId> apps) {
    if (apps == null) {
      return;
    }
    initRunningApplications();
    // Note: appends to any applications already recovered from the proto
    // rather than replacing them.
    this.runningApplications.addAll(apps);
  }
private void addRunningApplicationsToProto() {
maybeInitBuilder();
builder.clearRunningApplications();
if (runningApplications == null) {
return;
}
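    // Wrap the cached list in a lazily converting Iterable: each
    // ApplicationId is translated to its proto form only as the protobuf
    // builder iterates, avoiding an intermediate List<ApplicationIdProto>.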
Iterable<ApplicationIdProto> it = new Iterable<ApplicationIdProto>() {
@Override
public Iterator<ApplicationIdProto> iterator() {
return new Iterator<ApplicationIdProto>() {
Iterator<ApplicationId> iter = runningApplications.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public ApplicationIdProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllRunningApplications(it);
}
@Override
public List<NMContainerStatus> getNMContainerStatuses() {
initContainerRecoveryReports();
return containerStatuses;
}
private void initContainerRecoveryReports() {
if (this.containerStatuses != null) {
return;
}
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
List<NMContainerStatusProto> list = p.getContainerStatusesList();
this.containerStatuses = new ArrayList<NMContainerStatus>();
for (NMContainerStatusProto c : list) {
this.containerStatuses.add(convertFromProtoFormat(c));
}
}
@Override
public void setContainerStatuses(
List<NMContainerStatus> containerReports) {
if (containerReports == null) {
return;
}
    initContainerRecoveryReports();
    // Note: appends to statuses already recovered from the proto rather than
    // replacing them, mirroring setRunningApplications above.
    this.containerStatuses.addAll(containerReports);
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String getNMVersion() {
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNmVersion()) {
return "";
}
return (p.getNmVersion());
}
@Override
public void setNMVersion(String version) {
maybeInitBuilder();
builder.setNmVersion(version);
}
@Override
public Set<NodeLabel> getNodeLabels() {
initNodeLabels();
return this.labels;
}
@Override
public void setNodeLabels(Set<NodeLabel> nodeLabels) {
maybeInitBuilder();
builder.clearNodeLabels();
this.labels = nodeLabels;
}
private void initNodeLabels() {
if (this.labels != null) {
return;
}
RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNodeLabels()) {
      labels = null;
return;
}
NodeLabelsProto nodeLabels = p.getNodeLabels();
labels = new HashSet<NodeLabel>();
    for (NodeLabelProto nlp : nodeLabels.getNodeLabelsList()) {
labels.add(convertFromProtoFormat(nlp));
}
}
private NodeLabelPBImpl convertFromProtoFormat(NodeLabelProto p) {
return new NodeLabelPBImpl(p);
}
private NodeLabelProto convertToProtoFormat(NodeLabel t) {
return ((NodeLabelPBImpl)t).getProto();
}
private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
return new ApplicationIdPBImpl(p);
}
private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
return ((ApplicationIdPBImpl)t).getProto();
}
private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
return new NodeIdPBImpl(p);
}
private NodeIdProto convertToProtoFormat(NodeId t) {
return ((NodeIdPBImpl)t).getProto();
}
private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
return new ResourcePBImpl(p);
}
private ResourceProto convertToProtoFormat(Resource t) {
return ((ResourcePBImpl)t).getProto();
}
private NMContainerStatusPBImpl convertFromProtoFormat(NMContainerStatusProto c) {
return new NMContainerStatusPBImpl(c);
}
private NMContainerStatusProto convertToProtoFormat(NMContainerStatus c) {
return ((NMContainerStatusPBImpl)c).getProto();
}
}
| 11,081 | 29.03252 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import com.google.protobuf.TextFormat;
public class NMContainerStatusPBImpl extends NMContainerStatus {
NMContainerStatusProto proto = NMContainerStatusProto
.getDefaultInstance();
NMContainerStatusProto.Builder builder = null;
boolean viaProto = false;
private ContainerId containerId = null;
private Resource resource = null;
private Priority priority = null;
public NMContainerStatusPBImpl() {
builder = NMContainerStatusProto.newBuilder();
}
public NMContainerStatusPBImpl(NMContainerStatusProto proto) {
this.proto = proto;
viaProto = true;
}
public NMContainerStatusProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return this.getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
@Override
public Resource getAllocatedResource() {
if (this.resource != null) {
return this.resource;
}
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasResource()) {
return null;
}
this.resource = convertFromProtoFormat(p.getResource());
return this.resource;
}
@Override
public ContainerId getContainerId() {
if (this.containerId != null) {
return this.containerId;
}
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasContainerId()) {
return null;
}
this.containerId = convertFromProtoFormat(p.getContainerId());
return this.containerId;
}
@Override
public String getDiagnostics() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnostics()) {
return null;
}
return (p.getDiagnostics());
}
@Override
public ContainerState getContainerState() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasContainerState()) {
return null;
}
return convertFromProtoFormat(p.getContainerState());
}
@Override
public void setAllocatedResource(Resource resource) {
maybeInitBuilder();
if (resource == null)
builder.clearResource();
this.resource = resource;
}
  @Override
  public void setContainerId(ContainerId containerId) {
maybeInitBuilder();
if (containerId == null)
builder.clearContainerId();
this.containerId = containerId;
}
@Override
public void setDiagnostics(String diagnosticsInfo) {
maybeInitBuilder();
if (diagnosticsInfo == null) {
builder.clearDiagnostics();
return;
}
builder.setDiagnostics(diagnosticsInfo);
}
@Override
public void setContainerState(ContainerState containerState) {
maybeInitBuilder();
if (containerState == null) {
builder.clearContainerState();
return;
}
builder.setContainerState(convertToProtoFormat(containerState));
}
@Override
public int getContainerExitStatus() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
return p.getContainerExitStatus();
}
@Override
public void setContainerExitStatus(int containerExitStatus) {
maybeInitBuilder();
builder.setContainerExitStatus(containerExitStatus);
}
@Override
public Priority getPriority() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (this.priority != null) {
return this.priority;
}
if (!p.hasPriority()) {
return null;
}
this.priority = convertFromProtoFormat(p.getPriority());
return this.priority;
}
@Override
public void setPriority(Priority priority) {
maybeInitBuilder();
if (priority == null)
builder.clearPriority();
this.priority = priority;
}
@Override
public long getCreationTime() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
return p.getCreationTime();
}
@Override
public void setCreationTime(long creationTime) {
maybeInitBuilder();
builder.setCreationTime(creationTime);
}
@Override
public String getNodeLabelExpression() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
if (p.hasNodeLabelExpression()) {
return p.getNodeLabelExpression();
}
return CommonNodeLabelsManager.NO_LABEL;
}
@Override
public void setNodeLabelExpression(String nodeLabelExpression) {
maybeInitBuilder();
if (nodeLabelExpression == null) {
builder.clearNodeLabelExpression();
return;
}
builder.setNodeLabelExpression(nodeLabelExpression);
}
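  // Copies locally cached fields into the proto builder. The equality checks
  // on containerId and resource are a small optimization: skip re-setting a
  // field when the builder already holds an identical proto value.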
private void mergeLocalToBuilder() {
if (this.containerId != null
&& !((ContainerIdPBImpl) containerId).getProto().equals(
builder.getContainerId())) {
builder.setContainerId(convertToProtoFormat(this.containerId));
}
if (this.resource != null
&& !((ResourcePBImpl) this.resource).getProto().equals(
builder.getResource())) {
builder.setResource(convertToProtoFormat(this.resource));
}
if (this.priority != null) {
builder.setPriority(convertToProtoFormat(this.priority));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = NMContainerStatusProto.newBuilder(proto);
}
viaProto = false;
}
private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
return new ContainerIdPBImpl(p);
}
private ContainerIdProto convertToProtoFormat(ContainerId t) {
return ((ContainerIdPBImpl) t).getProto();
}
private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
return new ResourcePBImpl(p);
}
private ResourceProto convertToProtoFormat(Resource t) {
return ((ResourcePBImpl) t).getProto();
}
private ContainerStateProto
convertToProtoFormat(ContainerState containerState) {
return ProtoUtils.convertToProtoFormat(containerState);
}
private ContainerState convertFromProtoFormat(
ContainerStateProto containerState) {
return ProtoUtils.convertFromProtoFormat(containerState);
}
private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
return new PriorityPBImpl(p);
}
private PriorityProto convertToProtoFormat(Priority t) {
return ((PriorityPBImpl)t).getProto();
}
}
| 8,637 | 27.986577 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
/**
* PBImpl class for UnRegisterNodeManagerRequest.
*/
public class UnRegisterNodeManagerRequestPBImpl extends
UnRegisterNodeManagerRequest {
private UnRegisterNodeManagerRequestProto proto =
UnRegisterNodeManagerRequestProto.getDefaultInstance();
private UnRegisterNodeManagerRequestProto.Builder builder = null;
private boolean viaProto = false;
private NodeId nodeId = null;
public UnRegisterNodeManagerRequestPBImpl() {
builder = UnRegisterNodeManagerRequestProto.newBuilder();
}
public UnRegisterNodeManagerRequestPBImpl(
UnRegisterNodeManagerRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public UnRegisterNodeManagerRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.nodeId != null) {
builder.setNodeId(convertToProtoFormat(this.nodeId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = UnRegisterNodeManagerRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public NodeId getNodeId() {
UnRegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.nodeId != null) {
return this.nodeId;
}
if (!p.hasNodeId()) {
return null;
}
this.nodeId = convertFromProtoFormat(p.getNodeId());
return this.nodeId;
}
@Override
public void setNodeId(NodeId updatedNodeId) {
maybeInitBuilder();
if (updatedNodeId == null) {
builder.clearNodeId();
}
this.nodeId = updatedNodeId;
}
private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
return new NodeIdPBImpl(p);
}
private NodeIdProto convertToProtoFormat(NodeId t) {
return ((NodeIdPBImpl) t).getProto();
}
}
| 3,385 | 30.06422 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
/**
* PBImpl class for UnRegisterNodeManagerResponse.
*/
public class UnRegisterNodeManagerResponsePBImpl extends
UnRegisterNodeManagerResponse {
private UnRegisterNodeManagerResponseProto proto =
UnRegisterNodeManagerResponseProto.getDefaultInstance();
private UnRegisterNodeManagerResponseProto.Builder builder = null;
private boolean viaProto = false;
private boolean rebuild = false;
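  // The response currently carries no fields, so nothing ever sets this flag
  // to true; it is kept for symmetry with other PBImpl response classes.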
public UnRegisterNodeManagerResponsePBImpl() {
builder = UnRegisterNodeManagerResponseProto.newBuilder();
}
public UnRegisterNodeManagerResponsePBImpl(
UnRegisterNodeManagerResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public UnRegisterNodeManagerResponseProto getProto() {
if (rebuild) {
mergeLocalToProto();
}
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
proto = builder.build();
rebuild = false;
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = UnRegisterNodeManagerResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,285 | 31.197183 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto.Builder;
import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl;
public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
NodeHeartbeatRequestProto proto = NodeHeartbeatRequestProto.getDefaultInstance();
NodeHeartbeatRequestProto.Builder builder = null;
boolean viaProto = false;
private NodeStatus nodeStatus = null;
private MasterKey lastKnownContainerTokenMasterKey = null;
private MasterKey lastKnownNMTokenMasterKey = null;
private Set<NodeLabel> labels = null;
private List<LogAggregationReport> logAggregationReportsForApps = null;
public NodeHeartbeatRequestPBImpl() {
builder = NodeHeartbeatRequestProto.newBuilder();
}
public NodeHeartbeatRequestPBImpl(NodeHeartbeatRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public NodeHeartbeatRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
private void mergeLocalToBuilder() {
if (this.nodeStatus != null) {
builder.setNodeStatus(convertToProtoFormat(this.nodeStatus));
}
if (this.lastKnownContainerTokenMasterKey != null) {
builder.setLastKnownContainerTokenMasterKey(
convertToProtoFormat(this.lastKnownContainerTokenMasterKey));
}
if (this.lastKnownNMTokenMasterKey != null) {
builder.setLastKnownNmTokenMasterKey(
convertToProtoFormat(this.lastKnownNMTokenMasterKey));
}
if (this.labels != null) {
builder.clearNodeLabels();
Builder newBuilder = NodeLabelsProto.newBuilder();
for (NodeLabel label : labels) {
newBuilder.addNodeLabels(convertToProtoFormat(label));
}
builder.setNodeLabels(newBuilder.build());
}
if (this.logAggregationReportsForApps != null) {
addLogAggregationStatusForAppsToProto();
}
}
private void addLogAggregationStatusForAppsToProto() {
maybeInitBuilder();
builder.clearLogAggregationReportsForApps();
if (this.logAggregationReportsForApps == null) {
return;
}
Iterable<LogAggregationReportProto> it =
new Iterable<LogAggregationReportProto>() {
@Override
public Iterator<LogAggregationReportProto> iterator() {
return new Iterator<LogAggregationReportProto>() {
private Iterator<LogAggregationReport> iter =
logAggregationReportsForApps.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public LogAggregationReportProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllLogAggregationReportsForApps(it);
}
private LogAggregationReportProto convertToProtoFormat(
LogAggregationReport value) {
return ((LogAggregationReportPBImpl) value).getProto();
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = NodeHeartbeatRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public NodeStatus getNodeStatus() {
NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.nodeStatus != null) {
return this.nodeStatus;
}
if (!p.hasNodeStatus()) {
return null;
}
this.nodeStatus = convertFromProtoFormat(p.getNodeStatus());
return this.nodeStatus;
}
@Override
public void setNodeStatus(NodeStatus nodeStatus) {
maybeInitBuilder();
if (nodeStatus == null)
builder.clearNodeStatus();
this.nodeStatus = nodeStatus;
}
@Override
public MasterKey getLastKnownContainerTokenMasterKey() {
NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.lastKnownContainerTokenMasterKey != null) {
return this.lastKnownContainerTokenMasterKey;
}
if (!p.hasLastKnownContainerTokenMasterKey()) {
return null;
}
this.lastKnownContainerTokenMasterKey =
convertFromProtoFormat(p.getLastKnownContainerTokenMasterKey());
return this.lastKnownContainerTokenMasterKey;
}
@Override
public void setLastKnownContainerTokenMasterKey(MasterKey masterKey) {
maybeInitBuilder();
if (masterKey == null)
builder.clearLastKnownContainerTokenMasterKey();
this.lastKnownContainerTokenMasterKey = masterKey;
}
@Override
public MasterKey getLastKnownNMTokenMasterKey() {
NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.lastKnownNMTokenMasterKey != null) {
return this.lastKnownNMTokenMasterKey;
}
if (!p.hasLastKnownNmTokenMasterKey()) {
return null;
}
this.lastKnownNMTokenMasterKey =
convertFromProtoFormat(p.getLastKnownNmTokenMasterKey());
return this.lastKnownNMTokenMasterKey;
}
@Override
public void setLastKnownNMTokenMasterKey(MasterKey masterKey) {
maybeInitBuilder();
if (masterKey == null)
builder.clearLastKnownNmTokenMasterKey();
this.lastKnownNMTokenMasterKey = masterKey;
}
private NodeStatusPBImpl convertFromProtoFormat(NodeStatusProto p) {
return new NodeStatusPBImpl(p);
}
private NodeStatusProto convertToProtoFormat(NodeStatus t) {
return ((NodeStatusPBImpl)t).getProto();
}
private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) {
return new MasterKeyPBImpl(p);
}
private MasterKeyProto convertToProtoFormat(MasterKey t) {
return ((MasterKeyPBImpl)t).getProto();
}
@Override
public Set<NodeLabel> getNodeLabels() {
initNodeLabels();
return this.labels;
}
@Override
public void setNodeLabels(Set<NodeLabel> nodeLabels) {
maybeInitBuilder();
builder.clearNodeLabels();
this.labels = nodeLabels;
}
private void initNodeLabels() {
if (this.labels != null) {
return;
}
NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNodeLabels()) {
labels = null;
return;
}
NodeLabelsProto nodeLabels = p.getNodeLabels();
labels = new HashSet<NodeLabel>();
    for (NodeLabelProto nlp : nodeLabels.getNodeLabelsList()) {
labels.add(convertFromProtoFormat(nlp));
}
}
private NodeLabelPBImpl convertFromProtoFormat(NodeLabelProto p) {
return new NodeLabelPBImpl(p);
}
private NodeLabelProto convertToProtoFormat(NodeLabel t) {
return ((NodeLabelPBImpl)t).getProto();
}
@Override
public List<LogAggregationReport> getLogAggregationReportsForApps() {
if (this.logAggregationReportsForApps != null) {
return this.logAggregationReportsForApps;
}
initLogAggregationReportsForApps();
return logAggregationReportsForApps;
}
private void initLogAggregationReportsForApps() {
NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
List<LogAggregationReportProto> list =
p.getLogAggregationReportsForAppsList();
this.logAggregationReportsForApps = new ArrayList<LogAggregationReport>();
for (LogAggregationReportProto c : list) {
this.logAggregationReportsForApps.add(convertFromProtoFormat(c));
}
}
private LogAggregationReport convertFromProtoFormat(
LogAggregationReportProto logAggregationReport) {
return new LogAggregationReportPBImpl(logAggregationReport);
}
@Override
public void setLogAggregationReportsForApps(
List<LogAggregationReport> logAggregationStatusForApps) {
    // Ensure the builder exists before touching it; it may still be null when
    // this object was created directly from a proto.
    maybeInitBuilder();
    if (logAggregationStatusForApps == null) {
      builder.clearLogAggregationReportsForApps();
    }
this.logAggregationReportsForApps = logAggregationStatusForApps;
}
}
| 10,472 | 32.037855 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SCMUploaderNotifyRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest;
public class SCMUploaderNotifyRequestPBImpl extends SCMUploaderNotifyRequest {
SCMUploaderNotifyRequestProto proto =
SCMUploaderNotifyRequestProto.getDefaultInstance();
SCMUploaderNotifyRequestProto.Builder builder = null;
boolean viaProto = false;
public SCMUploaderNotifyRequestPBImpl() {
builder = SCMUploaderNotifyRequestProto.newBuilder();
}
public SCMUploaderNotifyRequestPBImpl(
SCMUploaderNotifyRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public SCMUploaderNotifyRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public String getResourceKey() {
SCMUploaderNotifyRequestProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasResourceKey()) ? p.getResourceKey() : null;
}
@Override
public void setResourceKey(String key) {
maybeInitBuilder();
if (key == null) {
builder.clearResourceKey();
return;
}
builder.setResourceKey(key);
}
@Override
public String getFileName() {
SCMUploaderNotifyRequestProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasFilename()) ? p.getFilename() : null;
}
@Override
public void setFilename(String filename) {
maybeInitBuilder();
if (filename == null) {
builder.clearFilename();
return;
}
builder.setFilename(filename);
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = SCMUploaderNotifyRequestProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,908 | 29.946809 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SCMUploaderCanUploadResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadResponseProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse;
public class SCMUploaderCanUploadResponsePBImpl
extends SCMUploaderCanUploadResponse {
SCMUploaderCanUploadResponseProto proto =
SCMUploaderCanUploadResponseProto.getDefaultInstance();
SCMUploaderCanUploadResponseProto.Builder builder = null;
boolean viaProto = false;
public SCMUploaderCanUploadResponsePBImpl() {
builder = SCMUploaderCanUploadResponseProto.newBuilder();
}
public SCMUploaderCanUploadResponsePBImpl(
SCMUploaderCanUploadResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public SCMUploaderCanUploadResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public boolean getUploadable() {
SCMUploaderCanUploadResponseProtoOrBuilder p = viaProto ? proto : builder;
    // Default to true: when in doubt, allow the upload.
return (p.hasUploadable()) ? p.getUploadable() : true;
}
@Override
public void setUploadable(boolean b) {
maybeInitBuilder();
builder.setUploadable(b);
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = SCMUploaderCanUploadResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,564 | 32.75 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeActionProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
public class RegisterNodeManagerResponsePBImpl extends ProtoBase<RegisterNodeManagerResponseProto> implements RegisterNodeManagerResponse {
RegisterNodeManagerResponseProto proto = RegisterNodeManagerResponseProto.getDefaultInstance();
RegisterNodeManagerResponseProto.Builder builder = null;
boolean viaProto = false;
private MasterKey containerTokenMasterKey = null;
private MasterKey nmTokenMasterKey = null;
private boolean rebuild = false;
public RegisterNodeManagerResponsePBImpl() {
builder = RegisterNodeManagerResponseProto.newBuilder();
}
public RegisterNodeManagerResponsePBImpl(RegisterNodeManagerResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RegisterNodeManagerResponseProto getProto() {
if (rebuild)
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.containerTokenMasterKey != null) {
builder.setContainerTokenMasterKey(
convertToProtoFormat(this.containerTokenMasterKey));
}
if (this.nmTokenMasterKey != null) {
builder.setNmTokenMasterKey(
convertToProtoFormat(this.nmTokenMasterKey));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
rebuild = false;
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RegisterNodeManagerResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public MasterKey getContainerTokenMasterKey() {
RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.containerTokenMasterKey != null) {
return this.containerTokenMasterKey;
}
if (!p.hasContainerTokenMasterKey()) {
return null;
}
this.containerTokenMasterKey =
convertFromProtoFormat(p.getContainerTokenMasterKey());
return this.containerTokenMasterKey;
}
@Override
public void setContainerTokenMasterKey(MasterKey masterKey) {
maybeInitBuilder();
if (masterKey == null)
builder.clearContainerTokenMasterKey();
this.containerTokenMasterKey = masterKey;
rebuild = true;
}
@Override
public MasterKey getNMTokenMasterKey() {
RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.nmTokenMasterKey != null) {
return this.nmTokenMasterKey;
}
if (!p.hasNmTokenMasterKey()) {
return null;
}
this.nmTokenMasterKey =
convertFromProtoFormat(p.getNmTokenMasterKey());
return this.nmTokenMasterKey;
}
@Override
public void setNMTokenMasterKey(MasterKey masterKey) {
maybeInitBuilder();
if (masterKey == null)
builder.clearNmTokenMasterKey();
this.nmTokenMasterKey = masterKey;
rebuild = true;
}
@Override
public String getDiagnosticsMessage() {
RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnosticsMessage()) {
return null;
}
return p.getDiagnosticsMessage();
}
@Override
public void setDiagnosticsMessage(String diagnosticsMessage) {
maybeInitBuilder();
if (diagnosticsMessage == null) {
builder.clearDiagnosticsMessage();
return;
}
    builder.setDiagnosticsMessage(diagnosticsMessage);
}
@Override
public String getRMVersion() {
RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasRmVersion()) {
return null;
}
return p.getRmVersion();
}
@Override
public void setRMVersion(String rmVersion) {
maybeInitBuilder();
if (rmVersion == null) {
      builder.clearRmVersion();
return;
}
builder.setRmVersion(rmVersion);
}
@Override
public NodeAction getNodeAction() {
RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
    if (!p.hasNodeAction()) {
return null;
}
return convertFromProtoFormat(p.getNodeAction());
}
@Override
public void setNodeAction(NodeAction nodeAction) {
maybeInitBuilder();
if (nodeAction == null) {
builder.clearNodeAction();
} else {
builder.setNodeAction(convertToProtoFormat(nodeAction));
}
rebuild = true;
}
@Override
public long getRMIdentifier() {
RegisterNodeManagerResponseProtoOrBuilder p = viaProto ? proto : builder;
return (p.getRmIdentifier());
}
@Override
public void setRMIdentifier(long rmIdentifier) {
maybeInitBuilder();
builder.setRmIdentifier(rmIdentifier);
}
private NodeAction convertFromProtoFormat(NodeActionProto p) {
return NodeAction.valueOf(p.name());
}
private NodeActionProto convertToProtoFormat(NodeAction t) {
return NodeActionProto.valueOf(t.name());
}
private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) {
return new MasterKeyPBImpl(p);
}
private MasterKeyProto convertToProtoFormat(MasterKey t) {
return ((MasterKeyPBImpl)t).getProto();
}
@Override
public boolean getAreNodeLabelsAcceptedByRM() {
RegisterNodeManagerResponseProtoOrBuilder p =
this.viaProto ? this.proto : this.builder;
return p.getAreNodeLabelsAcceptedByRM();
}
@Override
public void setAreNodeLabelsAcceptedByRM(boolean areNodeLabelsAcceptedByRM) {
maybeInitBuilder();
this.builder.setAreNodeLabelsAcceptedByRM(areNodeLabelsAcceptedByRM);
}
}
| 7,087 | 29.420601 | 139 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeActionProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SystemCredentialsForAppsProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
public class NodeHeartbeatResponsePBImpl extends
ProtoBase<NodeHeartbeatResponseProto> implements NodeHeartbeatResponse {
NodeHeartbeatResponseProto proto = NodeHeartbeatResponseProto.getDefaultInstance();
NodeHeartbeatResponseProto.Builder builder = null;
boolean viaProto = false;
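  // Locally cached record objects; they shadow the proto until merged back
  // via mergeLocalToBuilder() when getProto() is called.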
private List<ContainerId> containersToCleanup = null;
private List<ContainerId> containersToBeRemovedFromNM = null;
private List<ApplicationId> applicationsToCleanup = null;
private Map<ApplicationId, ByteBuffer> systemCredentials = null;
private MasterKey containerTokenMasterKey = null;
private MasterKey nmTokenMasterKey = null;
public NodeHeartbeatResponsePBImpl() {
builder = NodeHeartbeatResponseProto.newBuilder();
}
public NodeHeartbeatResponsePBImpl(NodeHeartbeatResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public NodeHeartbeatResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.containersToCleanup != null) {
addContainersToCleanupToProto();
}
if (this.applicationsToCleanup != null) {
addApplicationsToCleanupToProto();
}
if (this.containersToBeRemovedFromNM != null) {
addContainersToBeRemovedFromNMToProto();
}
if (this.containerTokenMasterKey != null) {
builder.setContainerTokenMasterKey(
convertToProtoFormat(this.containerTokenMasterKey));
}
if (this.nmTokenMasterKey != null) {
builder.setNmTokenMasterKey(
convertToProtoFormat(this.nmTokenMasterKey));
}
if (this.systemCredentials != null) {
addSystemCredentialsToProto();
}
}
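  // Serializes the credentials map; each ByteBuffer is duplicated so the
  // cached buffer's read position is left untouched by protobuf's copy.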
private void addSystemCredentialsToProto() {
maybeInitBuilder();
builder.clearSystemCredentialsForApps();
for (Map.Entry<ApplicationId, ByteBuffer> entry : systemCredentials.entrySet()) {
builder.addSystemCredentialsForApps(SystemCredentialsForAppsProto.newBuilder()
.setAppId(convertToProtoFormat(entry.getKey()))
.setCredentialsForApp(ProtoUtils.convertToProtoFormat(
entry.getValue().duplicate())));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = NodeHeartbeatResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public int getResponseId() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
return (p.getResponseId());
}
@Override
public void setResponseId(int responseId) {
maybeInitBuilder();
builder.setResponseId((responseId));
}
@Override
public MasterKey getContainerTokenMasterKey() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.containerTokenMasterKey != null) {
return this.containerTokenMasterKey;
}
if (!p.hasContainerTokenMasterKey()) {
return null;
}
this.containerTokenMasterKey =
convertFromProtoFormat(p.getContainerTokenMasterKey());
return this.containerTokenMasterKey;
}
@Override
public void setContainerTokenMasterKey(MasterKey masterKey) {
maybeInitBuilder();
if (masterKey == null)
builder.clearContainerTokenMasterKey();
this.containerTokenMasterKey = masterKey;
}
@Override
public MasterKey getNMTokenMasterKey() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.nmTokenMasterKey != null) {
return this.nmTokenMasterKey;
}
if (!p.hasNmTokenMasterKey()) {
return null;
}
this.nmTokenMasterKey =
convertFromProtoFormat(p.getNmTokenMasterKey());
return this.nmTokenMasterKey;
}
@Override
public void setNMTokenMasterKey(MasterKey masterKey) {
maybeInitBuilder();
if (masterKey == null)
builder.clearNmTokenMasterKey();
this.nmTokenMasterKey = masterKey;
}
@Override
public NodeAction getNodeAction() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNodeAction()) {
return null;
}
return (convertFromProtoFormat(p.getNodeAction()));
}
@Override
public void setNodeAction(NodeAction nodeAction) {
maybeInitBuilder();
if (nodeAction == null) {
builder.clearNodeAction();
return;
}
builder.setNodeAction(convertToProtoFormat(nodeAction));
}
@Override
public String getDiagnosticsMessage() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnosticsMessage()) {
return null;
}
return p.getDiagnosticsMessage();
}
@Override
public void setDiagnosticsMessage(String diagnosticsMessage) {
maybeInitBuilder();
if (diagnosticsMessage == null) {
builder.clearDiagnosticsMessage();
return;
}
builder.setDiagnosticsMessage((diagnosticsMessage));
}
@Override
public List<ContainerId> getContainersToCleanup() {
initContainersToCleanup();
return this.containersToCleanup;
}
@Override
public List<ContainerId> getContainersToBeRemovedFromNM() {
initContainersToBeRemovedFromNM();
return this.containersToBeRemovedFromNM;
}
private void initContainersToCleanup() {
if (this.containersToCleanup != null) {
return;
}
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
List<ContainerIdProto> list = p.getContainersToCleanupList();
this.containersToCleanup = new ArrayList<ContainerId>();
for (ContainerIdProto c : list) {
this.containersToCleanup.add(convertFromProtoFormat(c));
}
}
private void initContainersToBeRemovedFromNM() {
if (this.containersToBeRemovedFromNM != null) {
return;
}
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
List<ContainerIdProto> list = p.getContainersToBeRemovedFromNmList();
this.containersToBeRemovedFromNM = new ArrayList<ContainerId>();
for (ContainerIdProto c : list) {
this.containersToBeRemovedFromNM.add(convertFromProtoFormat(c));
}
}
@Override
public void addAllContainersToCleanup(
final List<ContainerId> containersToCleanup) {
if (containersToCleanup == null)
return;
initContainersToCleanup();
this.containersToCleanup.addAll(containersToCleanup);
}
@Override
public void
addContainersToBeRemovedFromNM(final List<ContainerId> containers) {
if (containers == null)
return;
initContainersToBeRemovedFromNM();
this.containersToBeRemovedFromNM.addAll(containers);
}
private void addContainersToCleanupToProto() {
maybeInitBuilder();
builder.clearContainersToCleanup();
if (containersToCleanup == null)
return;
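    // Lazily adapt the cached ContainerId list: each element is converted to
    // its proto form only as the builder's addAll consumes the iterator.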
Iterable<ContainerIdProto> iterable = new Iterable<ContainerIdProto>() {
@Override
public Iterator<ContainerIdProto> iterator() {
return new Iterator<ContainerIdProto>() {
Iterator<ContainerId> iter = containersToCleanup.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public ContainerIdProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllContainersToCleanup(iterable);
}
private void addContainersToBeRemovedFromNMToProto() {
maybeInitBuilder();
builder.clearContainersToBeRemovedFromNm();
if (containersToBeRemovedFromNM == null)
return;
Iterable<ContainerIdProto> iterable = new Iterable<ContainerIdProto>() {
@Override
public Iterator<ContainerIdProto> iterator() {
return new Iterator<ContainerIdProto>() {
Iterator<ContainerId> iter = containersToBeRemovedFromNM.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public ContainerIdProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllContainersToBeRemovedFromNm(iterable);
}
@Override
public List<ApplicationId> getApplicationsToCleanup() {
initApplicationsToCleanup();
return this.applicationsToCleanup;
}
private void initApplicationsToCleanup() {
if (this.applicationsToCleanup != null) {
return;
}
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
List<ApplicationIdProto> list = p.getApplicationsToCleanupList();
this.applicationsToCleanup = new ArrayList<ApplicationId>();
for (ApplicationIdProto c : list) {
this.applicationsToCleanup.add(convertFromProtoFormat(c));
}
}
@Override
public void addAllApplicationsToCleanup(
final List<ApplicationId> applicationsToCleanup) {
if (applicationsToCleanup == null)
return;
initApplicationsToCleanup();
this.applicationsToCleanup.addAll(applicationsToCleanup);
}
private void addApplicationsToCleanupToProto() {
maybeInitBuilder();
builder.clearApplicationsToCleanup();
if (applicationsToCleanup == null)
return;
Iterable<ApplicationIdProto> iterable = new Iterable<ApplicationIdProto>() {
@Override
public Iterator<ApplicationIdProto> iterator() {
return new Iterator<ApplicationIdProto>() {
Iterator<ApplicationId> iter = applicationsToCleanup.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public ApplicationIdProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllApplicationsToCleanup(iterable);
}
@Override
public Map<ApplicationId, ByteBuffer> getSystemCredentialsForApps() {
if (this.systemCredentials != null) {
return this.systemCredentials;
}
initSystemCredentials();
return systemCredentials;
}
private void initSystemCredentials() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
List<SystemCredentialsForAppsProto> list = p.getSystemCredentialsForAppsList();
    this.systemCredentials = new HashMap<ApplicationId, ByteBuffer>();
for (SystemCredentialsForAppsProto c : list) {
ApplicationId appId = convertFromProtoFormat(c.getAppId());
ByteBuffer byteBuffer = ProtoUtils.convertFromProtoFormat(c.getCredentialsForApp());
this.systemCredentials.put(appId, byteBuffer);
}
}
@Override
public void setSystemCredentialsForApps(
Map<ApplicationId, ByteBuffer> systemCredentials) {
if (systemCredentials == null || systemCredentials.isEmpty()) {
return;
}
maybeInitBuilder();
this.systemCredentials = new HashMap<ApplicationId, ByteBuffer>();
this.systemCredentials.putAll(systemCredentials);
}
@Override
public long getNextHeartBeatInterval() {
NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
return (p.getNextHeartBeatInterval());
}
@Override
public void setNextHeartBeatInterval(long nextHeartBeatInterval) {
maybeInitBuilder();
builder.setNextHeartBeatInterval(nextHeartBeatInterval);
}
private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
return new ContainerIdPBImpl(p);
}
private ContainerIdProto convertToProtoFormat(ContainerId t) {
return ((ContainerIdPBImpl) t).getProto();
}
private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
return new ApplicationIdPBImpl(p);
}
private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
return ((ApplicationIdPBImpl) t).getProto();
}
private NodeAction convertFromProtoFormat(NodeActionProto p) {
return NodeAction.valueOf(p.name());
}
private NodeActionProto convertToProtoFormat(NodeAction t) {
return NodeActionProto.valueOf(t.name());
}
private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) {
return new MasterKeyPBImpl(p);
}
private MasterKeyProto convertToProtoFormat(MasterKey t) {
return ((MasterKeyPBImpl) t).getProto();
}
@Override
public boolean getAreNodeLabelsAcceptedByRM() {
NodeHeartbeatResponseProtoOrBuilder p =
this.viaProto ? this.proto : this.builder;
return p.getAreNodeLabelsAcceptedByRM();
}
@Override
public void setAreNodeLabelsAcceptedByRM(boolean areNodeLabelsAcceptedByRM) {
maybeInitBuilder();
this.builder.setAreNodeLabelsAcceptedByRM(areNodeLabelsAcceptedByRM);
}
}
| 15,331 | 29.602794 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SCMUploaderCanUploadRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadRequest;
public class SCMUploaderCanUploadRequestPBImpl
extends SCMUploaderCanUploadRequest {
SCMUploaderCanUploadRequestProto proto =
SCMUploaderCanUploadRequestProto.getDefaultInstance();
SCMUploaderCanUploadRequestProto.Builder builder = null;
boolean viaProto = false;
public SCMUploaderCanUploadRequestPBImpl() {
builder = SCMUploaderCanUploadRequestProto.newBuilder();
}
public SCMUploaderCanUploadRequestPBImpl(
SCMUploaderCanUploadRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public SCMUploaderCanUploadRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public String getResourceKey() {
SCMUploaderCanUploadRequestProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasResourceKey()) ? p.getResourceKey() : null;
}
@Override
public void setResourceKey(String key) {
maybeInitBuilder();
if (key == null) {
builder.clearResourceKey();
return;
}
builder.setResourceKey(key);
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = SCMUploaderCanUploadRequestProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,578 | 31.64557 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/LogAggregationReportPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class LogAggregationReportPBImpl extends LogAggregationReport {
LogAggregationReportProto proto = LogAggregationReportProto
.getDefaultInstance();
LogAggregationReportProto.Builder builder = null;
boolean viaProto = false;
private ApplicationId applicationId;
public LogAggregationReportPBImpl() {
builder = LogAggregationReportProto.newBuilder();
}
public LogAggregationReportPBImpl(LogAggregationReportProto proto) {
this.proto = proto;
viaProto = true;
}
public LogAggregationReportProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationId != null
&& !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
builder.getApplicationId())) {
builder.setApplicationId(convertToProtoFormat(this.applicationId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = LogAggregationReportProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public ApplicationId getApplicationId() {
if (this.applicationId != null) {
return this.applicationId;
}
LogAggregationReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationId()) {
return null;
}
this.applicationId = convertFromProtoFormat(p.getApplicationId());
return this.applicationId;
}
@Override
public void setApplicationId(ApplicationId appId) {
maybeInitBuilder();
if (appId == null)
builder.clearApplicationId();
this.applicationId = appId;
}
private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
return ((ApplicationIdPBImpl) t).getProto();
}
private ApplicationIdPBImpl convertFromProtoFormat(
ApplicationIdProto applicationId) {
return new ApplicationIdPBImpl(applicationId);
}
@Override
public LogAggregationStatus getLogAggregationStatus() {
LogAggregationReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasLogAggregationStatus()) {
return null;
}
return convertFromProtoFormat(p.getLogAggregationStatus());
}
@Override
public void
setLogAggregationStatus(LogAggregationStatus logAggregationStatus) {
maybeInitBuilder();
if (logAggregationStatus == null) {
builder.clearLogAggregationStatus();
return;
}
builder.setLogAggregationStatus(convertToProtoFormat(logAggregationStatus));
}
private LogAggregationStatus convertFromProtoFormat(
LogAggregationStatusProto s) {
return ProtoUtils.convertFromProtoFormat(s);
}
private LogAggregationStatusProto
convertToProtoFormat(LogAggregationStatus s) {
return ProtoUtils.convertToProtoFormat(s);
}
@Override
public String getDiagnosticMessage() {
LogAggregationReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnostics()) {
return null;
}
return p.getDiagnostics();
}
@Override
public void setDiagnosticMessage(String diagnosticMessage) {
maybeInitBuilder();
if (diagnosticMessage == null) {
builder.clearDiagnostics();
return;
}
builder.setDiagnostics(diagnosticMessage);
}
}
| 5,618 | 29.372973 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SCMUploaderNotifyResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyResponseProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse;
public class SCMUploaderNotifyResponsePBImpl extends SCMUploaderNotifyResponse {
SCMUploaderNotifyResponseProto proto =
SCMUploaderNotifyResponseProto.getDefaultInstance();
SCMUploaderNotifyResponseProto.Builder builder = null;
boolean viaProto = false;
public SCMUploaderNotifyResponsePBImpl() {
builder = SCMUploaderNotifyResponseProto.newBuilder();
}
public SCMUploaderNotifyResponsePBImpl(SCMUploaderNotifyResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public SCMUploaderNotifyResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public boolean getAccepted() {
SCMUploaderNotifyResponseProtoOrBuilder p = viaProto ? proto : builder;
// Default to true, when in doubt just leave the file in the cache
return (p.hasAccepted()) ? p.getAccepted() : true;
}
@Override
public void setAccepted(boolean b) {
maybeInitBuilder();
builder.setAccepted(b);
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = SCMUploaderNotifyResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,514 | 32.986486 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/ResourceUtilization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.util.Records;
/**
* <p>
* <code>ResourceUtilization</code> models the utilization of a set of computer
* resources in the cluster.
* </p>
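 * <p>
 * A minimal usage sketch (illustrative values, not from the original source):
 * <pre>{@code
 * ResourceUtilization u = ResourceUtilization.newInstance(1024, 2048, 0.5f);
 * u.addTo(512, 512, 0.25f); // -> <pmem:1536, vmem:2560, vCores:0.75>
 * }</pre>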
*/
@Private
@Evolving
public abstract class ResourceUtilization implements
Comparable<ResourceUtilization> {
public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) {
ResourceUtilization utilization =
Records.newRecord(ResourceUtilization.class);
utilization.setPhysicalMemory(pmem);
utilization.setVirtualMemory(vmem);
utilization.setCPU(cpu);
return utilization;
}
/**
* Get used <em>virtual memory</em>.
*
* @return <em>virtual memory</em> in MB
*/
public abstract int getVirtualMemory();
/**
* Set used <em>virtual memory</em>.
*
* @param vmem <em>virtual memory</em> in MB
*/
public abstract void setVirtualMemory(int vmem);
/**
* Get <em>physical memory</em>.
*
* @return <em>physical memory</em> in MB
*/
public abstract int getPhysicalMemory();
/**
* Set <em>physical memory</em>.
*
* @param pmem <em>physical memory</em> in MB
*/
public abstract void setPhysicalMemory(int pmem);
/**
* Get <em>CPU</em> utilization.
*
* @return <em>CPU utilization</em> normalized to 1 CPU
*/
public abstract float getCPU();
/**
* Set <em>CPU</em> utilization.
*
* @param cpu <em>CPU utilization</em> normalized to 1 CPU
*/
public abstract void setCPU(float cpu);
@Override
public int hashCode() {
final int prime = 263167;
int result = 3571;
result = prime * result + getVirtualMemory();
result = prime * result + getPhysicalMemory();
result = 31 * result + Float.valueOf(getCPU()).hashCode();
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof ResourceUtilization)) {
return false;
}
ResourceUtilization other = (ResourceUtilization) obj;
if (getVirtualMemory() != other.getVirtualMemory()
|| getPhysicalMemory() != other.getPhysicalMemory()
|| getCPU() != other.getCPU()) {
return false;
}
return true;
}
@Override
public String toString() {
return "<pmem:" + getPhysicalMemory() + ", vmem:" + getVirtualMemory()
+ ", vCores:" + getCPU() + ">";
}
/**
* Add utilization to the current one.
* @param pmem Physical memory used to add.
* @param vmem Virtual memory used to add.
* @param cpu CPU utilization to add.
*/
public void addTo(int pmem, int vmem, float cpu) {
this.setPhysicalMemory(this.getPhysicalMemory() + pmem);
this.setVirtualMemory(this.getVirtualMemory() + vmem);
this.setCPU(this.getCPU() + cpu);
}
}
| 3,811 | 27.661654 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeAction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records;
/**
 * Action that the ResourceManager instructs the NodeManager to perform.
 */
public enum NodeAction {
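  // NORMAL: continue heartbeating; RESYNC: re-register with the RM;
  // SHUTDOWN: stop this NodeManager.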
NORMAL, RESYNC, SHUTDOWN
}
| 989 | 33.137931 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Server records. */
package org.apache.hadoop.yarn.server.api.records;
| 879 | 45.315789 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;
/**
* {@code NodeStatus} is a summary of the status of the node.
* <p>
* It includes information such as:
* <ul>
 * <li>Node information and status.</li>
* <li>Container status.</li>
* </ul>
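 * <p>
 * Construction sketch (hypothetical host/port, empty lists for brevity):
 * <pre>{@code
 * NodeStatus status = NodeStatus.newInstance(
 *     NodeId.newInstance("host", 1234), 1,
 *     Collections.<ContainerStatus>emptyList(),
 *     Collections.<ApplicationId>emptyList(),
 *     NodeHealthStatus.newInstance(true, "healthy", System.currentTimeMillis()),
 *     ResourceUtilization.newInstance(1024, 2048, 0.5f));
 * }</pre>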
*/
public abstract class NodeStatus {
/**
* Create a new {@code NodeStatus}.
* @param nodeId Identifier for this node.
* @param responseId Identifier for the response.
* @param containerStatuses Status of the containers running in this node.
* @param keepAliveApplications Applications to keep alive.
* @param nodeHealthStatus Health status of the node.
 * @param containersUtilization Utilization of the containers in this node.
* @return New {@code NodeStatus} with the provided information.
*/
public static NodeStatus newInstance(NodeId nodeId, int responseId,
List<ContainerStatus> containerStatuses,
List<ApplicationId> keepAliveApplications,
NodeHealthStatus nodeHealthStatus,
ResourceUtilization containersUtilization) {
NodeStatus nodeStatus = Records.newRecord(NodeStatus.class);
nodeStatus.setResponseId(responseId);
nodeStatus.setNodeId(nodeId);
nodeStatus.setContainersStatuses(containerStatuses);
nodeStatus.setKeepAliveApplications(keepAliveApplications);
nodeStatus.setNodeHealthStatus(nodeHealthStatus);
nodeStatus.setContainersUtilization(containersUtilization);
return nodeStatus;
}
public abstract NodeId getNodeId();
public abstract int getResponseId();
public abstract List<ContainerStatus> getContainersStatuses();
public abstract void setContainersStatuses(
List<ContainerStatus> containersStatuses);
public abstract List<ApplicationId> getKeepAliveApplications();
public abstract void setKeepAliveApplications(List<ApplicationId> appIds);
public abstract NodeHealthStatus getNodeHealthStatus();
public abstract void setNodeHealthStatus(NodeHealthStatus healthStatus);
public abstract void setNodeId(NodeId nodeId);
public abstract void setResponseId(int responseId);
/**
* Get the <em>resource utilization</em> of the containers.
* @return <em>resource utilization</em> of the containers
*/
@Public
@Stable
public abstract ResourceUtilization getContainersUtilization();
@Private
@Unstable
public abstract void setContainersUtilization(
ResourceUtilization containersUtilization);
}
| 3,730 | 37.864583 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeHealthStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.util.Records;
/**
* {@code NodeHealthStatus} is a summary of the health status of the node.
* <p>
* It includes information such as:
* <ul>
* <li>
* An indicator of whether the node is healthy, as determined by the
* health-check script.
* </li>
* <li>The previous time at which the health status was reported.</li>
* <li>A diagnostic report on the health status.</li>
* </ul>
*
* @see NodeReport
* @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)
*/
@Public
@Stable
public abstract class NodeHealthStatus {
@Private
public static NodeHealthStatus newInstance(boolean isNodeHealthy,
String healthReport, long lastHealthReport) {
NodeHealthStatus status = Records.newRecord(NodeHealthStatus.class);
status.setIsNodeHealthy(isNodeHealthy);
status.setHealthReport(healthReport);
status.setLastHealthReportTime(lastHealthReport);
return status;
}
/**
* Is the node healthy?
* @return <code>true</code> if the node is healthy, else <code>false</code>
*/
@Public
@Stable
public abstract boolean getIsNodeHealthy();
@Private
@Unstable
public abstract void setIsNodeHealthy(boolean isNodeHealthy);
/**
* Get the <em>diagnostic health report</em> of the node.
* @return <em>diagnostic health report</em> of the node
*/
@Public
@Stable
public abstract String getHealthReport();
@Private
@Unstable
public abstract void setHealthReport(String healthReport);
/**
* Get the <em>last timestamp</em> at which the health report was received.
* @return <em>last timestamp</em> at which the health report was received
*/
@Public
@Stable
public abstract long getLastHealthReportTime();
@Private
@Unstable
public abstract void setLastHealthReportTime(long lastHealthReport);
}
| 3,134 | 32.709677 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/MasterKey.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records;
import java.nio.ByteBuffer;
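/**
 * A versioned secret key (key id plus raw key bytes) used to roll the
 * container and NM token master keys exchanged in the registration and
 * heartbeat responses above.
 */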
public interface MasterKey {
int getKeyId();
void setKeyId(int keyId);
ByteBuffer getBytes();
void setBytes(ByteBuffer bytes);
}
| 1,031 | 29.352941 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records.impl.pb;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
public class NodeStatusPBImpl extends NodeStatus {
NodeStatusProto proto = NodeStatusProto.getDefaultInstance();
NodeStatusProto.Builder builder = null;
boolean viaProto = false;
private NodeId nodeId = null;
private List<ContainerStatus> containers = null;
private NodeHealthStatus nodeHealthStatus = null;
private List<ApplicationId> keepAliveApplications = null;
public NodeStatusPBImpl() {
builder = NodeStatusProto.newBuilder();
}
public NodeStatusPBImpl(NodeStatusProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized NodeStatusProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToBuilder() {
if (this.nodeId != null) {
builder.setNodeId(convertToProtoFormat(this.nodeId));
}
if (this.containers != null) {
addContainersToProto();
}
if (this.nodeHealthStatus != null) {
builder.setNodeHealthStatus(convertToProtoFormat(this.nodeHealthStatus));
}
if (this.keepAliveApplications != null) {
addKeepAliveApplicationsToProto();
}
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = NodeStatusProto.newBuilder(proto);
}
viaProto = false;
}
private synchronized void addContainersToProto() {
maybeInitBuilder();
builder.clearContainersStatuses();
if (containers == null)
return;
Iterable<ContainerStatusProto> iterable = new Iterable<ContainerStatusProto>() {
@Override
public Iterator<ContainerStatusProto> iterator() {
return new Iterator<ContainerStatusProto>() {
Iterator<ContainerStatus> iter = containers.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public ContainerStatusProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllContainersStatuses(iterable);
}
private synchronized void addKeepAliveApplicationsToProto() {
maybeInitBuilder();
builder.clearKeepAliveApplications();
if (keepAliveApplications == null)
return;
Iterable<ApplicationIdProto> iterable = new Iterable<ApplicationIdProto>() {
@Override
public Iterator<ApplicationIdProto> iterator() {
return new Iterator<ApplicationIdProto>() {
Iterator<ApplicationId> iter = keepAliveApplications.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public ApplicationIdProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllKeepAliveApplications(iterable);
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public synchronized int getResponseId() {
NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
return p.getResponseId();
}
@Override
public synchronized void setResponseId(int responseId) {
maybeInitBuilder();
builder.setResponseId(responseId);
}
@Override
public synchronized NodeId getNodeId() {
NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
if (this.nodeId != null) {
return this.nodeId;
}
if (!p.hasNodeId()) {
return null;
}
this.nodeId = convertFromProtoFormat(p.getNodeId());
return this.nodeId;
}
@Override
public synchronized void setNodeId(NodeId nodeId) {
maybeInitBuilder();
if (nodeId == null)
builder.clearNodeId();
this.nodeId = nodeId;
}
@Override
public synchronized List<ContainerStatus> getContainersStatuses() {
initContainers();
return this.containers;
}
@Override
public synchronized void setContainersStatuses(
List<ContainerStatus> containers) {
    maybeInitBuilder(); // builder can be null when this record was built from a proto
    if (containers == null) {
builder.clearContainersStatuses();
}
this.containers = containers;
}
@Override
public synchronized List<ApplicationId> getKeepAliveApplications() {
initKeepAliveApplications();
return this.keepAliveApplications;
}
@Override
public synchronized void setKeepAliveApplications(List<ApplicationId> appIds) {
    maybeInitBuilder(); // same null-builder guard as setContainersStatuses
    if (appIds == null) {
builder.clearKeepAliveApplications();
}
this.keepAliveApplications = appIds;
}
private synchronized void initContainers() {
if (this.containers != null) {
return;
}
NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
List<ContainerStatusProto> list = p.getContainersStatusesList();
this.containers = new ArrayList<ContainerStatus>();
for (ContainerStatusProto c : list) {
this.containers.add(convertFromProtoFormat(c));
}
}
private synchronized void initKeepAliveApplications() {
if (this.keepAliveApplications != null) {
return;
}
NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
List<ApplicationIdProto> list = p.getKeepAliveApplicationsList();
this.keepAliveApplications = new ArrayList<ApplicationId>();
for (ApplicationIdProto c : list) {
this.keepAliveApplications.add(convertFromProtoFormat(c));
}
}
@Override
public synchronized NodeHealthStatus getNodeHealthStatus() {
NodeStatusProtoOrBuilder p = viaProto ? proto : builder;
if (nodeHealthStatus != null) {
return nodeHealthStatus;
}
if (!p.hasNodeHealthStatus()) {
return null;
}
nodeHealthStatus = convertFromProtoFormat(p.getNodeHealthStatus());
return nodeHealthStatus;
}
@Override
public synchronized void setNodeHealthStatus(NodeHealthStatus healthStatus) {
maybeInitBuilder();
if (healthStatus == null) {
builder.clearNodeHealthStatus();
}
this.nodeHealthStatus = healthStatus;
}
@Override
public ResourceUtilization getContainersUtilization() {
NodeStatusProtoOrBuilder p =
this.viaProto ? this.proto : this.builder;
if (!p.hasContainersUtilization()) {
return null;
}
return convertFromProtoFormat(p.getContainersUtilization());
}
@Override
public void setContainersUtilization(
ResourceUtilization containersUtilization) {
maybeInitBuilder();
if (containersUtilization == null) {
this.builder.clearContainersUtilization();
return;
}
this.builder
.setContainersUtilization(convertToProtoFormat(containersUtilization));
}
private NodeIdProto convertToProtoFormat(NodeId nodeId) {
return ((NodeIdPBImpl)nodeId).getProto();
}
private NodeId convertFromProtoFormat(NodeIdProto proto) {
return new NodeIdPBImpl(proto);
}
private NodeHealthStatusProto convertToProtoFormat(
NodeHealthStatus healthStatus) {
return ((NodeHealthStatusPBImpl) healthStatus).getProto();
}
private NodeHealthStatus convertFromProtoFormat(NodeHealthStatusProto proto) {
return new NodeHealthStatusPBImpl(proto);
}
private ContainerStatusPBImpl convertFromProtoFormat(ContainerStatusProto c) {
return new ContainerStatusPBImpl(c);
}
private ContainerStatusProto convertToProtoFormat(ContainerStatus c) {
return ((ContainerStatusPBImpl)c).getProto();
}
private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto c) {
return new ApplicationIdPBImpl(c);
}
private ApplicationIdProto convertToProtoFormat(ApplicationId c) {
return ((ApplicationIdPBImpl)c).getProto();
}
private ResourceUtilizationProto convertToProtoFormat(ResourceUtilization r) {
return ((ResourceUtilizationPBImpl) r).getProto();
}
private ResourceUtilizationPBImpl convertFromProtoFormat(
ResourceUtilizationProto p) {
return new ResourceUtilizationPBImpl(p);
}
}
| 10,657 | 28.688022 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import com.google.protobuf.TextFormat;
public class NodeHealthStatusPBImpl extends NodeHealthStatus {
private NodeHealthStatusProto.Builder builder;
private boolean viaProto = false;
private NodeHealthStatusProto proto = NodeHealthStatusProto
.getDefaultInstance();
public NodeHealthStatusPBImpl() {
this.builder = NodeHealthStatusProto.newBuilder();
}
public NodeHealthStatusPBImpl(NodeHealthStatusProto proto) {
this.proto = proto;
this.viaProto = true;
}
public NodeHealthStatusProto getProto() {
mergeLocalToProto();
this.proto = this.viaProto ? this.proto : this.builder.build();
this.viaProto = true;
return this.proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToProto() {
if (this.viaProto)
maybeInitBuilder();
this.proto = this.builder.build();
this.viaProto = true;
}
private void maybeInitBuilder() {
if (this.viaProto || this.builder == null) {
this.builder = NodeHealthStatusProto.newBuilder(this.proto);
}
this.viaProto = false;
}
@Override
public boolean getIsNodeHealthy() {
NodeHealthStatusProtoOrBuilder p =
this.viaProto ? this.proto : this.builder;
return p.getIsNodeHealthy();
}
@Override
public void setIsNodeHealthy(boolean isNodeHealthy) {
maybeInitBuilder();
this.builder.setIsNodeHealthy(isNodeHealthy);
}
@Override
public String getHealthReport() {
NodeHealthStatusProtoOrBuilder p =
this.viaProto ? this.proto : this.builder;
if (!p.hasHealthReport()) {
return null;
}
return (p.getHealthReport());
}
@Override
public void setHealthReport(String healthReport) {
maybeInitBuilder();
if (healthReport == null) {
this.builder.clearHealthReport();
return;
}
this.builder.setHealthReport((healthReport));
}
@Override
public long getLastHealthReportTime() {
NodeHealthStatusProtoOrBuilder p =
this.viaProto ? this.proto : this.builder;
return (p.getLastHealthReportTime());
}
@Override
public void setLastHealthReportTime(long lastHealthReport) {
maybeInitBuilder();
this.builder.setLastHealthReportTime((lastHealthReport));
}
}
| 3,734 | 27.295455 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/MasterKeyPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records.impl.pb;
import java.nio.ByteBuffer;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
public class MasterKeyPBImpl extends ProtoBase<MasterKeyProto> implements
MasterKey {
MasterKeyProto proto = MasterKeyProto.getDefaultInstance();
MasterKeyProto.Builder builder = null;
boolean viaProto = false;
public MasterKeyPBImpl() {
builder = MasterKeyProto.newBuilder();
}
public MasterKeyPBImpl(MasterKeyProto proto) {
this.proto = proto;
viaProto = true;
}
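  // No cached sub-records to merge, so getProto() builds the proto directly
  // from the builder without a mergeLocalToProto() step.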
public synchronized MasterKeyProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = MasterKeyProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized int getKeyId() {
MasterKeyProtoOrBuilder p = viaProto ? proto : builder;
return (p.getKeyId());
}
@Override
public synchronized void setKeyId(int id) {
maybeInitBuilder();
builder.setKeyId((id));
}
@Override
public synchronized ByteBuffer getBytes() {
MasterKeyProtoOrBuilder p = viaProto ? proto : builder;
return convertFromProtoFormat(p.getBytes());
}
@Override
public synchronized void setBytes(ByteBuffer bytes) {
maybeInitBuilder();
builder.setBytes(convertToProtoFormat(bytes));
}
@Override
public int hashCode() {
return getKeyId();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (!(obj instanceof MasterKey)) {
return false;
}
MasterKey other = (MasterKey) obj;
if (this.getKeyId() != other.getKeyId()) {
return false;
}
if (!this.getBytes().equals(other.getBytes())) {
return false;
}
return true;
}
}
| 2,905 | 27.213592 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/ResourceUtilizationPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.records.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
@Private
@Unstable
public class ResourceUtilizationPBImpl extends ResourceUtilization {
private ResourceUtilizationProto proto = ResourceUtilizationProto
.getDefaultInstance();
private ResourceUtilizationProto.Builder builder = null;
private boolean viaProto = false;
public ResourceUtilizationPBImpl() {
builder = ResourceUtilizationProto.newBuilder();
}
public ResourceUtilizationPBImpl(ResourceUtilizationProto proto) {
this.proto = proto;
viaProto = true;
}
public ResourceUtilizationProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ResourceUtilizationProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public int getPhysicalMemory() {
ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder;
return (p.getPmem());
}
@Override
public void setPhysicalMemory(int pmem) {
maybeInitBuilder();
builder.setPmem(pmem);
}
@Override
public int getVirtualMemory() {
ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder;
return (p.getVmem());
}
@Override
public void setVirtualMemory(int vmem) {
maybeInitBuilder();
builder.setVmem(vmem);
}
@Override
public float getCPU() {
ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder;
return p.getCpu();
}
@Override
public void setCPU(float cpu) {
maybeInitBuilder();
builder.setCpu(cpu);
}
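// Orders utilization reports by physical memory, then virtual memory, then
// CPU; two reports compare as equal only when all three readings match.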
@Override
public int compareTo(ResourceUtilization other) {
int diff = this.getPhysicalMemory() - other.getPhysicalMemory();
if (diff == 0) {
diff = this.getVirtualMemory() - other.getVirtualMemory();
if (diff == 0) {
diff = Float.compare(this.getCPU(), other.getCPU());
}
}
return diff;
}
}
| 3,152 | 29.028571 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerResponseProto;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerResponsePBImpl;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class ResourceTrackerPBServiceImpl implements ResourceTrackerPB {
private ResourceTracker real;
public ResourceTrackerPBServiceImpl(ResourceTracker impl) {
this.real = impl;
}
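// Each method below follows the same server-side PB pattern: wrap the raw
// proto in its PBImpl record, delegate to the real ResourceTracker, return
// the response's proto, and tunnel YarnException/IOException through
// ServiceException for the RPC layer to transport.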
@Override
public RegisterNodeManagerResponseProto registerNodeManager(
RpcController controller, RegisterNodeManagerRequestProto proto)
throws ServiceException {
RegisterNodeManagerRequestPBImpl request = new RegisterNodeManagerRequestPBImpl(proto);
try {
RegisterNodeManagerResponse response = real.registerNodeManager(request);
return ((RegisterNodeManagerResponsePBImpl)response).getProto();
} catch (YarnException | IOException e) {
throw new ServiceException(e);
}
}
@Override
public NodeHeartbeatResponseProto nodeHeartbeat(RpcController controller,
NodeHeartbeatRequestProto proto) throws ServiceException {
NodeHeartbeatRequestPBImpl request = new NodeHeartbeatRequestPBImpl(proto);
try {
NodeHeartbeatResponse response = real.nodeHeartbeat(request);
return ((NodeHeartbeatResponsePBImpl)response).getProto();
} catch (YarnException | IOException e) {
throw new ServiceException(e);
}
}
@Override
public UnRegisterNodeManagerResponseProto unRegisterNodeManager(
RpcController controller, UnRegisterNodeManagerRequestProto proto)
throws ServiceException {
UnRegisterNodeManagerRequestPBImpl request =
new UnRegisterNodeManagerRequestPBImpl(proto);
try {
UnRegisterNodeManagerResponse response = real
.unRegisterNodeManager(request);
return ((UnRegisterNodeManagerResponsePBImpl) response).getProto();
} catch (YarnException | IOException e) {
throw new ServiceException(e);
}
}
}
| 4,349 | 45.774194 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/SCMUploaderProtocolPBServiceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyResponseProto;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderCanUploadRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderCanUploadResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderNotifyRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderNotifyResponsePBImpl;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class SCMUploaderProtocolPBServiceImpl implements
SCMUploaderProtocolPB {
private SCMUploaderProtocol real;
public SCMUploaderProtocolPBServiceImpl(SCMUploaderProtocol impl) {
this.real = impl;
}
@Override
public SCMUploaderNotifyResponseProto notify(RpcController controller,
SCMUploaderNotifyRequestProto proto) throws ServiceException {
SCMUploaderNotifyRequestPBImpl request =
new SCMUploaderNotifyRequestPBImpl(proto);
try {
SCMUploaderNotifyResponse response = real.notify(request);
return ((SCMUploaderNotifyResponsePBImpl) response).getProto();
} catch (YarnException | IOException e) {
throw new ServiceException(e);
}
}
@Override
public SCMUploaderCanUploadResponseProto canUpload(RpcController controller,
SCMUploaderCanUploadRequestProto proto)
throws ServiceException {
SCMUploaderCanUploadRequestPBImpl request =
new SCMUploaderCanUploadRequestPBImpl(proto);
try {
SCMUploaderCanUploadResponse response = real.canUpload(request);
return ((SCMUploaderCanUploadResponsePBImpl)response).getProto();
} catch (YarnException | IOException e) {
throw new ServiceException(e);
}
}
}
| 3,465 | 42.325 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyRequestProto;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderCanUploadRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderCanUploadResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderNotifyRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SCMUploaderNotifyResponsePBImpl;
import com.google.protobuf.ServiceException;
public class SCMUploaderProtocolPBClientImpl implements
SCMUploaderProtocol, Closeable {
private SCMUploaderProtocolPB proxy;
public SCMUploaderProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, SCMUploaderProtocolPB.class,
ProtobufRpcEngine.class);
proxy =
RPC.getProxy(SCMUploaderProtocolPB.class, clientVersion, addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
this.proxy = null;
}
}
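// Client-side mirror of the PB service pattern: each call ships the
// request's proto over the proxy and, on ServiceException, has RPCUtil
// unwrap and rethrow the original YarnException or IOException; the
// trailing "return null" is unreachable and only satisfies the compiler.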
@Override
public SCMUploaderNotifyResponse notify(SCMUploaderNotifyRequest request)
throws YarnException, IOException {
SCMUploaderNotifyRequestProto requestProto =
((SCMUploaderNotifyRequestPBImpl) request).getProto();
try {
return new SCMUploaderNotifyResponsePBImpl(proxy.notify(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public SCMUploaderCanUploadResponse canUpload(
SCMUploaderCanUploadRequest request) throws YarnException, IOException {
SCMUploaderCanUploadRequestProto requestProto =
((SCMUploaderCanUploadRequestPBImpl)request).getProto();
try {
return new SCMUploaderCanUploadResponsePBImpl(proxy.canUpload(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 3,806 | 39.5 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerResponsePBImpl;
import com.google.protobuf.ServiceException;
public class ResourceTrackerPBClientImpl implements ResourceTracker, Closeable {
private ResourceTrackerPB proxy;
public ResourceTrackerPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ResourceTrackerPB.class,
ProtobufRpcEngine.class);
proxy = (ResourceTrackerPB)RPC.getProxy(
ResourceTrackerPB.class, clientVersion, addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
this.proxy = null;
}
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerRequestProto requestProto = ((RegisterNodeManagerRequestPBImpl)request).getProto();
try {
return new RegisterNodeManagerResponsePBImpl(proxy.registerNodeManager(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeHeartbeatRequestProto requestProto = ((NodeHeartbeatRequestPBImpl)request).getProto();
try {
return new NodeHeartbeatResponsePBImpl(proxy.nodeHeartbeat(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
UnRegisterNodeManagerRequestProto requestProto =
((UnRegisterNodeManagerRequestPBImpl) request).getProto();
try {
return new UnRegisterNodeManagerResponsePBImpl(
proxy.unRegisterNodeManager(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 4,689 | 43.245283 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AppAttemptMetricsConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.metrics;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@Private
@Unstable
public class AppAttemptMetricsConstants {
public static final String ENTITY_TYPE =
"YARN_APPLICATION_ATTEMPT";
public static final String REGISTERED_EVENT_TYPE =
"YARN_APPLICATION_ATTEMPT_REGISTERED";
public static final String FINISHED_EVENT_TYPE =
"YARN_APPLICATION_ATTEMPT_FINISHED";
public static final String PARENT_PRIMARY_FILTER =
"YARN_APPLICATION_ATTEMPT_PARENT";
public static final String TRACKING_URL_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_TRACKING_URL";
public static final String ORIGINAL_TRACKING_URL_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_ORIGINAL_TRACKING_URL";
public static final String HOST_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_HOST";
public static final String RPC_PORT_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_RPC_PORT";
public static final String MASTER_CONTAINER_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_MASTER_CONTAINER";
public static final String DIAGNOSTICS_INFO_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_DIAGNOSTICS_INFO";
public static final String FINAL_STATUS_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_FINAL_STATUS";
public static final String STATE_EVENT_INFO =
"YARN_APPLICATION_ATTEMPT_STATE";
}
| 2,250 | 33.630769 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.metrics;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@Private
@Unstable
public class ApplicationMetricsConstants {
public static final String ENTITY_TYPE =
"YARN_APPLICATION";
public static final String CREATED_EVENT_TYPE =
"YARN_APPLICATION_CREATED";
public static final String FINISHED_EVENT_TYPE =
"YARN_APPLICATION_FINISHED";
public static final String ACLS_UPDATED_EVENT_TYPE =
"YARN_APPLICATION_ACLS_UPDATED";
public static final String NAME_ENTITY_INFO =
"YARN_APPLICATION_NAME";
public static final String TYPE_ENTITY_INFO =
"YARN_APPLICATION_TYPE";
public static final String USER_ENTITY_INFO =
"YARN_APPLICATION_USER";
public static final String QUEUE_ENTITY_INFO =
"YARN_APPLICATION_QUEUE";
public static final String SUBMITTED_TIME_ENTITY_INFO =
"YARN_APPLICATION_SUBMITTED_TIME";
public static final String APP_VIEW_ACLS_ENTITY_INFO =
"YARN_APPLICATION_VIEW_ACLS";
public static final String DIAGNOSTICS_INFO_EVENT_INFO =
"YARN_APPLICATION_DIAGNOSTICS_INFO";
public static final String FINAL_STATUS_EVENT_INFO =
"YARN_APPLICATION_FINAL_STATUS";
public static final String STATE_EVENT_INFO =
"YARN_APPLICATION_STATE";
public static final String APP_CPU_METRICS =
"YARN_APPLICATION_CPU_METRIC";
public static final String APP_MEM_METRICS =
"YARN_APPLICATION_MEM_METRIC";
public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
"YARN_APPLICATION_LATEST_APP_ATTEMPT";
public static final String APP_TAGS_INFO = "YARN_APPLICATION_TAGS";
public static final String UNMANAGED_APPLICATION_ENTITY_INFO =
"YARN_APPLICATION_UNMANAGED_APPLICATION";
}
| 2,655 | 31.790123 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.metrics;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@Private
@Unstable
public class ContainerMetricsConstants {
public static final String ENTITY_TYPE = "YARN_CONTAINER";
public static final String CREATED_EVENT_TYPE = "YARN_CONTAINER_CREATED";
public static final String FINISHED_EVENT_TYPE = "YARN_CONTAINER_FINISHED";
public static final String PARENT_PRIMARY_FILTER = "YARN_CONTAINER_PARENT";
public static final String ALLOCATED_MEMORY_ENTITY_INFO =
"YARN_CONTAINER_ALLOCATED_MEMORY";
public static final String ALLOCATED_VCORE_ENTITY_INFO =
"YARN_CONTAINER_ALLOCATED_VCORE";
public static final String ALLOCATED_HOST_ENTITY_INFO =
"YARN_CONTAINER_ALLOCATED_HOST";
public static final String ALLOCATED_PORT_ENTITY_INFO =
"YARN_CONTAINER_ALLOCATED_PORT";
public static final String ALLOCATED_PRIORITY_ENTITY_INFO =
"YARN_CONTAINER_ALLOCATED_PRIORITY";
public static final String DIAGNOSTICS_INFO_EVENT_INFO =
"YARN_CONTAINER_DIAGNOSTICS_INFO";
public static final String EXIT_STATUS_EVENT_INFO =
"YARN_CONTAINER_EXIT_STATUS";
public static final String STATE_EVENT_INFO =
"YARN_CONTAINER_STATE";
public static final String ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO =
"YARN_CONTAINER_ALLOCATED_HOST_HTTP_ADDRESS";
}
| 2,252 | 34.761905 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.records;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
/**
 * The version information for state stored in YARN components,
 * i.e. RMState, NMState, etc., which includes a majorVersion and a
 * minorVersion.
 * A major version update indicates an incompatible change, while a
 * minor version update indicates a compatible change.
 */
@LimitedPrivate({"YARN", "MapReduce"})
@Unstable
public abstract class Version {
public static Version newInstance(int majorVersion, int minorVersion) {
Version version = Records.newRecord(Version.class);
version.setMajorVersion(majorVersion);
version.setMinorVersion(minorVersion);
return version;
}
public abstract int getMajorVersion();
public abstract void setMajorVersion(int majorVersion);
public abstract int getMinorVersion();
public abstract void setMinorVersion(int minorVersion);
@Override
public String toString() {
return getMajorVersion() + "." + getMinorVersion();
}
public boolean isCompatibleTo(Version version) {
return getMajorVersion() == version.getMajorVersion();
}
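// Illustrative sketch (not in the original source): compatibility depends
// only on the major version, so minor upgrades are always compatible.
//
//   Version stored = Version.newInstance(1, 0);
//   Version current = Version.newInstance(1, 2);
//   current.isCompatibleTo(stored);                    // true: same major
//   Version.newInstance(2, 0).isCompatibleTo(stored);  // false: major bump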
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + getMajorVersion();
result = prime * result + getMinorVersion();
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Version other = (Version) obj;
return this.getMajorVersion() == other.getMajorVersion()
&& this.getMinorVersion() == other.getMinorVersion();
}
}
| 2,670 | 30.423529 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.records.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProtoOrBuilder;
import org.apache.hadoop.yarn.server.records.Version;
public class VersionPBImpl extends Version {
VersionProto proto = VersionProto.getDefaultInstance();
VersionProto.Builder builder = null;
boolean viaProto = false;
public VersionPBImpl() {
builder = VersionProto.newBuilder();
}
public VersionPBImpl(VersionProto proto) {
this.proto = proto;
viaProto = true;
}
public VersionProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = VersionProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public int getMajorVersion() {
VersionProtoOrBuilder p = viaProto ? proto : builder;
return p.getMajorVersion();
}
@Override
public void setMajorVersion(int major) {
maybeInitBuilder();
builder.setMajorVersion(major);
}
@Override
public int getMinorVersion() {
VersionProtoOrBuilder p = viaProto ? proto : builder;
return p.getMinorVersion();
}
@Override
public void setMinorVersion(int minor) {
maybeInitBuilder();
builder.setMinorVersion(minor);
}
}
| 2,219 | 27.461538 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerBuilderUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.utils;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
/**
 * Server-side builder utilities for constructing protocol records such as
 * {@link NodeHeartbeatResponse}.
 */
public class YarnServerBuilderUtils {
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
public static NodeHeartbeatResponse newNodeHeartbeatResponse(
NodeAction action, String diagnosticsMessage) {
NodeHeartbeatResponse response = recordFactory
.newRecordInstance(NodeHeartbeatResponse.class);
response.setNodeAction(action);
response.setDiagnosticsMessage(diagnosticsMessage);
return response;
}
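// Fuller variant used on the heartbeat path: carries the response id the
// node must echo back, any rolled container-token / NM-token master keys,
// the next heartbeat interval, and the containers and applications the
// node should clean up.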
public static NodeHeartbeatResponse newNodeHeartbeatResponse(int responseId,
NodeAction action, List<ContainerId> containersToCleanUp,
List<ApplicationId> applicationsToCleanUp,
MasterKey containerTokenMasterKey, MasterKey nmTokenMasterKey,
long nextHeartbeatInterval) {
NodeHeartbeatResponse response = recordFactory
.newRecordInstance(NodeHeartbeatResponse.class);
response.setResponseId(responseId);
response.setNodeAction(action);
response.setContainerTokenMasterKey(containerTokenMasterKey);
response.setNMTokenMasterKey(nmTokenMasterKey);
response.setNextHeartBeatInterval(nextHeartbeatInterval);
if (containersToCleanUp != null) {
response.addAllContainersToCleanup(containersToCleanUp);
}
if (applicationsToCleanUp != null) {
response.addAllApplicationsToCleanup(applicationsToCleanUp);
}
return response;
}
}
| 2,769 | 38.571429 | 79 |
java
|