| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| stringlengths 1-191 ⌀ | stringlengths 23-351 | stringlengths 0-5.32M | int64 0-5.32M | float64 0-2.9k | int64 0-288k | stringclasses 1 (value: java) |
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager;
import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
import org.apache.hadoop.yarn.util.Clock;
import com.google.common.annotations.VisibleForTesting;
public class RMContextImpl implements RMContext {
private Dispatcher rmDispatcher;
private boolean isHAEnabled;
private HAServiceState haServiceState =
HAServiceProtocol.HAServiceState.INITIALIZING;
private AdminService adminService;
private ConfigurationProvider configurationProvider;
private RMActiveServiceContext activeServiceContext;
private Configuration yarnConfiguration;
/**
* Default constructor. To be used in conjunction with setter methods for
* individual fields.
*/
public RMContextImpl() {
}
@VisibleForTesting
// helper constructor for tests
public RMContextImpl(Dispatcher rmDispatcher,
ContainerAllocationExpirer containerAllocationExpirer,
AMLivelinessMonitor amLivelinessMonitor,
AMLivelinessMonitor amFinishingMonitor,
DelegationTokenRenewer delegationTokenRenewer,
AMRMTokenSecretManager appTokenSecretManager,
RMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInRM nmTokenSecretManager,
ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager,
RMApplicationHistoryWriter rmApplicationHistoryWriter,
ResourceScheduler scheduler) {
this();
this.setDispatcher(rmDispatcher);
setActiveServiceContext(new RMActiveServiceContext(rmDispatcher,
containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
delegationTokenRenewer, appTokenSecretManager,
containerTokenSecretManager, nmTokenSecretManager,
clientToAMTokenSecretManager, rmApplicationHistoryWriter,
scheduler));
ConfigurationProvider provider = new LocalConfigurationProvider();
setConfigurationProvider(provider);
}
@VisibleForTesting
// helper constructor for tests
public RMContextImpl(Dispatcher rmDispatcher,
ContainerAllocationExpirer containerAllocationExpirer,
AMLivelinessMonitor amLivelinessMonitor,
AMLivelinessMonitor amFinishingMonitor,
DelegationTokenRenewer delegationTokenRenewer,
AMRMTokenSecretManager appTokenSecretManager,
RMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInRM nmTokenSecretManager,
ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager,
RMApplicationHistoryWriter rmApplicationHistoryWriter) {
this(
rmDispatcher,
containerAllocationExpirer,
amLivelinessMonitor,
amFinishingMonitor,
delegationTokenRenewer,
appTokenSecretManager,
containerTokenSecretManager,
nmTokenSecretManager,
clientToAMTokenSecretManager,
rmApplicationHistoryWriter,
null);
}
@Override
public Dispatcher getDispatcher() {
return this.rmDispatcher;
}
@Override
public RMStateStore getStateStore() {
return activeServiceContext.getStateStore();
}
@Override
public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
return activeServiceContext.getRMApps();
}
@Override
public ConcurrentMap<NodeId, RMNode> getRMNodes() {
return activeServiceContext.getRMNodes();
}
@Override
public ConcurrentMap<NodeId, RMNode> getInactiveRMNodes() {
return activeServiceContext.getInactiveRMNodes();
}
@Override
public ContainerAllocationExpirer getContainerAllocationExpirer() {
return activeServiceContext.getContainerAllocationExpirer();
}
@Override
public AMLivelinessMonitor getAMLivelinessMonitor() {
return activeServiceContext.getAMLivelinessMonitor();
}
@Override
public AMLivelinessMonitor getAMFinishingMonitor() {
return activeServiceContext.getAMFinishingMonitor();
}
@Override
public DelegationTokenRenewer getDelegationTokenRenewer() {
return activeServiceContext.getDelegationTokenRenewer();
}
@Override
public AMRMTokenSecretManager getAMRMTokenSecretManager() {
return activeServiceContext.getAMRMTokenSecretManager();
}
@Override
public RMContainerTokenSecretManager getContainerTokenSecretManager() {
return activeServiceContext.getContainerTokenSecretManager();
}
@Override
public NMTokenSecretManagerInRM getNMTokenSecretManager() {
return activeServiceContext.getNMTokenSecretManager();
}
@Override
public ResourceScheduler getScheduler() {
return activeServiceContext.getScheduler();
}
@Override
public ReservationSystem getReservationSystem() {
return activeServiceContext.getReservationSystem();
}
@Override
public NodesListManager getNodesListManager() {
return activeServiceContext.getNodesListManager();
}
@Override
public ClientToAMTokenSecretManagerInRM getClientToAMTokenSecretManager() {
return activeServiceContext.getClientToAMTokenSecretManager();
}
@Override
public AdminService getRMAdminService() {
return this.adminService;
}
@VisibleForTesting
public void setStateStore(RMStateStore store) {
activeServiceContext.setStateStore(store);
}
@Override
public ClientRMService getClientRMService() {
return activeServiceContext.getClientRMService();
}
@Override
public ApplicationMasterService getApplicationMasterService() {
return activeServiceContext.getApplicationMasterService();
}
@Override
public ResourceTrackerService getResourceTrackerService() {
return activeServiceContext.getResourceTrackerService();
}
void setHAEnabled(boolean isHAEnabled) {
this.isHAEnabled = isHAEnabled;
}
void setHAServiceState(HAServiceState haServiceState) {
synchronized (haServiceState) {
this.haServiceState = haServiceState;
}
}
void setDispatcher(Dispatcher dispatcher) {
this.rmDispatcher = dispatcher;
}
void setRMAdminService(AdminService adminService) {
this.adminService = adminService;
}
@Override
public void setClientRMService(ClientRMService clientRMService) {
activeServiceContext.setClientRMService(clientRMService);
}
@Override
public RMDelegationTokenSecretManager getRMDelegationTokenSecretManager() {
return activeServiceContext.getRMDelegationTokenSecretManager();
}
@Override
public void setRMDelegationTokenSecretManager(
RMDelegationTokenSecretManager delegationTokenSecretManager) {
activeServiceContext
.setRMDelegationTokenSecretManager(delegationTokenSecretManager);
}
void setContainerAllocationExpirer(
ContainerAllocationExpirer containerAllocationExpirer) {
activeServiceContext
.setContainerAllocationExpirer(containerAllocationExpirer);
}
void setAMLivelinessMonitor(AMLivelinessMonitor amLivelinessMonitor) {
activeServiceContext.setAMLivelinessMonitor(amLivelinessMonitor);
}
void setAMFinishingMonitor(AMLivelinessMonitor amFinishingMonitor) {
activeServiceContext.setAMFinishingMonitor(amFinishingMonitor);
}
void setContainerTokenSecretManager(
RMContainerTokenSecretManager containerTokenSecretManager) {
activeServiceContext
.setContainerTokenSecretManager(containerTokenSecretManager);
}
void setNMTokenSecretManager(NMTokenSecretManagerInRM nmTokenSecretManager) {
activeServiceContext.setNMTokenSecretManager(nmTokenSecretManager);
}
@VisibleForTesting
public void setScheduler(ResourceScheduler scheduler) {
activeServiceContext.setScheduler(scheduler);
}
void setReservationSystem(ReservationSystem reservationSystem) {
activeServiceContext.setReservationSystem(reservationSystem);
}
void setDelegationTokenRenewer(DelegationTokenRenewer delegationTokenRenewer) {
activeServiceContext.setDelegationTokenRenewer(delegationTokenRenewer);
}
void setClientToAMTokenSecretManager(
ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager) {
activeServiceContext
.setClientToAMTokenSecretManager(clientToAMTokenSecretManager);
}
void setAMRMTokenSecretManager(AMRMTokenSecretManager amRMTokenSecretManager) {
activeServiceContext.setAMRMTokenSecretManager(amRMTokenSecretManager);
}
void setNodesListManager(NodesListManager nodesListManager) {
activeServiceContext.setNodesListManager(nodesListManager);
}
void setApplicationMasterService(
ApplicationMasterService applicationMasterService) {
activeServiceContext.setApplicationMasterService(applicationMasterService);
}
void setResourceTrackerService(ResourceTrackerService resourceTrackerService) {
activeServiceContext.setResourceTrackerService(resourceTrackerService);
}
@Override
public boolean isHAEnabled() {
return isHAEnabled;
}
@Override
public HAServiceState getHAServiceState() {
synchronized (haServiceState) {
return haServiceState;
}
}
public void setWorkPreservingRecoveryEnabled(boolean enabled) {
activeServiceContext.setWorkPreservingRecoveryEnabled(enabled);
}
@Override
public boolean isWorkPreservingRecoveryEnabled() {
return activeServiceContext.isWorkPreservingRecoveryEnabled();
}
@Override
public RMApplicationHistoryWriter getRMApplicationHistoryWriter() {
return activeServiceContext.getRMApplicationHistoryWriter();
}
@Override
public void setSystemMetricsPublisher(
SystemMetricsPublisher systemMetricsPublisher) {
activeServiceContext.setSystemMetricsPublisher(systemMetricsPublisher);
}
@Override
public SystemMetricsPublisher getSystemMetricsPublisher() {
return activeServiceContext.getSystemMetricsPublisher();
}
@Override
public void setRMApplicationHistoryWriter(
RMApplicationHistoryWriter rmApplicationHistoryWriter) {
activeServiceContext
.setRMApplicationHistoryWriter(rmApplicationHistoryWriter);
}
@Override
public ConfigurationProvider getConfigurationProvider() {
return this.configurationProvider;
}
public void setConfigurationProvider(
ConfigurationProvider configurationProvider) {
this.configurationProvider = configurationProvider;
}
@Override
public long getEpoch() {
return activeServiceContext.getEpoch();
}
void setEpoch(long epoch) {
activeServiceContext.setEpoch(epoch);
}
@Override
public RMNodeLabelsManager getNodeLabelManager() {
return activeServiceContext.getNodeLabelManager();
}
@Override
public void setNodeLabelManager(RMNodeLabelsManager mgr) {
activeServiceContext.setNodeLabelManager(mgr);
}
public void setSchedulerRecoveryStartAndWaitTime(long waitTime) {
activeServiceContext.setSchedulerRecoveryStartAndWaitTime(waitTime);
}
public boolean isSchedulerReadyForAllocatingContainers() {
return activeServiceContext.isSchedulerReadyForAllocatingContainers();
}
@Private
@VisibleForTesting
public void setSystemClock(Clock clock) {
activeServiceContext.setSystemClock(clock);
}
public ConcurrentMap<ApplicationId, ByteBuffer> getSystemCredentialsForApps() {
return activeServiceContext.getSystemCredentialsForApps();
}
@Private
@Unstable
public RMActiveServiceContext getActiveServiceContext() {
return activeServiceContext;
}
@Private
@Unstable
void setActiveServiceContext(RMActiveServiceContext activeServiceContext) {
this.activeServiceContext = activeServiceContext;
}
@Override
public Configuration getYarnConfiguration() {
return this.yarnConfiguration;
}
public void setYarnConfiguration(Configuration yarnConfiguration) {
this.yarnConfiguration = yarnConfiguration;
}
}
| 14,338 | 31.367946 | 95 |
java
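
The RMContextImpl javadoc above describes a default constructor meant to be combined with per-field setters. Below is a minimal sketch of that wiring, assuming the helper sits in the same `org.apache.hadoop.yarn.server.resourcemanager` package (several setters are package-private); the class name and the choice of `AsyncDispatcher` are illustrative, not taken from the Hadoop sources.

```java
// Hypothetical helper, not part of Hadoop: shows the "default constructor plus
// setters" pattern described in the RMContextImpl javadoc.
package org.apache.hadoop.yarn.server.resourcemanager;

import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;

public class RMContextWiringSketch {

  public static RMContextImpl newMinimalContext() {
    RMContextImpl context = new RMContextImpl();
    // Wire individual fields instead of using the large test constructor.
    context.setDispatcher(new AsyncDispatcher());
    context.setHAEnabled(false);
    context.setConfigurationProvider(new LocalConfigurationProvider());
    context.setYarnConfiguration(new YarnConfiguration());
    // Getters such as getStateStore()/getScheduler() delegate to an
    // RMActiveServiceContext, so setActiveServiceContext(...) must also be
    // wired before those getters are used.
    return context;
  }
}
```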
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEventType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
public enum AMLauncherEventType {
LAUNCH,
CLEANUP
}
| 930 | 37.791667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* Handles launching (and cleaning up) the ApplicationMaster container itself.
*/
public class AMLauncher implements Runnable {
private static final Log LOG = LogFactory.getLog(AMLauncher.class);
private ContainerManagementProtocol containerMgrProxy;
private final RMAppAttempt application;
private final Configuration conf;
private final AMLauncherEventType eventType;
private final RMContext rmContext;
private final Container masterContainer;
@SuppressWarnings("rawtypes")
private final EventHandler handler;
public AMLauncher(RMContext rmContext, RMAppAttempt application,
AMLauncherEventType eventType, Configuration conf) {
this.application = application;
this.conf = conf;
this.eventType = eventType;
this.rmContext = rmContext;
this.handler = rmContext.getDispatcher().getEventHandler();
this.masterContainer = application.getMasterContainer();
}
private void connect() throws IOException {
ContainerId masterContainerID = masterContainer.getId();
containerMgrProxy = getContainerMgrProxy(masterContainerID);
}
private void launch() throws IOException, YarnException {
connect();
ContainerId masterContainerID = masterContainer.getId();
ApplicationSubmissionContext applicationContext =
application.getSubmissionContext();
LOG.info("Setting up container " + masterContainer
+ " for AM " + application.getAppAttemptId());
ContainerLaunchContext launchContext =
createAMContainerLaunchContext(applicationContext, masterContainerID);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(launchContext,
masterContainer.getContainerToken());
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
StartContainersResponse response =
containerMgrProxy.startContainers(allRequests);
if (response.getFailedRequests() != null
&& response.getFailedRequests().containsKey(masterContainerID)) {
Throwable t =
response.getFailedRequests().get(masterContainerID).deSerialize();
parseAndThrowException(t);
} else {
LOG.info("Done launching container " + masterContainer + " for AM "
+ application.getAppAttemptId());
}
}
private void cleanup() throws IOException, YarnException {
connect();
ContainerId containerId = masterContainer.getId();
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(containerId);
StopContainersRequest stopRequest =
StopContainersRequest.newInstance(containerIds);
StopContainersResponse response =
containerMgrProxy.stopContainers(stopRequest);
if (response.getFailedRequests() != null
&& response.getFailedRequests().containsKey(containerId)) {
Throwable t = response.getFailedRequests().get(containerId).deSerialize();
parseAndThrowException(t);
}
}
// Protected. For tests.
protected ContainerManagementProtocol getContainerMgrProxy(
final ContainerId containerId) {
final NodeId node = masterContainer.getNodeId();
final InetSocketAddress containerManagerBindAddress =
NetUtils.createSocketAddrForHost(node.getHost(), node.getPort());
final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again.
UserGroupInformation currentUser =
UserGroupInformation.createRemoteUser(containerId
.getApplicationAttemptId().toString());
String user =
rmContext.getRMApps()
.get(containerId.getApplicationAttemptId().getApplicationId())
.getUser();
org.apache.hadoop.yarn.api.records.Token token =
rmContext.getNMTokenSecretManager().createNMToken(
containerId.getApplicationAttemptId(), node, user);
currentUser.addToken(ConverterUtils.convertFromYarn(token,
containerManagerBindAddress));
return currentUser
.doAs(new PrivilegedAction<ContainerManagementProtocol>() {
@Override
public ContainerManagementProtocol run() {
return (ContainerManagementProtocol) rpc.getProxy(
ContainerManagementProtocol.class,
containerManagerBindAddress, conf);
}
});
}
private ContainerLaunchContext createAMContainerLaunchContext(
ApplicationSubmissionContext applicationMasterContext,
ContainerId containerID) throws IOException {
// Construct the actual Container
ContainerLaunchContext container =
applicationMasterContext.getAMContainerSpec();
LOG.info("Command to launch container "
+ containerID
+ " : "
+ StringUtils.arrayToString(container.getCommands().toArray(
new String[0])));
// Finalize the container
setupTokens(container, containerID);
return container;
}
@Private
@VisibleForTesting
protected void setupTokens(
ContainerLaunchContext container, ContainerId containerID)
throws IOException {
Map<String, String> environment = container.getEnvironment();
environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
application.getWebProxyBase());
// Set AppSubmitTime and MaxAppAttempts to be consumable by the AM.
ApplicationId applicationId =
application.getAppAttemptId().getApplicationId();
environment.put(
ApplicationConstants.APP_SUBMIT_TIME_ENV,
String.valueOf(rmContext.getRMApps()
.get(applicationId)
.getSubmitTime()));
environment.put(ApplicationConstants.MAX_APP_ATTEMPTS_ENV,
String.valueOf(rmContext.getRMApps().get(
applicationId).getMaxAppAttempts()));
Credentials credentials = new Credentials();
DataInputByteBuffer dibb = new DataInputByteBuffer();
ByteBuffer tokens = container.getTokens();
if (tokens != null) {
// TODO: Don't do this kind of check everywhere.
dibb.reset(tokens);
credentials.readTokenStorageStream(dibb);
tokens.rewind();
}
// Add AMRMToken
Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
if (amrmToken != null) {
credentials.addToken(amrmToken.getService(), amrmToken);
}
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
@VisibleForTesting
protected Token<AMRMTokenIdentifier> createAndSetAMRMToken() {
Token<AMRMTokenIdentifier> amrmToken =
this.rmContext.getAMRMTokenSecretManager().createAndGetAMRMToken(
application.getAppAttemptId());
((RMAppAttemptImpl)application).setAMRMToken(amrmToken);
return amrmToken;
}
@SuppressWarnings("unchecked")
public void run() {
switch (eventType) {
case LAUNCH:
try {
LOG.info("Launching master" + application.getAppAttemptId());
launch();
handler.handle(new RMAppAttemptEvent(application.getAppAttemptId(),
RMAppAttemptEventType.LAUNCHED));
} catch(Exception ie) {
String message = "Error launching " + application.getAppAttemptId()
+ ". Got exception: " + StringUtils.stringifyException(ie);
LOG.info(message);
handler.handle(new RMAppAttemptLaunchFailedEvent(application
.getAppAttemptId(), message));
}
break;
case CLEANUP:
try {
LOG.info("Cleaning master " + application.getAppAttemptId());
cleanup();
} catch(IOException ie) {
LOG.info("Error cleaning master ", ie);
} catch (YarnException e) {
StringBuilder sb = new StringBuilder("Container ");
sb.append(masterContainer.getId().toString());
sb.append(" is not handled by this NodeManager");
if (!e.getMessage().contains(sb.toString())) {
// Ignoring if container is already killed by Node Manager.
LOG.info("Error cleaning master ", e);
}
}
break;
default:
LOG.warn("Received unknown event-type " + eventType + ". Ignoring.");
break;
}
}
private void parseAndThrowException(Throwable t) throws YarnException,
IOException {
if (t instanceof YarnException) {
throw (YarnException) t;
} else if (t instanceof InvalidToken) {
throw (InvalidToken) t;
} else {
throw (IOException) t;
}
}
}
| 11,843 | 38.089109 | 103 |
java
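
setupTokens() above shows the usual way to merge credentials into the ByteBuffer form that ContainerLaunchContext expects: read any existing tokens out of the buffer, add new ones, and serialize the whole Credentials object back. Here is a self-contained sketch of that round trip; the class and method names are hypothetical and the token-adding step is left as a comment.

```java
// Hypothetical utility, not part of Hadoop: mirrors the Credentials/ByteBuffer
// round trip used by AMLauncher.setupTokens() above.
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

public class TokenBufferRoundTrip {

  /** Reads tokens already attached to a launch context, then writes them back. */
  public static ByteBuffer roundTrip(ByteBuffer existingTokens) throws IOException {
    Credentials credentials = new Credentials();
    if (existingTokens != null) {
      DataInputByteBuffer dibb = new DataInputByteBuffer();
      dibb.reset(existingTokens);                 // point the stream at the buffer
      credentials.readTokenStorageStream(dibb);   // deserialize existing tokens
      existingTokens.rewind();                    // leave the caller's buffer readable
    }
    // ... a real caller would add tokens here, e.g. the AMRMToken in AMLauncher.
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);   // serialize everything back out
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }
}
```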
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncherEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
public class AMLauncherEvent extends AbstractEvent<AMLauncherEventType> {
private final RMAppAttempt appAttempt;
public AMLauncherEvent(AMLauncherEventType type, RMAppAttempt appAttempt) {
super(type);
this.appAttempt = appAttempt;
}
public RMAppAttempt getAppAttempt() {
return this.appAttempt;
}
}
| 1,334 | 34.131579 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
public class ApplicationMasterLauncher extends AbstractService implements
EventHandler<AMLauncherEvent> {
private static final Log LOG = LogFactory.getLog(
ApplicationMasterLauncher.class);
private ThreadPoolExecutor launcherPool;
private LauncherThread launcherHandlingThread;
private final BlockingQueue<Runnable> masterEvents
= new LinkedBlockingQueue<Runnable>();
protected final RMContext context;
public ApplicationMasterLauncher(RMContext context) {
super(ApplicationMasterLauncher.class.getName());
this.context = context;
this.launcherHandlingThread = new LauncherThread();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
int threadCount = conf.getInt(
YarnConfiguration.RM_AMLAUNCHER_THREAD_COUNT,
YarnConfiguration.DEFAULT_RM_AMLAUNCHER_THREAD_COUNT);
ThreadFactory tf = new ThreadFactoryBuilder()
.setNameFormat("ApplicationMasterLauncher #%d")
.build();
launcherPool = new ThreadPoolExecutor(threadCount, threadCount, 1,
TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
launcherPool.setThreadFactory(tf);
Configuration newConf = new YarnConfiguration(conf);
newConf.setInt(CommonConfigurationKeysPublic.
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
conf.getInt(YarnConfiguration.RM_NODEMANAGER_CONNECT_RETIRES,
YarnConfiguration.DEFAULT_RM_NODEMANAGER_CONNECT_RETIRES));
setConfig(newConf);
super.serviceInit(newConf);
}
@Override
protected void serviceStart() throws Exception {
launcherHandlingThread.start();
super.serviceStart();
}
protected Runnable createRunnableLauncher(RMAppAttempt application,
AMLauncherEventType event) {
Runnable launcher =
new AMLauncher(context, application, event, getConfig());
return launcher;
}
private void launch(RMAppAttempt application) {
Runnable launcher = createRunnableLauncher(application,
AMLauncherEventType.LAUNCH);
masterEvents.add(launcher);
}
@Override
protected void serviceStop() throws Exception {
launcherHandlingThread.interrupt();
try {
launcherHandlingThread.join();
} catch (InterruptedException ie) {
LOG.info(launcherHandlingThread.getName() + " interrupted during join ",
ie);
}
launcherPool.shutdown();
}
private class LauncherThread extends Thread {
public LauncherThread() {
super("ApplicationMaster Launcher");
}
@Override
public void run() {
while (!this.isInterrupted()) {
Runnable toLaunch;
try {
toLaunch = masterEvents.take();
launcherPool.execute(toLaunch);
} catch (InterruptedException e) {
LOG.warn(this.getClass().getName() + " interrupted. Returning.");
return;
}
}
}
}
private void cleanup(RMAppAttempt application) {
Runnable launcher = createRunnableLauncher(application, AMLauncherEventType.CLEANUP);
masterEvents.add(launcher);
}
@Override
public synchronized void handle(AMLauncherEvent appEvent) {
AMLauncherEventType event = appEvent.getType();
RMAppAttempt application = appEvent.getAppAttempt();
switch (event) {
case LAUNCH:
launch(application);
break;
case CLEANUP:
cleanup(application);
break;
default:
break;
}
}
}
| 5,087 | 32.92 | 89 |
java
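
ApplicationMasterLauncher is an AbstractService that consumes AMLauncherEvents: handle() queues an AMLauncher runnable, the LauncherThread drains the queue, and the thread pool runs the launches. The sketch below shows roughly how such a launcher could be wired to a dispatcher; in the real ResourceManager this wiring happens inside the ResourceManager itself, so the helper class, its method, and its inputs are purely illustrative.

```java
// Hypothetical wiring sketch, not part of Hadoop: registers the launcher on a
// dispatcher and sends it a LAUNCH event for one application attempt.
package org.apache.hadoop.yarn.server.resourcemanager.amlauncher;

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;

public class AMLauncherWiringSketch {

  public static void launch(RMContext rmContext, RMAppAttempt attempt) {
    YarnConfiguration conf = new YarnConfiguration();

    ApplicationMasterLauncher launcher = new ApplicationMasterLauncher(rmContext);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.register(AMLauncherEventType.class, launcher);

    launcher.init(conf);
    dispatcher.init(conf);
    launcher.start();
    dispatcher.start();

    // Asynchronously asks the launcher to start the attempt's AM container.
    dispatcher.getEventHandler().handle(
        new AMLauncherEvent(AMLauncherEventType.LAUNCH, attempt));
  }
}
```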
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* Node usage report.
*/
@Private
@Stable
public class NodeReport {
private final Resource usedResources;
private final int numContainers;
public NodeReport(Resource used, int numContainers) {
this.usedResources = used;
this.numContainers = numContainers;
}
public Resource getUsedResources() {
return usedResources;
}
public int getNumContainers() {
return numContainers;
}
}
| 1,480 | 30.510638 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
/**
* The class that encapsulates the response from the cluster info for
* updates from the node managers.
*/
public class NodeResponse {
private final List<Container> completed;
private final List<Container> toCleanUp;
private final List<ApplicationId> finishedApplications;
public NodeResponse(List<ApplicationId> finishedApplications,
List<Container> completed, List<Container> toKill) {
this.finishedApplications = finishedApplications;
this.completed = completed;
this.toCleanUp = toKill;
}
public List<ApplicationId> getFinishedApplications() {
return this.finishedApplications;
}
public List<Container> getCompletedContainers() {
return this.completed;
}
public List<Container> getContainersToCleanUp() {
return this.toCleanUp;
}
}
| 1,798 | 34.27451 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* Utilities shared by schedulers.
*/
@Private
@Unstable
public class SchedulerUtils {
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
public static final String RELEASED_CONTAINER =
"Container released by application";
public static final String LOST_CONTAINER =
"Container released on a *lost* node";
public static final String PREEMPTED_CONTAINER =
"Container preempted by scheduler";
public static final String COMPLETED_APPLICATION =
"Container of a completed application";
public static final String EXPIRED_CONTAINER =
"Container expired since it was unused";
public static final String UNRESERVED_CONTAINER =
"Container reservation no longer required.";
/**
* Utility to create a {@link ContainerStatus} during exceptional
* circumstances.
*
* @param containerId {@link ContainerId} of returned/released/lost container.
* @param diagnostics diagnostic message
* @return <code>ContainerStatus</code> for a returned/released/lost
* container
*/
public static ContainerStatus createAbnormalContainerStatus(
ContainerId containerId, String diagnostics) {
return createAbnormalContainerStatus(containerId,
ContainerExitStatus.ABORTED, diagnostics);
}
/**
* Utility to create a {@link ContainerStatus} during exceptional
* circumstances.
*
* @param containerId {@link ContainerId} of returned/released/lost container.
* @param diagnostics diagnostic message
* @return <code>ContainerStatus</code> for a returned/released/lost
* container
*/
public static ContainerStatus createPreemptedContainerStatus(
ContainerId containerId, String diagnostics) {
return createAbnormalContainerStatus(containerId,
ContainerExitStatus.PREEMPTED, diagnostics);
}
/**
* Utility to create a {@link ContainerStatus} during exceptional
* circumstances.
*
* @param containerId {@link ContainerId} of returned/released/lost container.
* @param diagnostics diagnostic message
* @return <code>ContainerStatus</code> for a returned/released/lost
* container
*/
private static ContainerStatus createAbnormalContainerStatus(
ContainerId containerId, int exitStatus, String diagnostics) {
ContainerStatus containerStatus =
recordFactory.newRecordInstance(ContainerStatus.class);
containerStatus.setContainerId(containerId);
containerStatus.setDiagnostics(diagnostics);
containerStatus.setExitStatus(exitStatus);
containerStatus.setState(ContainerState.COMPLETE);
return containerStatus;
}
/**
* Utility method to normalize a list of resource requests, by ensuring that
* the memory for each request is a multiple of minMemory and is not zero.
*/
public static void normalizeRequests(
List<ResourceRequest> asks,
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource minimumResource,
Resource maximumResource) {
for (ResourceRequest ask : asks) {
normalizeRequest(
ask, resourceCalculator, clusterResource, minimumResource,
maximumResource, minimumResource);
}
}
/**
* Utility method to normalize a resource request, by ensuring that the
* requested memory is a multiple of minMemory and is not zero.
*/
public static void normalizeRequest(
ResourceRequest ask,
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource minimumResource,
Resource maximumResource) {
Resource normalized =
Resources.normalize(
resourceCalculator, ask.getCapability(), minimumResource,
maximumResource, minimumResource);
ask.setCapability(normalized);
}
/**
* Utility method to normalize a list of resource requests, by ensuring that
* the memory for each request is a multiple of minMemory and is not zero.
*/
public static void normalizeRequests(
List<ResourceRequest> asks,
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource minimumResource,
Resource maximumResource,
Resource incrementResource) {
for (ResourceRequest ask : asks) {
normalizeRequest(
ask, resourceCalculator, clusterResource, minimumResource,
maximumResource, incrementResource);
}
}
/**
* Utility method to normalize a resource request, by ensuring that the
* requested memory is a multiple of minMemory and is not zero.
*/
public static void normalizeRequest(
ResourceRequest ask,
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource minimumResource,
Resource maximumResource,
Resource incrementResource) {
Resource normalized =
Resources.normalize(
resourceCalculator, ask.getCapability(), minimumResource,
maximumResource, incrementResource);
ask.setCapability(normalized);
}
private static void normalizeNodeLabelExpressionInRequest(
ResourceRequest resReq, QueueInfo queueInfo) {
String labelExp = resReq.getNodeLabelExpression();
// if the queue has a default label expression and the RR doesn't, use the
// queue's default label expression
if (labelExp == null && queueInfo != null && ResourceRequest.ANY
.equals(resReq.getResourceName())) {
labelExp = queueInfo.getDefaultNodeLabelExpression();
}
// If labelExp is still null, set it to NO_LABEL
if (labelExp == null) {
labelExp = RMNodeLabelsManager.NO_LABEL;
}
resReq.setNodeLabelExpression(labelExp);
}
public static void normalizeAndValidateRequest(ResourceRequest resReq,
Resource maximumResource, String queueName, YarnScheduler scheduler,
boolean isRecovery, RMContext rmContext)
throws InvalidResourceRequestException {
normalizeAndValidateRequest(resReq, maximumResource, queueName, scheduler,
isRecovery, rmContext, null);
}
public static void normalizeAndValidateRequest(ResourceRequest resReq,
Resource maximumResource, String queueName, YarnScheduler scheduler,
boolean isRecovery, RMContext rmContext, QueueInfo queueInfo)
throws InvalidResourceRequestException {
if (null == queueInfo) {
try {
queueInfo = scheduler.getQueueInfo(queueName, false, false);
} catch (IOException e) {
// it is possible the queue cannot be fetched when queue mapping is set; just
// ignore the queueInfo here and move forward
}
}
SchedulerUtils.normalizeNodeLabelExpressionInRequest(resReq, queueInfo);
if (!isRecovery) {
validateResourceRequest(resReq, maximumResource, queueInfo, rmContext);
}
}
public static void normalizeAndvalidateRequest(ResourceRequest resReq,
Resource maximumResource, String queueName, YarnScheduler scheduler,
RMContext rmContext)
throws InvalidResourceRequestException {
normalizeAndvalidateRequest(resReq, maximumResource, queueName, scheduler,
rmContext, null);
}
public static void normalizeAndvalidateRequest(ResourceRequest resReq,
Resource maximumResource, String queueName, YarnScheduler scheduler,
RMContext rmContext, QueueInfo queueInfo)
throws InvalidResourceRequestException {
normalizeAndValidateRequest(resReq, maximumResource, queueName, scheduler,
false, rmContext, queueInfo);
}
/**
* Utility method to validate a resource request, by ensuring that the
* requested memory/vcore is non-negative and not greater than max
*
* @throws InvalidResourceRequestException when there is an invalid request
*/
private static void validateResourceRequest(ResourceRequest resReq,
Resource maximumResource, QueueInfo queueInfo, RMContext rmContext)
throws InvalidResourceRequestException {
if (resReq.getCapability().getMemory() < 0 ||
resReq.getCapability().getMemory() > maximumResource.getMemory()) {
throw new InvalidResourceRequestException("Invalid resource request"
+ ", requested memory < 0"
+ ", or requested memory > max configured"
+ ", requestedMemory=" + resReq.getCapability().getMemory()
+ ", maxMemory=" + maximumResource.getMemory());
}
if (resReq.getCapability().getVirtualCores() < 0 ||
resReq.getCapability().getVirtualCores() >
maximumResource.getVirtualCores()) {
throw new InvalidResourceRequestException("Invalid resource request"
+ ", requested virtual cores < 0"
+ ", or requested virtual cores > max configured"
+ ", requestedVirtualCores="
+ resReq.getCapability().getVirtualCores()
+ ", maxVirtualCores=" + maximumResource.getVirtualCores());
}
String labelExp = resReq.getNodeLabelExpression();
// we don't allow specifying a label expression other than resourceName=ANY now
if (!ResourceRequest.ANY.equals(resReq.getResourceName())
&& labelExp != null && !labelExp.trim().isEmpty()) {
throw new InvalidResourceRequestException(
"Invailid resource request, queue=" + queueInfo.getQueueName()
+ " specified node label expression in a "
+ "resource request has resource name = "
+ resReq.getResourceName());
}
// we don't allow specifying a label expression with more than one node label now
if (labelExp != null && labelExp.contains("&&")) {
throw new InvalidResourceRequestException(
"Invailid resource request, queue=" + queueInfo.getQueueName()
+ " specified more than one node label "
+ "in a node label expression, node label expression = "
+ labelExp);
}
if (labelExp != null && !labelExp.trim().isEmpty() && queueInfo != null) {
if (!checkQueueLabelExpression(queueInfo.getAccessibleNodeLabels(),
labelExp, rmContext)) {
throw new InvalidResourceRequestException("Invalid resource request"
+ ", queue="
+ queueInfo.getQueueName()
+ " doesn't have permission to access all labels "
+ "in resource request. labelExpression of resource request="
+ labelExp
+ ". Queue labels="
+ (queueInfo.getAccessibleNodeLabels() == null ? "" : StringUtils.join(queueInfo
.getAccessibleNodeLabels().iterator(), ',')));
}
}
}
/**
* Check queue label expression, check if node label in queue's
* node-label-expression existed in clusterNodeLabels if rmContext != null
*/
public static boolean checkQueueLabelExpression(Set<String> queueLabels,
String labelExpression, RMContext rmContext) {
// if label expression is empty, we can allocate container on any node
if (labelExpression == null) {
return true;
}
for (String str : labelExpression.split("&&")) {
str = str.trim();
if (!str.trim().isEmpty()) {
// check queue label
if (queueLabels == null) {
return false;
} else {
if (!queueLabels.contains(str)
&& !queueLabels.contains(RMNodeLabelsManager.ANY)) {
return false;
}
}
// check node label manager contains this label
if (null != rmContext) {
RMNodeLabelsManager nlm = rmContext.getNodeLabelManager();
if (nlm != null && !nlm.containsNodeLabel(str)) {
return false;
}
}
}
}
return true;
}
public static AccessType toAccessType(QueueACL acl) {
switch (acl) {
case ADMINISTER_QUEUE:
return AccessType.ADMINISTER_QUEUE;
case SUBMIT_APPLICATIONS:
return AccessType.SUBMIT_APP;
}
return null;
}
public static boolean checkResourceRequestMatchingNodePartition(
ResourceRequest offswitchResourceRequest, String nodePartition,
SchedulingMode schedulingMode) {
// We will only look at node label = nodeLabelToLookAt according to
// schedulingMode and partition of node.
String nodePartitionToLookAt = null;
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
nodePartitionToLookAt = nodePartition;
} else {
nodePartitionToLookAt = RMNodeLabelsManager.NO_LABEL;
}
String askedNodePartition = offswitchResourceRequest.getNodeLabelExpression();
if (null == askedNodePartition) {
askedNodePartition = RMNodeLabelsManager.NO_LABEL;
}
return askedNodePartition.equals(nodePartitionToLookAt);
}
private static boolean hasPendingResourceRequest(ResourceCalculator rc,
ResourceUsage usage, String partitionToLookAt, Resource cluster) {
if (Resources.greaterThan(rc, cluster,
usage.getPending(partitionToLookAt), Resources.none())) {
return true;
}
return false;
}
@Private
public static boolean hasPendingResourceRequest(ResourceCalculator rc,
ResourceUsage usage, String nodePartition, Resource cluster,
SchedulingMode schedulingMode) {
String partitionToLookAt = nodePartition;
if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
partitionToLookAt = RMNodeLabelsManager.NO_LABEL;
}
return hasPendingResourceRequest(rc, usage, partitionToLookAt, cluster);
}
}
| 15,589 | 37.781095 | 92 |
java
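
The normalizeRequest() helpers above round a request's memory up to a multiple of the minimum/increment resource and clip it at the maximum. Below is a small worked example of that behaviour; the concrete numbers and the class name are illustrative, not configuration defaults or Hadoop code.

```java
// Illustrative only, not part of Hadoop: with a 1024 MB minimum/increment and
// an 8192 MB maximum, a 1500 MB ask is normalized up to 2048 MB.
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;

public class NormalizeRequestSketch {

  public static void main(String[] args) {
    ResourceCalculator rc = new DefaultResourceCalculator();
    Resource cluster = Resource.newInstance(64 * 1024, 64);
    Resource min = Resource.newInstance(1024, 1);
    Resource max = Resource.newInstance(8 * 1024, 8);

    ResourceRequest ask = ResourceRequest.newInstance(
        Priority.newInstance(1), ResourceRequest.ANY,
        Resource.newInstance(1500, 1), 1);

    SchedulerUtils.normalizeRequest(ask, rc, cluster, min, max);

    // Memory is rounded up to the next multiple of the minimum: 2048 MB.
    System.out.println(ask.getCapability());
  }
}
```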
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* Resource limits for queues/applications. This is the maximum overall (note:
* not "extra") resource you can get.
*/
public class ResourceLimits {
private volatile Resource limit;
// This is a special limit that goes with the RESERVE_CONT_LOOK_ALL_NODES
// config. This limit indicates how much we need to unreserve to allocate
// another container.
private volatile Resource amountNeededUnreserve;
// How much resource you can use for the next allocation; if this isn't enough
// for the next container allocation, you may need to unreserve some
// containers.
private volatile Resource headroom;
public ResourceLimits(Resource limit) {
this(limit, Resources.none());
}
public ResourceLimits(Resource limit, Resource amountNeededUnreserve) {
this.amountNeededUnreserve = amountNeededUnreserve;
this.headroom = limit;
this.limit = limit;
}
public Resource getLimit() {
return limit;
}
public Resource getHeadroom() {
return headroom;
}
public void setHeadroom(Resource headroom) {
this.headroom = headroom;
}
public Resource getAmountNeededUnreserve() {
return amountNeededUnreserve;
}
public void setLimit(Resource limit) {
this.limit = limit;
}
public void setAmountNeededUnreserve(Resource amountNeededUnreserve) {
this.amountNeededUnreserve = amountNeededUnreserve;
}
}
| 2,376 | 30.276316 | 80 |
java
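
ResourceLimits is a plain holder: the limit is the overall cap, the headroom starts out equal to the limit, and amountNeededUnreserve only matters with the reserve-continue-looking behaviour mentioned in the comments. A hedged sketch of how a scheduler might populate it for a queue follows; the helper class and its inputs are hypothetical.

```java
// Hypothetical helper, not part of Hadoop: fills in a ResourceLimits instance
// for a queue, with headroom = cap minus what the queue already uses.
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourceLimitsSketch {

  public static ResourceLimits limitsFor(Resource queueCap, Resource queueUsed) {
    ResourceLimits limits = new ResourceLimits(queueCap);  // headroom == limit == cap
    limits.setHeadroom(Resources.subtract(queueCap, queueUsed));
    return limits;
  }
}
```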
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
/**
* Resource classification.
*/
public enum NodeType {
NODE_LOCAL(0), RACK_LOCAL(1), OFF_SWITCH(2);
public int index;
private NodeType(int index) {
this.index = index;
}
}
| 1,047 | 31.75 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ActiveUsersManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.utils.Lock;
/**
* {@link ActiveUsersManager} tracks active users in the system.
* A user is deemed to be active if they have any running applications with
* outstanding resource requests.
*
* An active user is defined as someone with outstanding resource requests.
*/
@Private
public class ActiveUsersManager {
private static final Log LOG = LogFactory.getLog(ActiveUsersManager.class);
private final QueueMetrics metrics;
private int activeUsers = 0;
private Map<String, Set<ApplicationId>> usersApplications =
new HashMap<String, Set<ApplicationId>>();
public ActiveUsersManager(QueueMetrics metrics) {
this.metrics = metrics;
}
/**
* An application has new outstanding requests.
*
* @param user application user
* @param applicationId activated application
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
synchronized public void activateApplication(
String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps == null) {
userApps = new HashSet<ApplicationId>();
usersApplications.put(user, userApps);
++activeUsers;
metrics.incrActiveUsers();
LOG.debug("User " + user + " added to activeUsers, currently: " +
activeUsers);
}
if (userApps.add(applicationId)) {
metrics.activateApp(user);
}
}
/**
* An application has no more outstanding requests.
*
* @param user application user
* @param applicationId deactivated application
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
synchronized public void deactivateApplication(
String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps != null) {
if (userApps.remove(applicationId)) {
metrics.deactivateApp(user);
}
if (userApps.isEmpty()) {
usersApplications.remove(user);
--activeUsers;
metrics.decrActiveUsers();
LOG.debug("User " + user + " removed from activeUsers, currently: " +
activeUsers);
}
}
}
/**
* Get the number of active users, i.e. users with applications that have
* pending resource requests.
* @return number of active users
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
synchronized public int getNumActiveUsers() {
return activeUsers;
}
}
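// Illustrative usage sketch, not part of the original source: it shows how a
// queue-side caller drives ActiveUsersManager. The QueueMetrics argument, the
// user name and the application ids below are hypothetical placeholders.
class ActiveUsersManagerUsageSketch {
  static void demo(QueueMetrics queueMetrics) {
    ActiveUsersManager activeUsers = new ActiveUsersManager(queueMetrics);
    ApplicationId app1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationId app2 = ApplicationId.newInstance(System.currentTimeMillis(), 2);
    // Two applications from the same user still count as one active user.
    activeUsers.activateApplication("alice", app1);
    activeUsers.activateApplication("alice", app2);
    assert activeUsers.getNumActiveUsers() == 1;
    // The user remains active until all of its applications are deactivated.
    activeUsers.deactivateApplication("alice", app1);
    assert activeUsers.getNumActiveUsers() == 1;
    activeUsers.deactivateApplication("alice", app2);
    assert activeUsers.getNumActiveUsers() == 0;
  }
}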
| 3,661 | 32.290909 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
 * This class keeps track of all the resource consumption of an application,
 * as well as its currently running and completed containers.
*/
@Private
@Unstable
public class AppSchedulingInfo {
private static final Log LOG = LogFactory.getLog(AppSchedulingInfo.class);
private final ApplicationAttemptId applicationAttemptId;
final ApplicationId applicationId;
private String queueName;
Queue queue;
final String user;
// TODO making containerIdCounter long
private final AtomicLong containerIdCounter;
private final int EPOCH_BIT_SHIFT = 40;
final Set<Priority> priorities = new TreeSet<Priority>(
new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
final Map<Priority, Map<String, ResourceRequest>> requests =
new ConcurrentHashMap<Priority, Map<String, ResourceRequest>>();
private Set<String> blacklist = new HashSet<String>();
//private final ApplicationStore store;
private ActiveUsersManager activeUsersManager;
/* Allocated by scheduler */
boolean pending = true; // for app metrics
private ResourceUsage appResourceUsage;
public AppSchedulingInfo(ApplicationAttemptId appAttemptId,
String user, Queue queue, ActiveUsersManager activeUsersManager,
long epoch, ResourceUsage appResourceUsage) {
this.applicationAttemptId = appAttemptId;
this.applicationId = appAttemptId.getApplicationId();
this.queue = queue;
this.queueName = queue.getQueueName();
this.user = user;
this.activeUsersManager = activeUsersManager;
this.containerIdCounter = new AtomicLong(epoch << EPOCH_BIT_SHIFT);
this.appResourceUsage = appResourceUsage;
}
public ApplicationId getApplicationId() {
return applicationId;
}
public ApplicationAttemptId getApplicationAttemptId() {
return applicationAttemptId;
}
public String getQueueName() {
return queueName;
}
public String getUser() {
return user;
}
public synchronized boolean isPending() {
return pending;
}
/**
* Clear any pending requests from this application.
*/
private synchronized void clearRequests() {
priorities.clear();
requests.clear();
LOG.info("Application " + applicationId + " requests cleared");
}
public long getNewContainerId() {
return this.containerIdCounter.incrementAndGet();
}
/**
* The ApplicationMaster is updating resource requirements for the
* application, by asking for more resources and releasing resources acquired
* by the application.
*
* @param requests resources to be acquired
   * @param recoverPreemptedRequest whether to recover the resource request of a
   *          preempted container
   * @return true if any resource was updated, false otherwise
*/
synchronized public boolean updateResourceRequests(
List<ResourceRequest> requests, boolean recoverPreemptedRequest) {
QueueMetrics metrics = queue.getMetrics();
boolean anyResourcesUpdated = false;
// Update resource requests
for (ResourceRequest request : requests) {
Priority priority = request.getPriority();
String resourceName = request.getResourceName();
boolean updatePendingResources = false;
ResourceRequest lastRequest = null;
if (resourceName.equals(ResourceRequest.ANY)) {
if (LOG.isDebugEnabled()) {
LOG.debug("update:" + " application=" + applicationId + " request="
+ request);
}
updatePendingResources = true;
anyResourcesUpdated = true;
// Premature optimization?
// Assumes that we won't see more than one priority request updated
// in one call, reasonable assumption... however, it's totally safe
// to activate same application more than once.
// Thus we don't need another loop ala the one in decrementOutstanding()
// which is needed during deactivate.
if (request.getNumContainers() > 0) {
activeUsersManager.activateApplication(user, applicationId);
}
}
Map<String, ResourceRequest> asks = this.requests.get(priority);
if (asks == null) {
asks = new ConcurrentHashMap<String, ResourceRequest>();
this.requests.put(priority, asks);
this.priorities.add(priority);
}
lastRequest = asks.get(resourceName);
if (recoverPreemptedRequest && lastRequest != null) {
        // Increment the number of containers by 1, as it is recovering a
        // single container.
request.setNumContainers(lastRequest.getNumContainers() + 1);
}
asks.put(resourceName, request);
if (updatePendingResources) {
// Similarly, deactivate application?
if (request.getNumContainers() <= 0) {
LOG.info("checking for deactivate of application :"
+ this.applicationId);
checkForDeactivation();
}
int lastRequestContainers = lastRequest != null ? lastRequest
.getNumContainers() : 0;
Resource lastRequestCapability = lastRequest != null ? lastRequest
.getCapability() : Resources.none();
metrics.incrPendingResources(user, request.getNumContainers(),
request.getCapability());
metrics.decrPendingResources(user, lastRequestContainers,
lastRequestCapability);
// update queue:
Resource increasedResource = Resources.multiply(request.getCapability(),
request.getNumContainers());
queue.incPendingResource(
request.getNodeLabelExpression(),
increasedResource);
appResourceUsage.incPending(request.getNodeLabelExpression(), increasedResource);
if (lastRequest != null) {
Resource decreasedResource =
Resources.multiply(lastRequestCapability, lastRequestContainers);
queue.decPendingResource(lastRequest.getNodeLabelExpression(),
decreasedResource);
appResourceUsage.decPending(lastRequest.getNodeLabelExpression(),
decreasedResource);
}
}
}
return anyResourcesUpdated;
}
/**
* The ApplicationMaster is updating the blacklist
*
* @param blacklistAdditions resources to be added to the blacklist
* @param blacklistRemovals resources to be removed from the blacklist
*/
synchronized public void updateBlacklist(
List<String> blacklistAdditions, List<String> blacklistRemovals) {
// Add to blacklist
if (blacklistAdditions != null) {
blacklist.addAll(blacklistAdditions);
}
// Remove from blacklist
if (blacklistRemovals != null) {
blacklist.removeAll(blacklistRemovals);
}
}
synchronized public Collection<Priority> getPriorities() {
return priorities;
}
synchronized public Map<String, ResourceRequest> getResourceRequests(
Priority priority) {
return requests.get(priority);
}
public List<ResourceRequest> getAllResourceRequests() {
List<ResourceRequest> ret = new ArrayList<ResourceRequest>();
for (Map<String, ResourceRequest> r : requests.values()) {
ret.addAll(r.values());
}
return ret;
}
synchronized public ResourceRequest getResourceRequest(Priority priority,
String resourceName) {
Map<String, ResourceRequest> nodeRequests = requests.get(priority);
return (nodeRequests == null) ? null : nodeRequests.get(resourceName);
}
public synchronized Resource getResource(Priority priority) {
ResourceRequest request = getResourceRequest(priority, ResourceRequest.ANY);
return (request == null) ? null : request.getCapability();
}
public synchronized boolean isBlacklisted(String resourceName) {
return blacklist.contains(resourceName);
}
/**
* Resources have been allocated to this application by the resource
* scheduler. Track them.
*
* @param type
* the type of the node
* @param node
* the nodeinfo of the node
* @param priority
* the priority of the request.
   * @param request
   *          the request
   * @param container
   *          the container allocated.
   * @return the resource requests, cloned for recovery, satisfied by this
   *          allocation
   */
synchronized public List<ResourceRequest> allocate(NodeType type,
SchedulerNode node, Priority priority, ResourceRequest request,
Container container) {
List<ResourceRequest> resourceRequests = new ArrayList<ResourceRequest>();
if (type == NodeType.NODE_LOCAL) {
allocateNodeLocal(node, priority, request, container, resourceRequests);
} else if (type == NodeType.RACK_LOCAL) {
allocateRackLocal(node, priority, request, container, resourceRequests);
} else {
allocateOffSwitch(node, priority, request, container, resourceRequests);
}
QueueMetrics metrics = queue.getMetrics();
if (pending) {
// once an allocation is done we assume the application is
// running from scheduler's POV.
pending = false;
metrics.runAppAttempt(applicationId, user);
}
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: applicationId=" + applicationId
+ " container=" + container.getId()
+ " host=" + container.getNodeId().toString()
+ " user=" + user
+ " resource=" + request.getCapability());
}
metrics.allocateResources(user, 1, request.getCapability(), true);
return resourceRequests;
}
  /**
   * The {@link ResourceScheduler} is allocating node-local resources to the
   * application: decrement the matching node-local, rack-local and off-switch
   * requests and record clones of them for recovery.
   */
synchronized private void allocateNodeLocal(SchedulerNode node,
Priority priority, ResourceRequest nodeLocalRequest, Container container,
List<ResourceRequest> resourceRequests) {
// Update future requirements
decResourceRequest(node.getNodeName(), priority, nodeLocalRequest);
ResourceRequest rackLocalRequest = requests.get(priority).get(
node.getRackName());
decResourceRequest(node.getRackName(), priority, rackLocalRequest);
ResourceRequest offRackRequest = requests.get(priority).get(
ResourceRequest.ANY);
decrementOutstanding(offRackRequest);
// Update cloned NodeLocal, RackLocal and OffRack requests for recovery
resourceRequests.add(cloneResourceRequest(nodeLocalRequest));
resourceRequests.add(cloneResourceRequest(rackLocalRequest));
resourceRequests.add(cloneResourceRequest(offRackRequest));
}
private void decResourceRequest(String resourceName, Priority priority,
ResourceRequest request) {
request.setNumContainers(request.getNumContainers() - 1);
if (request.getNumContainers() == 0) {
requests.get(priority).remove(resourceName);
}
}
  /**
   * The {@link ResourceScheduler} is allocating rack-local resources to the
   * application: decrement the matching rack-local and off-switch requests
   * and record clones of them for recovery.
   */
synchronized private void allocateRackLocal(SchedulerNode node,
Priority priority, ResourceRequest rackLocalRequest, Container container,
List<ResourceRequest> resourceRequests) {
// Update future requirements
decResourceRequest(node.getRackName(), priority, rackLocalRequest);
ResourceRequest offRackRequest = requests.get(priority).get(
ResourceRequest.ANY);
decrementOutstanding(offRackRequest);
// Update cloned RackLocal and OffRack requests for recovery
resourceRequests.add(cloneResourceRequest(rackLocalRequest));
resourceRequests.add(cloneResourceRequest(offRackRequest));
}
  /**
   * The {@link ResourceScheduler} is allocating off-switch resources to the
   * application: decrement the off-switch request and record a clone of it
   * for recovery.
   */
synchronized private void allocateOffSwitch(SchedulerNode node,
Priority priority, ResourceRequest offSwitchRequest, Container container,
List<ResourceRequest> resourceRequests) {
// Update future requirements
decrementOutstanding(offSwitchRequest);
// Update cloned OffRack requests for recovery
resourceRequests.add(cloneResourceRequest(offSwitchRequest));
}
synchronized private void decrementOutstanding(
ResourceRequest offSwitchRequest) {
int numOffSwitchContainers = offSwitchRequest.getNumContainers() - 1;
// Do not remove ANY
offSwitchRequest.setNumContainers(numOffSwitchContainers);
// Do we have any outstanding requests?
// If there is nothing, we need to deactivate this application
if (numOffSwitchContainers == 0) {
checkForDeactivation();
}
appResourceUsage.decPending(offSwitchRequest.getNodeLabelExpression(),
offSwitchRequest.getCapability());
queue.decPendingResource(offSwitchRequest.getNodeLabelExpression(),
offSwitchRequest.getCapability());
}
synchronized private void checkForDeactivation() {
boolean deactivate = true;
for (Priority priority : getPriorities()) {
ResourceRequest request = getResourceRequest(priority, ResourceRequest.ANY);
if (request != null) {
if (request.getNumContainers() > 0) {
deactivate = false;
break;
}
}
}
if (deactivate) {
activeUsersManager.deactivateApplication(user, applicationId);
}
}
synchronized public void move(Queue newQueue) {
QueueMetrics oldMetrics = queue.getMetrics();
QueueMetrics newMetrics = newQueue.getMetrics();
for (Map<String, ResourceRequest> asks : requests.values()) {
ResourceRequest request = asks.get(ResourceRequest.ANY);
if (request != null) {
oldMetrics.decrPendingResources(user, request.getNumContainers(),
request.getCapability());
newMetrics.incrPendingResources(user, request.getNumContainers(),
request.getCapability());
Resource delta = Resources.multiply(request.getCapability(),
request.getNumContainers());
// Update Queue
queue.decPendingResource(request.getNodeLabelExpression(), delta);
newQueue.incPendingResource(request.getNodeLabelExpression(), delta);
}
}
oldMetrics.moveAppFrom(this);
newMetrics.moveAppTo(this);
activeUsersManager.deactivateApplication(user, applicationId);
activeUsersManager = newQueue.getActiveUsersManager();
activeUsersManager.activateApplication(user, applicationId);
this.queue = newQueue;
this.queueName = newQueue.getQueueName();
}
synchronized public void stop(RMAppAttemptState rmAppAttemptFinalState) {
// clear pending resources metrics for the application
QueueMetrics metrics = queue.getMetrics();
for (Map<String, ResourceRequest> asks : requests.values()) {
ResourceRequest request = asks.get(ResourceRequest.ANY);
if (request != null) {
metrics.decrPendingResources(user, request.getNumContainers(),
request.getCapability());
// Update Queue
queue.decPendingResource(
request.getNodeLabelExpression(),
Resources.multiply(request.getCapability(),
request.getNumContainers()));
}
}
metrics.finishAppAttempt(applicationId, pending, user);
// Clear requests themselves
clearRequests();
}
public synchronized void setQueue(Queue queue) {
this.queue = queue;
}
public synchronized Set<String> getBlackList() {
return this.blacklist;
}
public synchronized Set<String> getBlackListCopy() {
return new HashSet<>(this.blacklist);
}
public synchronized void transferStateFromPreviousAppSchedulingInfo(
AppSchedulingInfo appInfo) {
// this.priorities = appInfo.getPriorities();
// this.requests = appInfo.getRequests();
this.blacklist = appInfo.getBlackList();
}
public synchronized void recoverContainer(RMContainer rmContainer) {
QueueMetrics metrics = queue.getMetrics();
if (pending) {
// If there was any container to recover, the application was
// running from scheduler's POV.
pending = false;
metrics.runAppAttempt(applicationId, user);
}
// Container is completed. Skip recovering resources.
if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
return;
}
metrics.allocateResources(user, 1, rmContainer.getAllocatedResource(),
false);
}
public ResourceRequest cloneResourceRequest(ResourceRequest request) {
ResourceRequest newRequest =
ResourceRequest.newInstance(request.getPriority(),
request.getResourceName(), request.getCapability(), 1,
request.getRelaxLocality(), request.getNodeLabelExpression());
return newRequest;
}
}
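// Illustrative sketch, not part of the original source: getNewContainerId()
// returns values from an AtomicLong seeded with (epoch << 40) in the
// constructor above, so the high bits carry the RM restart epoch and the low
// 40 bits carry a per-attempt sequence number. The epoch value below is a
// hypothetical example chosen only to show the bit layout.
class ContainerIdEpochSketch {
  static void demo() {
    final int epochBitShift = 40;               // mirrors EPOCH_BIT_SHIFT above
    long epoch = 3L;                            // e.g. the RM's third restart
    long counter = epoch << epochBitShift;      // seed, as in the constructor
    long firstContainerId = ++counter;          // what getNewContainerId() yields
    assert (firstContainerId >>> epochBitShift) == epoch;          // epoch bits
    assert (firstContainerId & ((1L << epochBitShift) - 1)) == 1;  // sequence bits
  }
}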
| 18,737 | 35.243714 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* Node usage report.
*/
@Private
@Stable
public class SchedulerNodeReport {
private final Resource used;
private final Resource avail;
private final int num;
public SchedulerNodeReport(SchedulerNode node) {
this.used = node.getUsedResource();
this.avail = node.getAvailableResource();
this.num = node.getNumContainers();
}
/**
* @return the amount of resources currently used by the node.
*/
public Resource getUsedResource() {
return used;
}
/**
* @return the amount of resources currently available on the node
*/
public Resource getAvailableResource() {
return avail;
}
/**
* @return the number of containers currently running on this node.
*/
public int getNumContainers() {
return num;
}
}
| 1,840 | 28.693548 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMoveEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFinishedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerRecoverEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.SettableFuture;
@SuppressWarnings("unchecked")
@Private
@Unstable
public abstract class AbstractYarnScheduler
<T extends SchedulerApplicationAttempt, N extends SchedulerNode>
extends AbstractService implements ResourceScheduler {
private static final Log LOG = LogFactory.getLog(AbstractYarnScheduler.class);
// Nodes in the cluster, indexed by NodeId
protected Map<NodeId, N> nodes = new ConcurrentHashMap<NodeId, N>();
// Whole capacity of the cluster
protected Resource clusterResource = Resource.newInstance(0, 0);
protected Resource minimumAllocation;
private Resource maximumAllocation;
private Resource configuredMaximumAllocation;
private int maxNodeMemory = -1;
private int maxNodeVCores = -1;
private final ReadLock maxAllocReadLock;
private final WriteLock maxAllocWriteLock;
private boolean useConfiguredMaximumAllocationOnly = true;
private long configuredMaximumAllocationWaitTime;
protected RMContext rmContext;
/*
   * All schedulers that inherit from AbstractYarnScheduler should use a
   * concurrent version of the 'applications' map.
*/
protected ConcurrentMap<ApplicationId, SchedulerApplication<T>> applications;
protected int nmExpireInterval;
protected final static List<Container> EMPTY_CONTAINER_LIST =
new ArrayList<Container>();
protected static final Allocation EMPTY_ALLOCATION = new Allocation(
EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
/**
* Construct the service.
*
* @param name service name
*/
public AbstractYarnScheduler(String name) {
super(name);
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
this.maxAllocReadLock = lock.readLock();
this.maxAllocWriteLock = lock.writeLock();
}
@Override
public void serviceInit(Configuration conf) throws Exception {
nmExpireInterval =
conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
configuredMaximumAllocationWaitTime =
conf.getLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
createReleaseCache();
super.serviceInit(conf);
}
public List<Container> getTransferredContainers(
ApplicationAttemptId currentAttempt) {
ApplicationId appId = currentAttempt.getApplicationId();
SchedulerApplication<T> app = applications.get(appId);
List<Container> containerList = new ArrayList<Container>();
RMApp appImpl = this.rmContext.getRMApps().get(appId);
if (appImpl.getApplicationSubmissionContext().getUnmanagedAM()) {
return containerList;
}
if (app == null) {
return containerList;
}
Collection<RMContainer> liveContainers =
app.getCurrentAppAttempt().getLiveContainers();
ContainerId amContainerId =
rmContext.getRMApps().get(appId).getCurrentAppAttempt()
.getMasterContainer().getId();
for (RMContainer rmContainer : liveContainers) {
if (!rmContainer.getContainerId().equals(amContainerId)) {
containerList.add(rmContainer.getContainer());
}
}
return containerList;
}
public Map<ApplicationId, SchedulerApplication<T>>
getSchedulerApplications() {
return applications;
}
@Override
public Resource getClusterResource() {
return clusterResource;
}
@Override
public Resource getMinimumResourceCapability() {
return minimumAllocation;
}
@Override
public Resource getMaximumResourceCapability() {
Resource maxResource;
maxAllocReadLock.lock();
try {
if (useConfiguredMaximumAllocationOnly) {
if (System.currentTimeMillis() - ResourceManager.getClusterTimeStamp()
> configuredMaximumAllocationWaitTime) {
useConfiguredMaximumAllocationOnly = false;
}
maxResource = Resources.clone(configuredMaximumAllocation);
} else {
maxResource = Resources.clone(maximumAllocation);
}
} finally {
maxAllocReadLock.unlock();
}
return maxResource;
}
@Override
public Resource getMaximumResourceCapability(String queueName) {
return getMaximumResourceCapability();
}
protected void initMaximumResourceCapability(Resource maximumAllocation) {
maxAllocWriteLock.lock();
try {
if (this.configuredMaximumAllocation == null) {
this.configuredMaximumAllocation = Resources.clone(maximumAllocation);
this.maximumAllocation = Resources.clone(maximumAllocation);
}
} finally {
maxAllocWriteLock.unlock();
}
}
protected synchronized void containerLaunchedOnNode(
ContainerId containerId, SchedulerNode node) {
// Get the application for the finished container
SchedulerApplicationAttempt application = getCurrentAttemptForContainer
(containerId);
if (application == null) {
LOG.info("Unknown application "
+ containerId.getApplicationAttemptId().getApplicationId()
+ " launched container " + containerId + " on node: " + node);
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeCleanContainerEvent(node.getNodeID(), containerId));
return;
}
application.containerLaunchedOnNode(containerId, node.getNodeID());
}
public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
SchedulerApplication<T> app =
applications.get(applicationAttemptId.getApplicationId());
return app == null ? null : app.getCurrentAppAttempt();
}
@Override
public SchedulerAppReport getSchedulerAppInfo(
ApplicationAttemptId appAttemptId) {
SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
if (attempt == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Request for appInfo of unknown attempt " + appAttemptId);
}
return null;
}
return new SchedulerAppReport(attempt);
}
@Override
public ApplicationResourceUsageReport getAppResourceUsageReport(
ApplicationAttemptId appAttemptId) {
SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
if (attempt == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Request for appInfo of unknown attempt " + appAttemptId);
}
return null;
}
return attempt.getResourceUsageReport();
}
public T getCurrentAttemptForContainer(ContainerId containerId) {
return getApplicationAttempt(containerId.getApplicationAttemptId());
}
@Override
public RMContainer getRMContainer(ContainerId containerId) {
SchedulerApplicationAttempt attempt =
getCurrentAttemptForContainer(containerId);
return (attempt == null) ? null : attempt.getRMContainer(containerId);
}
@Override
public SchedulerNodeReport getNodeReport(NodeId nodeId) {
N node = nodes.get(nodeId);
return node == null ? null : new SchedulerNodeReport(node);
}
@Override
public String moveApplication(ApplicationId appId, String newQueue)
throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support moving apps between queues");
}
public void removeQueue(String queueName) throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support removing queues");
}
@Override
public void addQueue(Queue newQueue) throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support this operation");
}
@Override
public void setEntitlement(String queue, QueueEntitlement entitlement)
throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support this operation");
}
private void killOrphanContainerOnNode(RMNode node,
NMContainerStatus container) {
if (!container.getContainerState().equals(ContainerState.COMPLETE)) {
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeCleanContainerEvent(node.getNodeID(),
container.getContainerId()));
}
}
public synchronized void recoverContainersOnNode(
List<NMContainerStatus> containerReports, RMNode nm) {
    if (!rmContext.isWorkPreservingRecoveryEnabled()
        || containerReports == null
        || containerReports.isEmpty()) {
return;
}
for (NMContainerStatus container : containerReports) {
ApplicationId appId =
container.getContainerId().getApplicationAttemptId().getApplicationId();
RMApp rmApp = rmContext.getRMApps().get(appId);
if (rmApp == null) {
LOG.error("Skip recovering container " + container
+ " for unknown application.");
killOrphanContainerOnNode(nm, container);
continue;
}
// Unmanaged AM recovery is addressed in YARN-1815
if (rmApp.getApplicationSubmissionContext().getUnmanagedAM()) {
LOG.info("Skip recovering container " + container + " for unmanaged AM."
+ rmApp.getApplicationId());
killOrphanContainerOnNode(nm, container);
continue;
}
SchedulerApplication<T> schedulerApp = applications.get(appId);
if (schedulerApp == null) {
LOG.info("Skip recovering container " + container
+ " for unknown SchedulerApplication. Application current state is "
+ rmApp.getState());
killOrphanContainerOnNode(nm, container);
continue;
}
LOG.info("Recovering container " + container);
SchedulerApplicationAttempt schedulerAttempt =
schedulerApp.getCurrentAppAttempt();
if (!rmApp.getApplicationSubmissionContext()
.getKeepContainersAcrossApplicationAttempts()) {
// Do not recover containers for stopped attempt or previous attempt.
if (schedulerAttempt.isStopped()
|| !schedulerAttempt.getApplicationAttemptId().equals(
container.getContainerId().getApplicationAttemptId())) {
LOG.info("Skip recovering container " + container
+ " for already stopped attempt.");
killOrphanContainerOnNode(nm, container);
continue;
}
}
// create container
RMContainer rmContainer = recoverAndCreateContainer(container, nm);
// recover RMContainer
rmContainer.handle(new RMContainerRecoverEvent(container.getContainerId(),
container));
// recover scheduler node
SchedulerNode schedulerNode = nodes.get(nm.getNodeID());
schedulerNode.recoverContainer(rmContainer);
// recover queue: update headroom etc.
Queue queue = schedulerAttempt.getQueue();
queue.recoverContainer(clusterResource, schedulerAttempt, rmContainer);
// recover scheduler attempt
schedulerAttempt.recoverContainer(schedulerNode, rmContainer);
// set master container for the current running AMContainer for this
// attempt.
RMAppAttempt appAttempt = rmApp.getCurrentAppAttempt();
if (appAttempt != null) {
Container masterContainer = appAttempt.getMasterContainer();
// Mark current running AMContainer's RMContainer based on the master
// container ID stored in AppAttempt.
if (masterContainer != null
&& masterContainer.getId().equals(rmContainer.getContainerId())) {
((RMContainerImpl)rmContainer).setAMContainer(true);
}
}
synchronized (schedulerAttempt) {
Set<ContainerId> releases = schedulerAttempt.getPendingRelease();
if (releases.contains(container.getContainerId())) {
// release the container
rmContainer.handle(new RMContainerFinishedEvent(container
.getContainerId(), SchedulerUtils.createAbnormalContainerStatus(
container.getContainerId(), SchedulerUtils.RELEASED_CONTAINER),
RMContainerEventType.RELEASED));
releases.remove(container.getContainerId());
LOG.info(container.getContainerId() + " is released by application.");
}
}
}
}
private RMContainer recoverAndCreateContainer(NMContainerStatus status,
RMNode node) {
Container container =
Container.newInstance(status.getContainerId(), node.getNodeID(),
node.getHttpAddress(), status.getAllocatedResource(),
status.getPriority(), null);
ApplicationAttemptId attemptId =
container.getId().getApplicationAttemptId();
RMContainer rmContainer =
new RMContainerImpl(container, attemptId, node.getNodeID(),
applications.get(attemptId.getApplicationId()).getUser(), rmContext,
status.getCreationTime(), status.getNodeLabelExpression());
return rmContainer;
}
  /**
   * Recover the resource requests from an RMContainer when the container is
   * preempted before the AM has pulled it. If the container has already been
   * pulled by the AM, the RMContainer will have no resource requests to
   * recover.
   * @param rmContainer the preempted container
   */
protected void recoverResourceRequestForContainer(RMContainer rmContainer) {
List<ResourceRequest> requests = rmContainer.getResourceRequests();
    // If the container state has moved to ACQUIRED, there are no requests
    // left to recover.
if (requests == null) {
return;
}
// Add resource request back to Scheduler.
SchedulerApplicationAttempt schedulerAttempt
= getCurrentAttemptForContainer(rmContainer.getContainerId());
if (schedulerAttempt != null) {
schedulerAttempt.recoverResourceRequests(requests);
}
}
protected void createReleaseCache() {
// Cleanup the cache after nm expire interval.
new Timer().schedule(new TimerTask() {
@Override
public void run() {
clearPendingContainerCache();
LOG.info("Release request cache is cleaned up");
}
}, nmExpireInterval);
}
@VisibleForTesting
public void clearPendingContainerCache() {
for (SchedulerApplication<T> app : applications.values()) {
T attempt = app.getCurrentAppAttempt();
if (attempt != null) {
synchronized (attempt) {
for (ContainerId containerId : attempt.getPendingRelease()) {
RMAuditLogger.logFailure(app.getUser(),
AuditConstants.RELEASE_CONTAINER,
"Unauthorized access or invalid container", "Scheduler",
"Trying to release container not owned by app "
+ "or with invalid id.", attempt.getApplicationId(),
containerId);
}
attempt.getPendingRelease().clear();
}
}
}
}
// clean up a completed container
protected abstract void completedContainer(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event);
protected void releaseContainers(List<ContainerId> containers,
SchedulerApplicationAttempt attempt) {
for (ContainerId containerId : containers) {
RMContainer rmContainer = getRMContainer(containerId);
if (rmContainer == null) {
if (System.currentTimeMillis() - ResourceManager.getClusterTimeStamp()
< nmExpireInterval) {
          LOG.info(containerId + " doesn't exist. Add the container"
              + " to the release request cache as it may be on recovery.");
synchronized (attempt) {
attempt.getPendingRelease().add(containerId);
}
} else {
RMAuditLogger.logFailure(attempt.getUser(),
AuditConstants.RELEASE_CONTAINER,
"Unauthorized access or invalid container", "Scheduler",
"Trying to release container not owned by app or with invalid id.",
attempt.getApplicationId(), containerId);
}
}
completedContainer(rmContainer,
SchedulerUtils.createAbnormalContainerStatus(containerId,
SchedulerUtils.RELEASED_CONTAINER), RMContainerEventType.RELEASED);
}
}
public SchedulerNode getSchedulerNode(NodeId nodeId) {
return nodes.get(nodeId);
}
@Override
public synchronized void moveAllApps(String sourceQueue, String destQueue)
throws YarnException {
// check if destination queue is a valid leaf queue
try {
getQueueInfo(destQueue, false, false);
} catch (IOException e) {
LOG.warn(e);
throw new YarnException(e);
}
    // check if the source queue is valid
List<ApplicationAttemptId> apps = getAppsInQueue(sourceQueue);
if (apps == null) {
String errMsg = "The specified Queue: " + sourceQueue + " doesn't exist";
LOG.warn(errMsg);
throw new YarnException(errMsg);
}
// generate move events for each pending/running app
for (ApplicationAttemptId app : apps) {
SettableFuture<Object> future = SettableFuture.create();
this.rmContext
.getDispatcher()
.getEventHandler()
.handle(new RMAppMoveEvent(app.getApplicationId(), destQueue, future));
}
}
@Override
public synchronized void killAllAppsInQueue(String queueName)
throws YarnException {
    // check if the queue is valid
List<ApplicationAttemptId> apps = getAppsInQueue(queueName);
if (apps == null) {
String errMsg = "The specified Queue: " + queueName + " doesn't exist";
LOG.warn(errMsg);
throw new YarnException(errMsg);
}
// generate kill events for each pending/running app
for (ApplicationAttemptId app : apps) {
this.rmContext
.getDispatcher()
.getEventHandler()
.handle(new RMAppEvent(app.getApplicationId(), RMAppEventType.KILL));
}
}
/**
* Process resource update on a node.
*/
public synchronized void updateNodeResource(RMNode nm,
ResourceOption resourceOption) {
SchedulerNode node = getSchedulerNode(nm.getNodeID());
Resource newResource = resourceOption.getResource();
Resource oldResource = node.getTotalResource();
if(!oldResource.equals(newResource)) {
// Notify NodeLabelsManager about this change
rmContext.getNodeLabelManager().updateNodeResource(nm.getNodeID(),
newResource);
// Log resource change
LOG.info("Update resource on node: " + node.getNodeName()
+ " from: " + oldResource + ", to: "
+ newResource);
nodes.remove(nm.getNodeID());
updateMaximumAllocation(node, false);
// update resource to node
node.setTotalResource(newResource);
nodes.put(nm.getNodeID(), (N)node);
updateMaximumAllocation(node, true);
// update resource to clusterResource
Resources.subtractFrom(clusterResource, oldResource);
Resources.addTo(clusterResource, newResource);
} else {
// Log resource change
LOG.warn("Update resource on node: " + node.getNodeName()
+ " with the same resource: " + newResource);
}
}
/** {@inheritDoc} */
@Override
public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes() {
return EnumSet.of(SchedulerResourceTypes.MEMORY);
}
@Override
public Set<String> getPlanQueues() throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support reservations");
}
protected void updateMaximumAllocation(SchedulerNode node, boolean add) {
Resource totalResource = node.getTotalResource();
maxAllocWriteLock.lock();
try {
if (add) { // added node
int nodeMemory = totalResource.getMemory();
if (nodeMemory > maxNodeMemory) {
maxNodeMemory = nodeMemory;
maximumAllocation.setMemory(Math.min(
configuredMaximumAllocation.getMemory(), maxNodeMemory));
}
int nodeVCores = totalResource.getVirtualCores();
if (nodeVCores > maxNodeVCores) {
maxNodeVCores = nodeVCores;
maximumAllocation.setVirtualCores(Math.min(
configuredMaximumAllocation.getVirtualCores(), maxNodeVCores));
}
} else { // removed node
if (maxNodeMemory == totalResource.getMemory()) {
maxNodeMemory = -1;
}
if (maxNodeVCores == totalResource.getVirtualCores()) {
maxNodeVCores = -1;
}
// We only have to iterate through the nodes if the current max memory
// or vcores was equal to the removed node's
if (maxNodeMemory == -1 || maxNodeVCores == -1) {
for (Map.Entry<NodeId, N> nodeEntry : nodes.entrySet()) {
int nodeMemory =
nodeEntry.getValue().getTotalResource().getMemory();
if (nodeMemory > maxNodeMemory) {
maxNodeMemory = nodeMemory;
}
int nodeVCores =
nodeEntry.getValue().getTotalResource().getVirtualCores();
if (nodeVCores > maxNodeVCores) {
maxNodeVCores = nodeVCores;
}
}
if (maxNodeMemory == -1) { // no nodes
maximumAllocation.setMemory(configuredMaximumAllocation.getMemory());
} else {
maximumAllocation.setMemory(
Math.min(configuredMaximumAllocation.getMemory(), maxNodeMemory));
}
if (maxNodeVCores == -1) { // no nodes
maximumAllocation.setVirtualCores(configuredMaximumAllocation.getVirtualCores());
} else {
maximumAllocation.setVirtualCores(
Math.min(configuredMaximumAllocation.getVirtualCores(), maxNodeVCores));
}
}
}
} finally {
maxAllocWriteLock.unlock();
}
}
protected void refreshMaximumAllocation(Resource newMaxAlloc) {
maxAllocWriteLock.lock();
try {
configuredMaximumAllocation = Resources.clone(newMaxAlloc);
int maxMemory = newMaxAlloc.getMemory();
if (maxNodeMemory != -1) {
maxMemory = Math.min(maxMemory, maxNodeMemory);
}
int maxVcores = newMaxAlloc.getVirtualCores();
if (maxNodeVCores != -1) {
maxVcores = Math.min(maxVcores, maxNodeVCores);
}
maximumAllocation = Resources.createResource(maxMemory, maxVcores);
} finally {
maxAllocWriteLock.unlock();
}
}
public List<ResourceRequest> getPendingResourceRequestsForAttempt(
ApplicationAttemptId attemptId) {
SchedulerApplicationAttempt attempt = getApplicationAttempt(attemptId);
if (attempt != null) {
return attempt.getAppSchedulingInfo().getAllResourceRequests();
}
return null;
}
@Override
public Priority checkAndGetApplicationPriority(Priority priorityFromContext,
String user, String queueName, ApplicationId applicationId)
throws YarnException {
    // Dummy implementation until the Application Priority changes are done in
    // the specific schedulers.
return Priority.newInstance(0);
}
}
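// Illustrative sketch, not part of the original source: updateMaximumAllocation()
// and refreshMaximumAllocation() above cap the effective maximum allocation at
// min(configured maximum, largest registered node). The resource sizes below are
// hypothetical values chosen only to show that capping arithmetic.
class MaximumAllocationSketch {
  static void demo() {
    Resource configuredMax = Resource.newInstance(8192, 8); // configured maximum allocation
    int biggestNodeMemory = 6144;                           // largest NodeManager memory seen
    int biggestNodeVCores = 16;                             // largest NodeManager vcores seen
    // Effective maximum: memory is limited by the biggest node, vcores by the config.
    Resource effectiveMax = Resource.newInstance(
        Math.min(configuredMax.getMemory(), biggestNodeMemory),
        Math.min(configuredMax.getVirtualCores(), biggestNodeVCores));
    assert effectiveMax.getMemory() == 6144;
    assert effectiveMax.getVirtualCores() == 8;
  }
}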
| 26,759 | 36.957447 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@Private
@Unstable
public class SchedulerApplication<T extends SchedulerApplicationAttempt> {
private Queue queue;
private final String user;
private T currentAttempt;
private volatile Priority priority;
public SchedulerApplication(Queue queue, String user) {
this.queue = queue;
this.user = user;
this.priority = null;
}
public SchedulerApplication(Queue queue, String user, Priority priority) {
this.queue = queue;
this.user = user;
this.priority = priority;
}
public Queue getQueue() {
return queue;
}
public void setQueue(Queue queue) {
this.queue = queue;
}
public String getUser() {
return user;
}
public T getCurrentAppAttempt() {
return currentAttempt;
}
public void setCurrentAppAttempt(T currentAttempt) {
this.currentAttempt = currentAttempt;
}
public void stop(RMAppState rmAppFinalState) {
queue.getMetrics().finishApp(user, rmAppFinalState);
}
public Priority getPriority() {
return priority;
}
public void setPriority(Priority priority) {
this.priority = priority;
// Also set priority in current running attempt
if (null != currentAttempt) {
currentAttempt.setPriority(priority);
}
}
}
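// Illustrative sketch, not part of the original source: setPriority() pushes the
// new priority to the current attempt, as implemented above. The queue, attempt
// and priority value below are hypothetical inputs supplied by the caller.
class SchedulerApplicationPrioritySketch {
  static void demo(Queue queue, SchedulerApplicationAttempt attempt) {
    SchedulerApplication<SchedulerApplicationAttempt> app =
        new SchedulerApplication<SchedulerApplicationAttempt>(queue, "alice");
    app.setCurrentAppAttempt(attempt);
    // Propagated to the running attempt via attempt.setPriority(...).
    app.setPriority(Priority.newInstance(10));
  }
}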
| 2,356 | 27.059524 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TimeBucketMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.HashMap;
/**
 * Create a set of buckets that hold key-time pairs. When the bucket values
 * are queried, the number of objects whose time differences fall into each
 * bucket is returned.
 */
class TimeBucketMetrics<OBJ> {
private final HashMap<OBJ, Long> map = new HashMap<OBJ, Long>();
private final int[] counts;
private final long[] cuts;
/**
* Create a set of buckets based on a set of time points. The number of
* buckets is one more than the number of points.
*/
TimeBucketMetrics(long[] cuts) {
this.cuts = cuts;
counts = new int[cuts.length + 1];
}
/**
* Add an object to be counted
*/
synchronized void add(OBJ key, long time) {
map.put(key, time);
}
/**
   * Remove an object from being counted
*/
synchronized void remove(OBJ key) {
map.remove(key);
}
/**
* Find the bucket based on the cut points.
*/
private int findBucket(long val) {
for(int i=0; i < cuts.length; ++i) {
if (val < cuts[i]) {
return i;
}
}
return cuts.length;
}
/**
* Get the counts of how many keys are in each bucket. The same array is
* returned by each call to this method.
*/
synchronized int[] getBucketCounts(long now) {
for(int i=0; i < counts.length; ++i) {
counts[i] = 0;
}
for(Long time: map.values()) {
counts[findBucket(now - time)] += 1;
}
return counts;
}
}
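// Illustrative sketch, not part of the original source: the cut points passed to
// the constructor define bucket boundaries in whatever time unit the caller uses
// for add() and getBucketCounts(). The keys and times below are hypothetical.
class TimeBucketMetricsSketch {
  static void demo() {
    // Three buckets: age < 1000, 1000 <= age < 5000, and age >= 5000.
    TimeBucketMetrics<String> buckets =
        new TimeBucketMetrics<String>(new long[] {1000, 5000});
    buckets.add("container-a", 0L);     // tracked since time 0
    buckets.add("container-b", 4500L);  // tracked since time 4500
    int[] counts = buckets.getBucketCounts(5000L); // ages are 5000 and 500
    assert counts[0] == 1;  // container-b, age 500
    assert counts[1] == 0;
    assert counts[2] == 1;  // container-a, age 5000 lands in the last bucket
  }
}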
| 2,298 | 27.382716 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PreemptableResourceScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
/**
* Interface for a scheduler that supports preemption/killing
*
*/
public interface PreemptableResourceScheduler extends ResourceScheduler {
/**
   * If the scheduler supports container reservations, this method is used to
* ask the scheduler to drop the reservation for the given container.
* @param container Reference to reserved container allocation.
*/
void dropContainerReservation(RMContainer container);
/**
* Ask the scheduler to obtain back the container from a specific application
* by issuing a preemption request
* @param aid the application from which we want to get a container back
* @param container the container we want back
*/
void preemptContainer(ApplicationAttemptId aid, RMContainer container);
/**
* Ask the scheduler to forcibly interrupt the container given as input
* @param container
*/
void killContainer(RMContainer container);
}
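// Illustrative sketch, not part of the original source: a preemption policy
// would typically drop reservations outright, ask politely for running
// containers back, and only kill them after a grace period handled elsewhere.
// The scheduler, attempt id and containers below are hypothetical inputs.
class PreemptionCallerSketch {
  static void demo(PreemptableResourceScheduler scheduler,
      ApplicationAttemptId attemptId, RMContainer reserved, RMContainer running) {
    // A reservation holds no running work, so it is simply dropped.
    scheduler.dropContainerReservation(reserved);
    // A running container is first requested back from the application ...
    scheduler.preemptContainer(attemptId, running);
    // ... and forcibly interrupted only if it has not been released in time.
    scheduler.killContainer(running);
  }
}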
| 1,932 | 36.901961 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerAppReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
/**
* Represents an application attempt, and the resources that the attempt is
* using.
*/
@Evolving
@LimitedPrivate("yarn")
public class SchedulerAppReport {
private final Collection<RMContainer> live;
private final Collection<RMContainer> reserved;
private final boolean pending;
public SchedulerAppReport(SchedulerApplicationAttempt app) {
this.live = app.getLiveContainers();
this.reserved = app.getReservedContainers();
this.pending = app.isPending();
}
/**
* Get the list of live containers
* @return All of the live containers
*/
public Collection<RMContainer> getLiveContainers() {
return live;
}
/**
* Get the list of reserved containers
* @return All of the reserved containers.
*/
public Collection<RMContainer> getReservedContainers() {
return reserved;
}
/**
* Is this application pending?
   * @return true if it is, false otherwise.
*/
public boolean isPending() {
return pending;
}
}
| 2,115 | 29.666667 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
/**
* This interface is the one implemented by the schedulers. It mainly extends
* {@link YarnScheduler}.
*
*/
@LimitedPrivate("yarn")
@Evolving
public interface ResourceScheduler extends YarnScheduler, Recoverable {
/**
* Set RMContext for <code>ResourceScheduler</code>.
   * This method should be called exactly once, immediately after the
   * scheduler is instantiated.
* @param rmContext created by ResourceManager
*/
void setRMContext(RMContext rmContext);
/**
* Re-initialize the <code>ResourceScheduler</code>.
* @param conf configuration
* @throws IOException
*/
void reinitialize(Configuration conf, RMContext rmContext) throws IOException;
}
| 1,893 | 34.735849 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerPreemptEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
/**
 * Simple event class used to communicate container unreservations, preemptions,
 * and kills.
*/
public class ContainerPreemptEvent extends SchedulerEvent {
private final ApplicationAttemptId aid;
private final RMContainer container;
public ContainerPreemptEvent(ApplicationAttemptId aid, RMContainer container,
SchedulerEventType type) {
super(type);
this.aid = aid;
this.container = container;
}
public RMContainer getContainer(){
return this.container;
}
public ApplicationAttemptId getAppId() {
return aid;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(super.toString());
sb.append(" ").append(getAppId());
sb.append(" ").append(getContainer().getContainerId());
return sb.toString();
}
}
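// Illustrative sketch, not part of the original source: the event pairs an
// attempt id with a container and a SchedulerEventType, and is dispatched to
// the scheduler through its event handler. The handler, attempt id, container
// and event type below are hypothetical inputs; the concrete SchedulerEventType
// constant depends on the preemption wiring and is deliberately left to the
// caller here.
class ContainerPreemptEventSketch {
  static void demo(
      org.apache.hadoop.yarn.event.EventHandler<SchedulerEvent> schedulerEventHandler,
      ApplicationAttemptId attemptId, RMContainer container, SchedulerEventType type) {
    ContainerPreemptEvent event = new ContainerPreemptEvent(attemptId, container, type);
    schedulerEventHandler.handle(event);
  }
}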
| 1,968 | 32.948276 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerHealth.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class SchedulerHealth {
  public static class DetailedInformation {
long timestamp;
NodeId nodeId;
ContainerId containerId;
String queue;
public DetailedInformation(long timestamp, NodeId nodeId,
ContainerId containerId, String queue) {
this.timestamp = timestamp;
this.nodeId = nodeId;
this.containerId = containerId;
this.queue = queue;
}
public long getTimestamp() {
return timestamp;
}
public NodeId getNodeId() {
return nodeId;
}
public ContainerId getContainerId() {
return containerId;
}
public String getQueue() {
return queue;
}
}
enum Operation {
ALLOCATION, RELEASE, PREEMPTION, RESERVATION, FULFILLED_RESERVATION
}
long lastSchedulerRunTime;
Map<Operation, Resource> lastSchedulerRunDetails;
Map<Operation, DetailedInformation> schedulerHealthDetails;
Map<Operation, Long> schedulerOperationCounts;
// this is for counts since the RM started, never reset
Map<Operation, Long> schedulerOperationAggregateCounts;
public SchedulerHealth() {
lastSchedulerRunDetails = new ConcurrentHashMap<>();
schedulerHealthDetails = new ConcurrentHashMap<>();
schedulerOperationCounts = new ConcurrentHashMap<>();
schedulerOperationAggregateCounts = new ConcurrentHashMap<>();
for (Operation op : Operation.values()) {
lastSchedulerRunDetails.put(op, Resource.newInstance(0, 0));
schedulerOperationCounts.put(op, 0L);
schedulerHealthDetails.put(op, new DetailedInformation(0, null, null,
null));
schedulerOperationAggregateCounts.put(op, 0L);
}
}
public void updateAllocation(long timestamp, NodeId nodeId,
ContainerId containerId, String queue) {
DetailedInformation di =
new DetailedInformation(timestamp, nodeId, containerId, queue);
schedulerHealthDetails.put(Operation.ALLOCATION, di);
}
public void updateRelease(long timestamp, NodeId nodeId,
ContainerId containerId, String queue) {
DetailedInformation di =
new DetailedInformation(timestamp, nodeId, containerId, queue);
schedulerHealthDetails.put(Operation.RELEASE, di);
}
public void updatePreemption(long timestamp, NodeId nodeId,
ContainerId containerId, String queue) {
DetailedInformation di =
new DetailedInformation(timestamp, nodeId, containerId, queue);
schedulerHealthDetails.put(Operation.PREEMPTION, di);
}
public void updateReservation(long timestamp, NodeId nodeId,
ContainerId containerId, String queue) {
DetailedInformation di =
new DetailedInformation(timestamp, nodeId, containerId, queue);
schedulerHealthDetails.put(Operation.RESERVATION, di);
}
public void updateSchedulerRunDetails(long timestamp, Resource allocated,
Resource reserved) {
lastSchedulerRunTime = timestamp;
lastSchedulerRunDetails.put(Operation.ALLOCATION, allocated);
lastSchedulerRunDetails.put(Operation.RESERVATION, reserved);
}
public void updateSchedulerReleaseDetails(long timestamp, Resource released) {
lastSchedulerRunTime = timestamp;
lastSchedulerRunDetails.put(Operation.RELEASE, released);
}
public void updateSchedulerReleaseCounts(long count) {
updateCounts(Operation.RELEASE, count);
}
public void updateSchedulerAllocationCounts(long count) {
updateCounts(Operation.ALLOCATION, count);
}
public void updateSchedulerReservationCounts(long count) {
updateCounts(Operation.RESERVATION, count);
}
public void updateSchedulerFulfilledReservationCounts(long count) {
updateCounts(Operation.FULFILLED_RESERVATION, count);
}
public void updateSchedulerPreemptionCounts(long count) {
updateCounts(Operation.PREEMPTION, count);
}
private void updateCounts(Operation op, long count) {
schedulerOperationCounts.put(op, count);
Long tmp = schedulerOperationAggregateCounts.get(op);
schedulerOperationAggregateCounts.put(op, tmp + count);
}
public long getLastSchedulerRunTime() {
return lastSchedulerRunTime;
}
private Resource getResourceDetails(Operation op) {
return lastSchedulerRunDetails.get(op);
}
public Resource getResourcesAllocated() {
return getResourceDetails(Operation.ALLOCATION);
}
public Resource getResourcesReserved() {
return getResourceDetails(Operation.RESERVATION);
}
public Resource getResourcesReleased() {
return getResourceDetails(Operation.RELEASE);
}
private DetailedInformation getDetailedInformation(Operation op) {
return schedulerHealthDetails.get(op);
}
public DetailedInformation getLastAllocationDetails() {
return getDetailedInformation(Operation.ALLOCATION);
}
public DetailedInformation getLastReleaseDetails() {
return getDetailedInformation(Operation.RELEASE);
}
public DetailedInformation getLastReservationDetails() {
return getDetailedInformation(Operation.RESERVATION);
}
public DetailedInformation getLastPreemptionDetails() {
return getDetailedInformation(Operation.PREEMPTION);
}
private Long getOperationCount(Operation op) {
return schedulerOperationCounts.get(op);
}
public Long getAllocationCount() {
return getOperationCount(Operation.ALLOCATION);
}
public Long getReleaseCount() {
return getOperationCount(Operation.RELEASE);
}
public Long getReservationCount() {
return getOperationCount(Operation.RESERVATION);
}
public Long getPreemptionCount() {
return getOperationCount(Operation.PREEMPTION);
}
private Long getAggregateOperationCount(Operation op) {
return schedulerOperationAggregateCounts.get(op);
}
public Long getAggregateAllocationCount() {
return getAggregateOperationCount(Operation.ALLOCATION);
}
public Long getAggregateReleaseCount() {
return getAggregateOperationCount(Operation.RELEASE);
}
public Long getAggregateReservationCount() {
return getAggregateOperationCount(Operation.RESERVATION);
}
public Long getAggregatePreemptionCount() {
return getAggregateOperationCount(Operation.PREEMPTION);
}
public Long getAggregateFulFilledReservationsCount() {
return getAggregateOperationCount(Operation.FULFILLED_RESERVATION);
}
}
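// Illustrative sketch (hypothetical values) of how a scheduler might record an
// allocation and read the health information back; the ContainerId and queue
// name are placeholders.
//
//   SchedulerHealth health = new SchedulerHealth();
//   long now = System.currentTimeMillis();
//   health.updateAllocation(now, NodeId.newInstance("nm-host", 8041),
//       containerId, "root.default");
//   health.updateSchedulerAllocationCounts(1);
//   health.updateSchedulerRunDetails(now, Resource.newInstance(2048, 1),
//       Resource.newInstance(0, 0));
//   long totalAllocations = health.getAggregateAllocationCount();
//   SchedulerHealth.DetailedInformation last = health.getLastAllocationDetails();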
| 7,354 | 30.033755 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@Evolving
@LimitedPrivate("yarn")
public interface Queue {
/**
* Get the queue name
* @return queue name
*/
String getQueueName();
/**
* Get the queue metrics
* @return the queue metrics
*/
QueueMetrics getMetrics();
/**
* Get queue information
* @param includeChildQueues include child queues?
* @param recursive recursively get child queue information?
* @return queue information
*/
QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive);
/**
* Get queue ACLs for given <code>user</code>.
* @param user username
* @return queue ACLs for user
*/
List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user);
boolean hasAccess(QueueACL acl, UserGroupInformation user);
public ActiveUsersManager getActiveUsersManager();
/**
* Recover the state of the queue for a given container.
* @param clusterResource the resource of the cluster
* @param schedulerAttempt the application for which the container was allocated
* @param rmContainer the container that was recovered.
*/
public void recoverContainer(Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer);
/**
   * Get the node labels that this queue can access:
   * labels={*} means this queue can access any label,
   * labels={} means this queue can only access nodes without a label,
   * labels={a, b, c} means this queue can access a, b or c.
   * @return accessible node labels
*/
public Set<String> getAccessibleNodeLabels();
/**
   * Get the default label expression of this queue. It is used when neither the
   * ApplicationSubmissionContext nor the ResourceRequest specifies a label
   * expression.
*
* @return default label expression
*/
public String getDefaultNodeLabelExpression();
/**
   * When a new outstanding resource is requested, calling this will increase the
   * pending resource of the queue.
   *
   * @param nodeLabel node label asked by the application
   * @param resourceToInc newly requested resource
*/
public void incPendingResource(String nodeLabel, Resource resourceToInc);
/**
* When an outstanding resource is fulfilled or canceled, calling this will
   * decrease the pending resource of the queue.
   *
   * @param nodeLabel node label asked by the application
   * @param resourceToDec resource that is no longer outstanding
*/
public void decPendingResource(String nodeLabel, Resource resourceToDec);
/**
* Get the Default Application Priority for this queue
*
* @return default application priority
*/
public Priority getDefaultApplicationPriority();
}
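// Illustrative pending-resource accounting sketch (hypothetical): 'queue' is
// assumed to be a concrete Queue implementation, the empty string denotes the
// no-label partition, and the request asks for a 2 GB / 1 vcore container.
//
//   Resource ask = Resource.newInstance(2048, 1);
//   queue.incPendingResource("", ask);   // a new outstanding request arrives
//   // ... later, when the request is fulfilled or cancelled:
//   queue.decPendingResource("", ask);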
| 4,103 | 32.639344 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueNotFoundException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@Private
public class QueueNotFoundException extends YarnRuntimeException {
private static final long serialVersionUID = 187239430L;
public QueueNotFoundException(String message) {
super(message);
}
}
| 1,218 | 35.939394 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.api.ContainerType;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
/**
* Represents an application attempt from the viewpoint of the scheduler.
* Each running app attempt in the RM corresponds to one instance
* of this class.
*/
@Private
@Unstable
public class SchedulerApplicationAttempt implements SchedulableEntity {
private static final Log LOG = LogFactory
.getLog(SchedulerApplicationAttempt.class);
private static final long MEM_AGGREGATE_ALLOCATION_CACHE_MSECS = 3000;
protected long lastMemoryAggregateAllocationUpdateTime = 0;
private long lastMemorySeconds = 0;
private long lastVcoreSeconds = 0;
protected final AppSchedulingInfo appSchedulingInfo;
protected ApplicationAttemptId attemptId;
protected Map<ContainerId, RMContainer> liveContainers =
new HashMap<ContainerId, RMContainer>();
protected final Map<Priority, Map<NodeId, RMContainer>> reservedContainers =
new HashMap<Priority, Map<NodeId, RMContainer>>();
private final Multiset<Priority> reReservations = HashMultiset.create();
private Resource resourceLimit = Resource.newInstance(0, 0);
private boolean unmanagedAM = true;
private boolean amRunning = false;
private LogAggregationContext logAggregationContext;
private Priority appPriority = null;
protected ResourceUsage attemptResourceUsage = new ResourceUsage();
private AtomicLong firstAllocationRequestSentTime = new AtomicLong(0);
private AtomicLong firstContainerAllocatedTime = new AtomicLong(0);
protected List<RMContainer> newlyAllocatedContainers =
new ArrayList<RMContainer>();
  // pendingRelease is used in the work-preserving recovery scenario to keep
  // track of the AM's outstanding release requests. On recovery, the RM could
  // receive a release request from the AM before it receives the container
  // status from the NM. In that case, the to-be-recovered containers reported
  // by the NM should not be recovered.
private Set<ContainerId> pendingRelease = null;
/**
* Count how many times the application has been given an opportunity to
* schedule a task at each priority. Each time the scheduler asks the
* application for a task at this priority, it is incremented, and each time
* the application successfully schedules a task (at rack or node local), it
* is reset to 0.
*/
Multiset<Priority> schedulingOpportunities = HashMultiset.create();
/**
* Count how many times the application has been given an opportunity to
* schedule a non-partitioned resource request at each priority. Each time the
* scheduler asks the application for a task at this priority, it is
   * incremented, and it is reset to 0 whenever the application successfully
   * schedules any task at the corresponding priority.
*/
Multiset<Priority> missedNonPartitionedRequestSchedulingOpportunity =
HashMultiset.create();
// Time of the last container scheduled at the current allowed level
protected Map<Priority, Long> lastScheduledContainer =
new HashMap<Priority, Long>();
protected Queue queue;
protected boolean isStopped = false;
protected final RMContext rmContext;
public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId,
String user, Queue queue, ActiveUsersManager activeUsersManager,
RMContext rmContext) {
Preconditions.checkNotNull(rmContext, "RMContext should not be null");
this.rmContext = rmContext;
this.appSchedulingInfo =
new AppSchedulingInfo(applicationAttemptId, user, queue,
activeUsersManager, rmContext.getEpoch(), attemptResourceUsage);
this.queue = queue;
this.pendingRelease = new HashSet<ContainerId>();
this.attemptId = applicationAttemptId;
if (rmContext.getRMApps() != null &&
rmContext.getRMApps()
.containsKey(applicationAttemptId.getApplicationId())) {
ApplicationSubmissionContext appSubmissionContext =
rmContext.getRMApps().get(applicationAttemptId.getApplicationId())
.getApplicationSubmissionContext();
if (appSubmissionContext != null) {
unmanagedAM = appSubmissionContext.getUnmanagedAM();
this.logAggregationContext =
appSubmissionContext.getLogAggregationContext();
}
}
}
/**
* Get the live containers of the application.
* @return live containers of the application
*/
public synchronized Collection<RMContainer> getLiveContainers() {
return new ArrayList<RMContainer>(liveContainers.values());
}
public AppSchedulingInfo getAppSchedulingInfo() {
return this.appSchedulingInfo;
}
/**
* Is this application pending?
* @return true if it is else false.
*/
public boolean isPending() {
return appSchedulingInfo.isPending();
}
/**
* Get {@link ApplicationAttemptId} of the application master.
* @return <code>ApplicationAttemptId</code> of the application master
*/
public ApplicationAttemptId getApplicationAttemptId() {
return appSchedulingInfo.getApplicationAttemptId();
}
public ApplicationId getApplicationId() {
return appSchedulingInfo.getApplicationId();
}
public String getUser() {
return appSchedulingInfo.getUser();
}
public Map<String, ResourceRequest> getResourceRequests(Priority priority) {
return appSchedulingInfo.getResourceRequests(priority);
}
public Set<ContainerId> getPendingRelease() {
return this.pendingRelease;
}
public long getNewContainerId() {
return appSchedulingInfo.getNewContainerId();
}
public Collection<Priority> getPriorities() {
return appSchedulingInfo.getPriorities();
}
public synchronized ResourceRequest getResourceRequest(Priority priority, String resourceName) {
return this.appSchedulingInfo.getResourceRequest(priority, resourceName);
}
public synchronized int getTotalRequiredResources(Priority priority) {
return getResourceRequest(priority, ResourceRequest.ANY).getNumContainers();
}
public synchronized Resource getResource(Priority priority) {
return appSchedulingInfo.getResource(priority);
}
public String getQueueName() {
return appSchedulingInfo.getQueueName();
}
public Resource getAMResource() {
return attemptResourceUsage.getAMUsed();
}
public void setAMResource(Resource amResource) {
attemptResourceUsage.setAMUsed(amResource);
}
public boolean isAmRunning() {
return amRunning;
}
public void setAmRunning(boolean bool) {
amRunning = bool;
}
public boolean getUnmanagedAM() {
return unmanagedAM;
}
public synchronized RMContainer getRMContainer(ContainerId id) {
return liveContainers.get(id);
}
protected synchronized void resetReReservations(Priority priority) {
reReservations.setCount(priority, 0);
}
protected synchronized void addReReservation(Priority priority) {
reReservations.add(priority);
}
public synchronized int getReReservations(Priority priority) {
return reReservations.count(priority);
}
/**
* Get total current reservations.
* Used only by unit tests
* @return total current reservations
*/
@Stable
@Private
public synchronized Resource getCurrentReservation() {
return attemptResourceUsage.getReserved();
}
public Queue getQueue() {
return queue;
}
public synchronized boolean updateResourceRequests(
List<ResourceRequest> requests) {
if (!isStopped) {
return appSchedulingInfo.updateResourceRequests(requests, false);
}
return false;
}
public synchronized void recoverResourceRequests(
List<ResourceRequest> requests) {
if (!isStopped) {
appSchedulingInfo.updateResourceRequests(requests, true);
}
}
public synchronized void stop(RMAppAttemptState rmAppAttemptFinalState) {
// Cleanup all scheduling information
isStopped = true;
appSchedulingInfo.stop(rmAppAttemptFinalState);
}
public synchronized boolean isStopped() {
return isStopped;
}
/**
* Get the list of reserved containers
* @return All of the reserved containers.
*/
public synchronized List<RMContainer> getReservedContainers() {
List<RMContainer> reservedContainers = new ArrayList<RMContainer>();
for (Map.Entry<Priority, Map<NodeId, RMContainer>> e :
this.reservedContainers.entrySet()) {
reservedContainers.addAll(e.getValue().values());
}
return reservedContainers;
}
public synchronized RMContainer reserve(SchedulerNode node, Priority priority,
RMContainer rmContainer, Container container) {
// Create RMContainer if necessary
if (rmContainer == null) {
rmContainer =
new RMContainerImpl(container, getApplicationAttemptId(),
node.getNodeID(), appSchedulingInfo.getUser(), rmContext);
attemptResourceUsage.incReserved(node.getPartition(),
container.getResource());
// Reset the re-reservation count
resetReReservations(priority);
} else {
// Note down the re-reservation
addReReservation(priority);
}
rmContainer.handle(new RMContainerReservedEvent(container.getId(),
container.getResource(), node.getNodeID(), priority));
Map<NodeId, RMContainer> reservedContainers =
this.reservedContainers.get(priority);
if (reservedContainers == null) {
reservedContainers = new HashMap<NodeId, RMContainer>();
this.reservedContainers.put(priority, reservedContainers);
}
reservedContainers.put(node.getNodeID(), rmContainer);
if (LOG.isDebugEnabled()) {
LOG.debug("Application attempt " + getApplicationAttemptId()
+ " reserved container " + rmContainer + " on node " + node
+ ". This attempt currently has " + reservedContainers.size()
+ " reserved containers at priority " + priority
+ "; currentReservation " + container.getResource());
}
return rmContainer;
}
/**
* Has the application reserved the given <code>node</code> at the
* given <code>priority</code>?
* @param node node to be checked
* @param priority priority of reserved container
   * @return true if reserved, false if not
*/
public synchronized boolean isReserved(SchedulerNode node, Priority priority) {
Map<NodeId, RMContainer> reservedContainers =
this.reservedContainers.get(priority);
if (reservedContainers != null) {
return reservedContainers.containsKey(node.getNodeID());
}
return false;
}
public synchronized void setHeadroom(Resource globalLimit) {
this.resourceLimit = globalLimit;
}
/**
* Get available headroom in terms of resources for the application's user.
* @return available resource headroom
*/
public synchronized Resource getHeadroom() {
// Corner case to deal with applications being slightly over-limit
if (resourceLimit.getMemory() < 0) {
resourceLimit.setMemory(0);
}
return resourceLimit;
}
public synchronized int getNumReservedContainers(Priority priority) {
Map<NodeId, RMContainer> reservedContainers =
this.reservedContainers.get(priority);
return (reservedContainers == null) ? 0 : reservedContainers.size();
}
@SuppressWarnings("unchecked")
public synchronized void containerLaunchedOnNode(ContainerId containerId,
NodeId nodeId) {
// Inform the container
RMContainer rmContainer = getRMContainer(containerId);
if (rmContainer == null) {
// Some unknown container sneaked into the system. Kill it.
rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeCleanContainerEvent(nodeId, containerId));
return;
}
rmContainer.handle(new RMContainerEvent(containerId,
RMContainerEventType.LAUNCHED));
}
public synchronized void showRequests() {
if (LOG.isDebugEnabled()) {
for (Priority priority : getPriorities()) {
Map<String, ResourceRequest> requests = getResourceRequests(priority);
if (requests != null) {
LOG.debug("showRequests:" + " application=" + getApplicationId()
+ " headRoom=" + getHeadroom() + " currentConsumption="
+ attemptResourceUsage.getUsed().getMemory());
for (ResourceRequest request : requests.values()) {
LOG.debug("showRequests:" + " application=" + getApplicationId()
+ " request=" + request);
}
}
}
}
}
public Resource getCurrentConsumption() {
return attemptResourceUsage.getUsed();
}
public static class ContainersAndNMTokensAllocation {
List<Container> containerList;
List<NMToken> nmTokenList;
public ContainersAndNMTokensAllocation(List<Container> containerList,
List<NMToken> nmTokenList) {
this.containerList = containerList;
this.nmTokenList = nmTokenList;
}
public List<Container> getContainerList() {
return containerList;
}
public List<NMToken> getNMTokenList() {
return nmTokenList;
}
}
  // Create the container token and NMToken together; if either of them fails
  // for some reason, such as DNS being unavailable, do not return this container
  // and keep it in newlyAllocatedContainers to be fetched again later.
public synchronized ContainersAndNMTokensAllocation
pullNewlyAllocatedContainersAndNMTokens() {
List<Container> returnContainerList =
new ArrayList<Container>(newlyAllocatedContainers.size());
List<NMToken> nmTokens = new ArrayList<NMToken>();
for (Iterator<RMContainer> i = newlyAllocatedContainers.iterator(); i
.hasNext();) {
RMContainer rmContainer = i.next();
Container container = rmContainer.getContainer();
ContainerType containerType = ContainerType.TASK;
      // The masterContainer of a managed AM attempt is still null at this point
      // because the container being issued is itself the master container.
RMAppAttempt appAttempt =
rmContext
.getRMApps()
.get(
container.getId().getApplicationAttemptId()
.getApplicationId()).getCurrentAppAttempt();
if (appAttempt.getMasterContainer() == null
&& appAttempt.getSubmissionContext().getUnmanagedAM() == false) {
containerType = ContainerType.APPLICATION_MASTER;
}
try {
// create container token and NMToken altogether.
container.setContainerToken(rmContext.getContainerTokenSecretManager()
.createContainerToken(container.getId(), container.getNodeId(),
getUser(), container.getResource(), container.getPriority(),
rmContainer.getCreationTime(), this.logAggregationContext,
rmContainer.getNodeLabelExpression(), containerType));
NMToken nmToken =
rmContext.getNMTokenSecretManager().createAndGetNMToken(getUser(),
getApplicationAttemptId(), container);
if (nmToken != null) {
nmTokens.add(nmToken);
}
} catch (IllegalArgumentException e) {
// DNS might be down, skip returning this container.
LOG.error("Error trying to assign container token and NM token to" +
" an allocated container " + container.getId(), e);
continue;
}
returnContainerList.add(container);
i.remove();
rmContainer.handle(new RMContainerEvent(rmContainer.getContainerId(),
RMContainerEventType.ACQUIRED));
}
return new ContainersAndNMTokensAllocation(returnContainerList, nmTokens);
}
public synchronized void updateBlacklist(
List<String> blacklistAdditions, List<String> blacklistRemovals) {
if (!isStopped) {
this.appSchedulingInfo.updateBlacklist(
blacklistAdditions, blacklistRemovals);
}
}
public boolean isBlacklisted(String resourceName) {
return this.appSchedulingInfo.isBlacklisted(resourceName);
}
public synchronized int addMissedNonPartitionedRequestSchedulingOpportunity(
Priority priority) {
missedNonPartitionedRequestSchedulingOpportunity.add(priority);
return missedNonPartitionedRequestSchedulingOpportunity.count(priority);
}
public synchronized void
resetMissedNonPartitionedRequestSchedulingOpportunity(Priority priority) {
missedNonPartitionedRequestSchedulingOpportunity.setCount(priority, 0);
}
public synchronized void addSchedulingOpportunity(Priority priority) {
schedulingOpportunities.setCount(priority,
schedulingOpportunities.count(priority) + 1);
}
public synchronized void subtractSchedulingOpportunity(Priority priority) {
int count = schedulingOpportunities.count(priority) - 1;
this.schedulingOpportunities.setCount(priority, Math.max(count, 0));
}
/**
* Return the number of times the application has been given an opportunity
* to schedule a task at the given priority since the last time it
* successfully did so.
*/
public synchronized int getSchedulingOpportunities(Priority priority) {
return schedulingOpportunities.count(priority);
}
/**
* Should be called when an application has successfully scheduled a container,
* or when the scheduling locality threshold is relaxed.
   * Resets various internal counters which affect delay scheduling.
*
* @param priority The priority of the container scheduled.
*/
public synchronized void resetSchedulingOpportunities(Priority priority) {
resetSchedulingOpportunities(priority, System.currentTimeMillis());
}
// used for continuous scheduling
public synchronized void resetSchedulingOpportunities(Priority priority,
long currentTimeMs) {
lastScheduledContainer.put(priority, currentTimeMs);
schedulingOpportunities.setCount(priority, 0);
}
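  // Illustrative delay-scheduling sketch (hypothetical): a scheduler typically
  // compares the opportunity count against a locality threshold before relaxing
  // locality, and resets it once a container is actually assigned.
  // 'nodeLocalityThreshold' is a hypothetical configuration value.
  //
  //   if (app.getSchedulingOpportunities(priority) > nodeLocalityThreshold) {
  //     // locality can be relaxed to rack-local / off-switch for this ask
  //   }
  //   app.addSchedulingOpportunity(priority);     // offered a scheduling chance
  //   ...
  //   app.resetSchedulingOpportunities(priority); // container was assigned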
synchronized AggregateAppResourceUsage getRunningAggregateAppResourceUsage() {
long currentTimeMillis = System.currentTimeMillis();
// Don't walk the whole container list if the resources were computed
// recently.
if ((currentTimeMillis - lastMemoryAggregateAllocationUpdateTime)
> MEM_AGGREGATE_ALLOCATION_CACHE_MSECS) {
long memorySeconds = 0;
long vcoreSeconds = 0;
for (RMContainer rmContainer : this.liveContainers.values()) {
long usedMillis = currentTimeMillis - rmContainer.getCreationTime();
Resource resource = rmContainer.getContainer().getResource();
memorySeconds += resource.getMemory() * usedMillis /
DateUtils.MILLIS_PER_SECOND;
vcoreSeconds += resource.getVirtualCores() * usedMillis
/ DateUtils.MILLIS_PER_SECOND;
}
lastMemoryAggregateAllocationUpdateTime = currentTimeMillis;
lastMemorySeconds = memorySeconds;
lastVcoreSeconds = vcoreSeconds;
}
return new AggregateAppResourceUsage(lastMemorySeconds, lastVcoreSeconds);
}
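  // Worked example of the aggregation above (illustrative numbers): a live
  // container with a 2048 MB / 1 vcore resource that has been running for
  // 30,000 ms contributes 2048 * 30000 / 1000 = 61,440 MB-seconds and
  // 1 * 30000 / 1000 = 30 vcore-seconds to the cached totals.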
public synchronized ApplicationResourceUsageReport getResourceUsageReport() {
AggregateAppResourceUsage runningResourceUsage =
getRunningAggregateAppResourceUsage();
Resource usedResourceClone =
Resources.clone(attemptResourceUsage.getAllUsed());
Resource reservedResourceClone =
Resources.clone(attemptResourceUsage.getReserved());
return ApplicationResourceUsageReport.newInstance(liveContainers.size(),
reservedContainers.size(), usedResourceClone, reservedResourceClone,
Resources.add(usedResourceClone, reservedResourceClone),
runningResourceUsage.getMemorySeconds(),
runningResourceUsage.getVcoreSeconds());
}
public synchronized Map<ContainerId, RMContainer> getLiveContainersMap() {
return this.liveContainers;
}
public synchronized Resource getResourceLimit() {
return this.resourceLimit;
}
public synchronized Map<Priority, Long> getLastScheduledContainer() {
return this.lastScheduledContainer;
}
public synchronized void transferStateFromPreviousAttempt(
SchedulerApplicationAttempt appAttempt) {
this.liveContainers = appAttempt.getLiveContainersMap();
// this.reReservations = appAttempt.reReservations;
this.attemptResourceUsage.copyAllUsed(appAttempt.attemptResourceUsage);
this.resourceLimit = appAttempt.getResourceLimit();
// this.currentReservation = appAttempt.currentReservation;
// this.newlyAllocatedContainers = appAttempt.newlyAllocatedContainers;
// this.schedulingOpportunities = appAttempt.schedulingOpportunities;
this.lastScheduledContainer = appAttempt.getLastScheduledContainer();
this.appSchedulingInfo
.transferStateFromPreviousAppSchedulingInfo(appAttempt.appSchedulingInfo);
}
public synchronized void move(Queue newQueue) {
QueueMetrics oldMetrics = queue.getMetrics();
QueueMetrics newMetrics = newQueue.getMetrics();
String user = getUser();
for (RMContainer liveContainer : liveContainers.values()) {
Resource resource = liveContainer.getContainer().getResource();
oldMetrics.releaseResources(user, 1, resource);
newMetrics.allocateResources(user, 1, resource, false);
}
for (Map<NodeId, RMContainer> map : reservedContainers.values()) {
for (RMContainer reservedContainer : map.values()) {
Resource resource = reservedContainer.getReservedResource();
oldMetrics.unreserveResource(user, resource);
newMetrics.reserveResource(user, resource);
}
}
appSchedulingInfo.move(newQueue);
this.queue = newQueue;
}
public synchronized void recoverContainer(SchedulerNode node,
RMContainer rmContainer) {
// recover app scheduling info
appSchedulingInfo.recoverContainer(rmContainer);
if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
return;
}
LOG.info("SchedulerAttempt " + getApplicationAttemptId()
+ " is recovering container " + rmContainer.getContainerId());
liveContainers.put(rmContainer.getContainerId(), rmContainer);
attemptResourceUsage.incUsed(node.getPartition(), rmContainer
.getContainer().getResource());
// resourceLimit: updated when LeafQueue#recoverContainer#allocateResource
// is called.
// newlyAllocatedContainers.add(rmContainer);
// schedulingOpportunities
// lastScheduledContainer
}
public void incNumAllocatedContainers(NodeType containerType,
NodeType requestType) {
RMAppAttempt attempt =
rmContext.getRMApps().get(attemptId.getApplicationId())
.getCurrentAppAttempt();
if (attempt != null) {
attempt.getRMAppAttemptMetrics().incNumAllocatedContainers(containerType,
requestType);
}
}
public void setApplicationHeadroomForMetrics(Resource headroom) {
RMAppAttempt attempt =
rmContext.getRMApps().get(attemptId.getApplicationId())
.getCurrentAppAttempt();
if (attempt != null) {
attempt.getRMAppAttemptMetrics().setApplicationAttemptHeadRoom(
Resources.clone(headroom));
}
}
public void recordContainerRequestTime(long value) {
firstAllocationRequestSentTime.compareAndSet(0, value);
}
public void recordContainerAllocationTime(long value) {
if (firstContainerAllocatedTime.compareAndSet(0, value)) {
long timediff = firstContainerAllocatedTime.longValue() -
firstAllocationRequestSentTime.longValue();
if (timediff > 0) {
queue.getMetrics().addAppAttemptFirstContainerAllocationDelay(timediff);
}
}
}
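  // Worked example (illustrative): if recordContainerRequestTime(1000) recorded
  // the first request and recordContainerAllocationTime(3500) records the first
  // allocation, the positive difference 3500 - 1000 = 2500 ms is added to the
  // queue's first-container-allocation-delay metric.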
public Set<String> getBlacklistedNodes() {
return this.appSchedulingInfo.getBlackListCopy();
}
@Private
public boolean hasPendingResourceRequest(ResourceCalculator rc,
String nodePartition, Resource cluster,
SchedulingMode schedulingMode) {
return SchedulerUtils.hasPendingResourceRequest(rc,
this.attemptResourceUsage, nodePartition, cluster,
schedulingMode);
}
@VisibleForTesting
public ResourceUsage getAppAttemptResourceUsage() {
return this.attemptResourceUsage;
}
@Override
public Priority getPriority() {
return appPriority;
}
public void setPriority(Priority appPriority) {
this.appPriority = appPriority;
}
@Override
public String getId() {
return getApplicationId().toString();
}
@Override
public int compareInputOrderTo(SchedulableEntity other) {
if (other instanceof SchedulerApplicationAttempt) {
return getApplicationId().compareTo(
((SchedulerApplicationAttempt)other).getApplicationId());
}
return 1;//let other types go before this, if any
}
@Override
public synchronized ResourceUsage getSchedulingResourceUsage() {
return attemptResourceUsage;
}
}
| 28,215 | 36.07753 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
/**
* This interface is used by the components to talk to the
 * scheduler for allocating and cleaning up resources.
*
*/
public interface YarnScheduler extends EventHandler<SchedulerEvent> {
/**
* Get queue information
* @param queueName queue name
* @param includeChildQueues include child queues?
* @param recursive get children queues?
* @return queue information
* @throws IOException
*/
@Public
@Stable
public QueueInfo getQueueInfo(String queueName, boolean includeChildQueues,
boolean recursive) throws IOException;
/**
   * Get queue ACLs for the current user.
   * @return queue ACLs for the current user
*/
@Public
@Stable
public List<QueueUserACLInfo> getQueueUserAclInfo();
/**
* Get the whole resource capacity of the cluster.
* @return the whole resource capacity of the cluster.
*/
@LimitedPrivate("yarn")
@Unstable
public Resource getClusterResource();
/**
* Get minimum allocatable {@link Resource}.
* @return minimum allocatable resource
*/
@Public
@Stable
public Resource getMinimumResourceCapability();
/**
* Get maximum allocatable {@link Resource} at the cluster level.
* @return maximum allocatable resource
*/
@Public
@Stable
public Resource getMaximumResourceCapability();
/**
* Get maximum allocatable {@link Resource} for the queue specified.
* @param queueName queue name
* @return maximum allocatable resource
*/
@Public
@Stable
public Resource getMaximumResourceCapability(String queueName);
@LimitedPrivate("yarn")
@Evolving
ResourceCalculator getResourceCalculator();
/**
* Get the number of nodes available in the cluster.
* @return the number of available nodes.
*/
@Public
@Stable
public int getNumClusterNodes();
/**
   * The main API between the ApplicationMaster and the Scheduler.
   * The ApplicationMaster updates its future resource requirements
   * and may release containers it doesn't need.
   *
   * @param appAttemptId the application attempt making the request
   * @param ask the list of outstanding resource requests
   * @param release the containers the ApplicationMaster no longer needs
   * @param blacklistAdditions resource names to add to the blacklist
   * @param blacklistRemovals resource names to remove from the blacklist
* @return the {@link Allocation} for the application
*/
@Public
@Stable
Allocation
allocate(ApplicationAttemptId appAttemptId,
List<ResourceRequest> ask,
List<ContainerId> release,
List<String> blacklistAdditions,
List<String> blacklistRemovals);
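  // Illustrative allocate() call sketch (hypothetical): 'scheduler' is assumed
  // to be a concrete YarnScheduler and 'attemptId' a registered attempt; the
  // single request asks for two 1 GB / 1 vcore containers anywhere in the
  // cluster.
  //
  //   ResourceRequest ask = ResourceRequest.newInstance(
  //       Priority.newInstance(1), ResourceRequest.ANY,
  //       Resource.newInstance(1024, 1), 2);
  //   Allocation allocation = scheduler.allocate(attemptId,
  //       Collections.singletonList(ask),
  //       Collections.<ContainerId>emptyList(),
  //       Collections.<String>emptyList(),
  //       Collections.<String>emptyList());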
/**
* Get node resource usage report.
* @param nodeId
* @return the {@link SchedulerNodeReport} for the node or null
* if nodeId does not point to a defined node.
*/
@LimitedPrivate("yarn")
@Stable
public SchedulerNodeReport getNodeReport(NodeId nodeId);
/**
* Get the Scheduler app for a given app attempt Id.
* @param appAttemptId the id of the application attempt
* @return SchedulerApp for this given attempt.
*/
@LimitedPrivate("yarn")
@Stable
SchedulerAppReport getSchedulerAppInfo(ApplicationAttemptId appAttemptId);
/**
* Get a resource usage report from a given app attempt ID.
* @param appAttemptId the id of the application attempt
* @return resource usage report for this given attempt
*/
@LimitedPrivate("yarn")
@Evolving
ApplicationResourceUsageReport getAppResourceUsageReport(
ApplicationAttemptId appAttemptId);
/**
* Get the root queue for the scheduler.
* @return the root queue for the scheduler.
*/
@LimitedPrivate("yarn")
@Evolving
QueueMetrics getRootQueueMetrics();
/**
* Check if the user has permission to perform the operation.
* If the user has {@link QueueACL#ADMINISTER_QUEUE} permission,
* this user can view/modify the applications in this queue
* @param callerUGI
* @param acl
* @param queueName
* @return <code>true</code> if the user has the permission,
* <code>false</code> otherwise
*/
boolean checkAccess(UserGroupInformation callerUGI,
QueueACL acl, String queueName);
/**
* Gets the apps under a given queue
* @param queueName the name of the queue.
* @return a collection of app attempt ids in the given queue.
*/
@LimitedPrivate("yarn")
@Stable
public List<ApplicationAttemptId> getAppsInQueue(String queueName);
/**
* Get the container for the given containerId.
* @param containerId
* @return the container for the given containerId.
*/
@LimitedPrivate("yarn")
@Unstable
public RMContainer getRMContainer(ContainerId containerId);
/**
* Moves the given application to the given queue
* @param appId
* @param newQueue
* @return the name of the queue the application was placed into
* @throws YarnException if the move cannot be carried out
*/
@LimitedPrivate("yarn")
@Evolving
public String moveApplication(ApplicationId appId, String newQueue)
throws YarnException;
/**
   * Completely drain sourceQueue of applications by moving all of them to
* destQueue.
*
* @param sourceQueue
* @param destQueue
* @throws YarnException
*/
void moveAllApps(String sourceQueue, String destQueue) throws YarnException;
/**
* Terminate all applications in the specified queue.
*
* @param queueName the name of queue to be drained
* @throws YarnException
*/
void killAllAppsInQueue(String queueName) throws YarnException;
/**
* Remove an existing queue. Implementations might limit when a queue could be
* removed (e.g., must have zero entitlement, and no applications running, or
* must be a leaf, etc..).
*
* @param queueName name of the queue to remove
* @throws YarnException
*/
void removeQueue(String queueName) throws YarnException;
/**
* Add to the scheduler a new Queue. Implementations might limit what type of
* queues can be dynamically added (e.g., Queue must be a leaf, must be
* attached to existing parent, must have zero entitlement).
*
* @param newQueue the queue being added.
* @throws YarnException
*/
void addQueue(Queue newQueue) throws YarnException;
/**
* This method increase the entitlement for current queue (must respect
* invariants, e.g., no overcommit of parents, non negative, etc.).
* Entitlement is a general term for weights in FairScheduler, capacity for
* the CapacityScheduler, etc.
*
* @param queue the queue for which we change entitlement
* @param entitlement the new entitlement for the queue (capacity,
* maxCapacity, etc..)
* @throws YarnException
*/
void setEntitlement(String queue, QueueEntitlement entitlement)
throws YarnException;
/**
* Gets the list of names for queues managed by the Reservation System
* @return the list of queues which support reservations
*/
public Set<String> getPlanQueues() throws YarnException;
/**
* Return a collection of the resource types that are considered when
* scheduling
*
* @return an EnumSet containing the resource types
*/
public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes();
/**
*
* Verify whether a submitted application priority is valid as per configured
* Queue
*
* @param priorityFromContext
* Submitted Application priority.
* @param user
* User who submitted the Application
* @param queueName
* Name of the Queue
* @param applicationId
* Application ID
* @return Updated Priority from scheduler
*/
public Priority checkAndGetApplicationPriority(Priority priorityFromContext,
String user, String queueName, ApplicationId applicationId)
throws YarnException;
}
| 10,117 | 31.63871 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Splitter;
@InterfaceAudience.Private
@Metrics(context="yarn")
public class QueueMetrics implements MetricsSource {
@Metric("# of apps submitted") MutableCounterInt appsSubmitted;
@Metric("# of running apps") MutableGaugeInt appsRunning;
@Metric("# of pending apps") MutableGaugeInt appsPending;
@Metric("# of apps completed") MutableCounterInt appsCompleted;
@Metric("# of apps killed") MutableCounterInt appsKilled;
@Metric("# of apps failed") MutableCounterInt appsFailed;
@Metric("Allocated memory in MB") MutableGaugeInt allocatedMB;
@Metric("Allocated CPU in virtual cores") MutableGaugeInt allocatedVCores;
@Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
@Metric("Aggregate # of allocated containers") MutableCounterLong aggregateContainersAllocated;
@Metric("Aggregate # of released containers") MutableCounterLong aggregateContainersReleased;
@Metric("Available memory in MB") MutableGaugeInt availableMB;
@Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
@Metric("Pending memory allocation in MB") MutableGaugeInt pendingMB;
@Metric("Pending CPU allocation in virtual cores") MutableGaugeInt pendingVCores;
@Metric("# of pending containers") MutableGaugeInt pendingContainers;
@Metric("# of reserved memory in MB") MutableGaugeInt reservedMB;
@Metric("Reserved CPU in virtual cores") MutableGaugeInt reservedVCores;
@Metric("# of reserved containers") MutableGaugeInt reservedContainers;
@Metric("# of active users") MutableGaugeInt activeUsers;
@Metric("# of active applications") MutableGaugeInt activeApplications;
@Metric("App Attempt First Container Allocation Delay") MutableRate appAttemptFirstContainerAllocationDelay;
private final MutableGaugeInt[] runningTime;
private TimeBucketMetrics<ApplicationId> runBuckets;
static final Logger LOG = LoggerFactory.getLogger(QueueMetrics.class);
static final MetricsInfo RECORD_INFO = info("QueueMetrics",
"Metrics for the resource scheduler");
protected static final MetricsInfo QUEUE_INFO = info("Queue", "Metrics by queue");
protected static final MetricsInfo USER_INFO =
info("User", "Metrics by user");
static final Splitter Q_SPLITTER =
Splitter.on('.').omitEmptyStrings().trimResults();
protected final MetricsRegistry registry;
protected final String queueName;
protected final QueueMetrics parent;
protected final MetricsSystem metricsSystem;
protected final Map<String, QueueMetrics> users;
protected final Configuration conf;
protected QueueMetrics(MetricsSystem ms, String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
registry = new MetricsRegistry(RECORD_INFO);
this.queueName = queueName;
this.parent = parent != null ? parent.getMetrics() : null;
this.users = enableUserMetrics ? new HashMap<String, QueueMetrics>()
: null;
metricsSystem = ms;
this.conf = conf;
runningTime = buildBuckets(conf);
}
protected QueueMetrics tag(MetricsInfo info, String value) {
registry.tag(info, value);
return this;
}
protected static StringBuilder sourceName(String queueName) {
StringBuilder sb = new StringBuilder(RECORD_INFO.name());
int i = 0;
for (String node : Q_SPLITTER.split(queueName)) {
sb.append(",q").append(i++).append('=').append(node);
}
return sb;
}
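  // Example (derived from the code above): sourceName("root.default") produces
  // "QueueMetrics,q0=root,q1=default", which is the name the queue's metrics
  // source is registered under.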
public synchronized
static QueueMetrics forQueue(String queueName, Queue parent,
boolean enableUserMetrics,
Configuration conf) {
return forQueue(DefaultMetricsSystem.instance(), queueName, parent,
enableUserMetrics, conf);
}
/**
* Helper method to clear cache.
*/
@Private
public synchronized static void clearQueueMetrics() {
queueMetrics.clear();
}
/**
* Simple metrics cache to help prevent re-registrations.
*/
protected final static Map<String, QueueMetrics> queueMetrics =
new HashMap<String, QueueMetrics>();
public synchronized
static QueueMetrics forQueue(MetricsSystem ms, String queueName,
Queue parent, boolean enableUserMetrics,
Configuration conf) {
QueueMetrics metrics = queueMetrics.get(queueName);
if (metrics == null) {
metrics =
new QueueMetrics(ms, queueName, parent, enableUserMetrics, conf).
tag(QUEUE_INFO, queueName);
// Register with the MetricsSystems
if (ms != null) {
metrics =
ms.register(
sourceName(queueName).toString(),
"Metrics for queue: " + queueName, metrics);
}
queueMetrics.put(queueName, metrics);
}
return metrics;
}
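  // Illustrative sketch (hypothetical): obtaining and updating the metrics for
  // a queue through the default metrics system. The queue name, user name and
  // Configuration instance are placeholders.
  //
  //   QueueMetrics metrics =
  //       QueueMetrics.forQueue("root.default", null, false, new Configuration());
  //   metrics.submitApp("alice");
  //   metrics.submitAppAttempt("alice");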
public synchronized QueueMetrics getUserMetrics(String userName) {
if (users == null) {
return null;
}
QueueMetrics metrics = users.get(userName);
if (metrics == null) {
metrics = new QueueMetrics(metricsSystem, queueName, null, false, conf);
users.put(userName, metrics);
metricsSystem.register(
sourceName(queueName).append(",user=").append(userName).toString(),
"Metrics for user '"+ userName +"' in queue '"+ queueName +"'",
metrics.tag(QUEUE_INFO, queueName).tag(USER_INFO, userName));
}
return metrics;
}
private ArrayList<Integer> parseInts(String value) {
ArrayList<Integer> result = new ArrayList<Integer>();
for(String s: value.split(",")) {
result.add(Integer.parseInt(s.trim()));
}
return result;
}
private MutableGaugeInt[] buildBuckets(Configuration conf) {
ArrayList<Integer> buckets =
parseInts(conf.get(YarnConfiguration.RM_METRICS_RUNTIME_BUCKETS,
YarnConfiguration.DEFAULT_RM_METRICS_RUNTIME_BUCKETS));
MutableGaugeInt[] result = new MutableGaugeInt[buckets.size() + 1];
result[0] = registry.newGauge("running_0", "", 0);
long[] cuts = new long[buckets.size()];
for(int i=0; i < buckets.size(); ++i) {
result[i+1] = registry.newGauge("running_" + buckets.get(i), "", 0);
      cuts[i] = buckets.get(i) * 1000L * 60; // convert from min to ms
}
this.runBuckets = new TimeBucketMetrics<ApplicationId>(cuts);
return result;
}
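  // Worked example (assuming the runtime buckets are configured as "60,300,1440"
  // minutes): buildBuckets creates gauges running_0, running_60, running_300 and
  // running_1440, with cut points at 60 * 60000 = 3,600,000 ms,
  // 300 * 60000 = 18,000,000 ms and 1440 * 60000 = 86,400,000 ms.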
private void updateRunningTime() {
int[] counts = runBuckets.getBucketCounts(System.currentTimeMillis());
for(int i=0; i < counts.length; ++i) {
runningTime[i].set(counts[i]);
}
}
public void getMetrics(MetricsCollector collector, boolean all) {
updateRunningTime();
registry.snapshot(collector.addRecord(registry.info()), all);
}
public void submitApp(String user) {
appsSubmitted.incr();
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.submitApp(user);
}
if (parent != null) {
parent.submitApp(user);
}
}
public void submitAppAttempt(String user) {
appsPending.incr();
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.submitAppAttempt(user);
}
if (parent != null) {
parent.submitAppAttempt(user);
}
}
public void runAppAttempt(ApplicationId appId, String user) {
runBuckets.add(appId, System.currentTimeMillis());
appsRunning.incr();
appsPending.decr();
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.runAppAttempt(appId, user);
}
if (parent != null) {
parent.runAppAttempt(appId, user);
}
}
public void finishAppAttempt(
ApplicationId appId, boolean isPending, String user) {
runBuckets.remove(appId);
if (isPending) {
appsPending.decr();
} else {
appsRunning.decr();
}
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.finishAppAttempt(appId, isPending, user);
}
if (parent != null) {
parent.finishAppAttempt(appId, isPending, user);
}
}
public void finishApp(String user, RMAppState rmAppFinalState) {
switch (rmAppFinalState) {
case KILLED: appsKilled.incr(); break;
case FAILED: appsFailed.incr(); break;
default: appsCompleted.incr(); break;
}
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.finishApp(user, rmAppFinalState);
}
if (parent != null) {
parent.finishApp(user, rmAppFinalState);
}
}
public void moveAppFrom(AppSchedulingInfo app) {
if (app.isPending()) {
appsPending.decr();
} else {
appsRunning.decr();
}
QueueMetrics userMetrics = getUserMetrics(app.getUser());
if (userMetrics != null) {
userMetrics.moveAppFrom(app);
}
if (parent != null) {
parent.moveAppFrom(app);
}
}
public void moveAppTo(AppSchedulingInfo app) {
if (app.isPending()) {
appsPending.incr();
} else {
appsRunning.incr();
}
QueueMetrics userMetrics = getUserMetrics(app.getUser());
if (userMetrics != null) {
userMetrics.moveAppTo(app);
}
if (parent != null) {
parent.moveAppTo(app);
}
}
/**
 * Set available resources. To be called by the scheduler periodically as
 * resources become available.
 * @param limit resource limit
 */
public void setAvailableResourcesToQueue(Resource limit) {
availableMB.set(limit.getMemory());
availableVCores.set(limit.getVirtualCores());
}
/**
 * Set available resources for a user. To be called by the scheduler
 * periodically as resources become available.
 * @param user user the limit applies to
 * @param limit resource limit
 */
public void setAvailableResourcesToUser(String user, Resource limit) {
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.setAvailableResourcesToQueue(limit);
}
}
/**
 * Increment pending resource metrics.
 * @param user user the resources are pending for
 * @param containers number of pending containers
 * @param res per-container resource delta; the pending totals are updated by
 *          multiplying this by the container count, as in the other APIs
 */
public void incrPendingResources(String user, int containers, Resource res) {
_incrPendingResources(containers, res);
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.incrPendingResources(user, containers, res);
}
if (parent != null) {
parent.incrPendingResources(user, containers, res);
}
}
private void _incrPendingResources(int containers, Resource res) {
pendingContainers.incr(containers);
pendingMB.incr(res.getMemory() * containers);
pendingVCores.incr(res.getVirtualCores() * containers);
}
public void decrPendingResources(String user, int containers, Resource res) {
_decrPendingResources(containers, res);
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.decrPendingResources(user, containers, res);
}
if (parent != null) {
parent.decrPendingResources(user, containers, res);
}
}
private void _decrPendingResources(int containers, Resource res) {
pendingContainers.decr(containers);
pendingMB.decr(res.getMemory() * containers);
pendingVCores.decr(res.getVirtualCores() * containers);
}
public void allocateResources(String user, int containers, Resource res,
boolean decrPending) {
allocatedContainers.incr(containers);
aggregateContainersAllocated.incr(containers);
allocatedMB.incr(res.getMemory() * containers);
allocatedVCores.incr(res.getVirtualCores() * containers);
if (decrPending) {
_decrPendingResources(containers, res);
}
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.allocateResources(user, containers, res, decrPending);
}
if (parent != null) {
parent.allocateResources(user, containers, res, decrPending);
}
}
public void releaseResources(String user, int containers, Resource res) {
allocatedContainers.decr(containers);
aggregateContainersReleased.incr(containers);
allocatedMB.decr(res.getMemory() * containers);
allocatedVCores.decr(res.getVirtualCores() * containers);
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.releaseResources(user, containers, res);
}
if (parent != null) {
parent.releaseResources(user, containers, res);
}
}
public void reserveResource(String user, Resource res) {
reservedContainers.incr();
reservedMB.incr(res.getMemory());
reservedVCores.incr(res.getVirtualCores());
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.reserveResource(user, res);
}
if (parent != null) {
parent.reserveResource(user, res);
}
}
public void unreserveResource(String user, Resource res) {
reservedContainers.decr();
reservedMB.decr(res.getMemory());
reservedVCores.decr(res.getVirtualCores());
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.unreserveResource(user, res);
}
if (parent != null) {
parent.unreserveResource(user, res);
}
}
public void incrActiveUsers() {
activeUsers.incr();
}
public void decrActiveUsers() {
activeUsers.decr();
}
public void activateApp(String user) {
activeApplications.incr();
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.activateApp(user);
}
if (parent != null) {
parent.activateApp(user);
}
}
public void deactivateApp(String user) {
activeApplications.decr();
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
userMetrics.deactivateApp(user);
}
if (parent != null) {
parent.deactivateApp(user);
}
}
public void addAppAttemptFirstContainerAllocationDelay(long latency) {
appAttemptFirstContainerAllocationDelay.add(latency);
}
public int getAppsSubmitted() {
return appsSubmitted.value();
}
public int getAppsRunning() {
return appsRunning.value();
}
public int getAppsPending() {
return appsPending.value();
}
public int getAppsCompleted() {
return appsCompleted.value();
}
public int getAppsKilled() {
return appsKilled.value();
}
public int getAppsFailed() {
return appsFailed.value();
}
public Resource getAllocatedResources() {
return BuilderUtils.newResource(allocatedMB.value(), allocatedVCores.value());
}
public int getAllocatedMB() {
return allocatedMB.value();
}
public int getAllocatedVirtualCores() {
return allocatedVCores.value();
}
public int getAllocatedContainers() {
return allocatedContainers.value();
}
public int getAvailableMB() {
return availableMB.value();
}
public int getAvailableVirtualCores() {
return availableVCores.value();
}
public int getPendingMB() {
return pendingMB.value();
}
public int getPendingVirtualCores() {
return pendingVCores.value();
}
public int getPendingContainers() {
return pendingContainers.value();
}
public int getReservedMB() {
return reservedMB.value();
}
public int getReservedVirtualCores() {
return reservedVCores.value();
}
public int getReservedContainers() {
return reservedContainers.value();
}
public int getActiveUsers() {
return activeUsers.value();
}
public int getActiveApps() {
return activeApplications.value();
}
public MetricsSystem getMetricsSystem() {
return metricsSystem;
}
public long getAggregateAllocatedContainers() {
return aggregateContainersAllocated.value();
}
public long getAggegatedReleasedContainers() {
return aggregateContainersReleased.value();
}
}
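The sketch below (not part of the original file) walks one application attempt through the lifecycle methods defined above, in the order the scheduler would normally invoke them. It is assumed to sit alongside QueueMetrics in the same package; the metrics parameter, user name, and resource sizes are purely illustrative.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;

class QueueMetricsLifecycleSketch {
  // Drives one app attempt through the metrics calls in submission order.
  static void runOnce(QueueMetrics metrics, String user) {
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    Resource perContainer = Resource.newInstance(1024, 1);

    metrics.submitApp(user);            // appsSubmitted++
    metrics.submitAppAttempt(user);     // appsPending++
    metrics.runAppAttempt(appId, user); // appsPending--, appsRunning++

    // Two containers are requested, then allocated; decrPending=true moves
    // the resources from the pending gauges to the allocated gauges.
    metrics.incrPendingResources(user, 2, perContainer);
    metrics.allocateResources(user, 2, perContainer, true);

    // The containers finish and the attempt completes successfully.
    metrics.releaseResources(user, 2, perContainer);
    metrics.finishAppAttempt(appId, false, user);
    metrics.finishApp(user, RMAppState.FINISHED);
  }
}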
| 18,178 | 31.175221 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.collect.ImmutableSet;
/**
* Represents a YARN Cluster Node from the viewpoint of the scheduler.
*/
@Private
@Unstable
public abstract class SchedulerNode {
private static final Log LOG = LogFactory.getLog(SchedulerNode.class);
private Resource availableResource = Resource.newInstance(0, 0);
private Resource usedResource = Resource.newInstance(0, 0);
private Resource totalResourceCapability;
private RMContainer reservedContainer;
private volatile int numContainers;
/* containers that have been allocated to applications on this node */
private final Map<ContainerId, RMContainer> launchedContainers =
new HashMap<ContainerId, RMContainer>();
private final RMNode rmNode;
private final String nodeName;
private volatile Set<String> labels = null;
public SchedulerNode(RMNode node, boolean usePortForNodeName,
Set<String> labels) {
this.rmNode = node;
this.availableResource = Resources.clone(node.getTotalCapability());
this.totalResourceCapability = Resources.clone(node.getTotalCapability());
if (usePortForNodeName) {
nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort();
} else {
nodeName = rmNode.getHostName();
}
this.labels = ImmutableSet.copyOf(labels);
}
public SchedulerNode(RMNode node, boolean usePortForNodeName) {
this(node, usePortForNodeName, CommonNodeLabelsManager.EMPTY_STRING_SET);
}
public RMNode getRMNode() {
return this.rmNode;
}
/**
* Set total resources on the node.
* @param resource total resources on the node.
*/
public synchronized void setTotalResource(Resource resource){
this.totalResourceCapability = resource;
this.availableResource = Resources.subtract(totalResourceCapability,
this.usedResource);
}
/**
* Get the ID of the node which contains both its hostname and port.
*
* @return the ID of the node
*/
public NodeId getNodeID() {
return this.rmNode.getNodeID();
}
public String getHttpAddress() {
return this.rmNode.getHttpAddress();
}
/**
* Get the name of the node for scheduling matching decisions.
* <p>
* Typically this is the 'hostname' reported by the node, but it could be
* configured to be 'hostname:port' reported by the node via the
* {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant.
 * The main use case for this is the YARN MiniCluster, which needs to be able
 * to differentiate NodeManager instances by their port number.
*
* @return name of the node for scheduling matching decisions.
*/
public String getNodeName() {
return nodeName;
}
/**
* Get rackname.
*
* @return rackname
*/
public String getRackName() {
return this.rmNode.getRackName();
}
/**
* The Scheduler has allocated containers on this node to the given
* application.
*
* @param rmContainer
* allocated container
*/
public synchronized void allocateContainer(RMContainer rmContainer) {
Container container = rmContainer.getContainer();
deductAvailableResource(container.getResource());
++numContainers;
launchedContainers.put(container.getId(), rmContainer);
LOG.info("Assigned container " + container.getId() + " of capacity "
+ container.getResource() + " on host " + rmNode.getNodeAddress()
+ ", which has " + numContainers + " containers, "
+ getUsedResource() + " used and " + getAvailableResource()
+ " available after allocation");
}
/**
* Get available resources on the node.
*
* @return available resources on the node
*/
public synchronized Resource getAvailableResource() {
return this.availableResource;
}
/**
* Get used resources on the node.
*
* @return used resources on the node
*/
public synchronized Resource getUsedResource() {
return this.usedResource;
}
/**
* Get total resources on the node.
*
* @return total resources on the node.
*/
public synchronized Resource getTotalResource() {
return this.totalResourceCapability;
}
  public synchronized boolean isValidContainer(ContainerId containerId) {
    return launchedContainers.containsKey(containerId);
  }
private synchronized void updateResource(Container container) {
addAvailableResource(container.getResource());
--numContainers;
}
/**
* Release an allocated container on this node.
*
* @param container
* container to be released
*/
public synchronized void releaseContainer(Container container) {
if (!isValidContainer(container.getId())) {
LOG.error("Invalid container released " + container);
return;
}
    /* remove the container from the node manager */
if (null != launchedContainers.remove(container.getId())) {
updateResource(container);
}
LOG.info("Released container " + container.getId() + " of capacity "
+ container.getResource() + " on host " + rmNode.getNodeAddress()
+ ", which currently has " + numContainers + " containers, "
+ getUsedResource() + " used and " + getAvailableResource()
+ " available" + ", release resources=" + true);
}
private synchronized void addAvailableResource(Resource resource) {
if (resource == null) {
LOG.error("Invalid resource addition of null resource for "
+ rmNode.getNodeAddress());
return;
}
Resources.addTo(availableResource, resource);
Resources.subtractFrom(usedResource, resource);
}
private synchronized void deductAvailableResource(Resource resource) {
if (resource == null) {
LOG.error("Invalid deduction of null resource for "
+ rmNode.getNodeAddress());
return;
}
Resources.subtractFrom(availableResource, resource);
Resources.addTo(usedResource, resource);
}
/**
* Reserve container for the attempt on this node.
*/
public abstract void reserveResource(SchedulerApplicationAttempt attempt,
Priority priority, RMContainer container);
/**
* Unreserve resources on this node.
*/
public abstract void unreserveResource(SchedulerApplicationAttempt attempt);
@Override
public String toString() {
return "host: " + rmNode.getNodeAddress() + " #containers="
+ getNumContainers() + " available=" + getAvailableResource()
+ " used=" + getUsedResource();
}
/**
* Get number of active containers on the node.
*
* @return number of active containers on the node
*/
public int getNumContainers() {
return numContainers;
}
public synchronized List<RMContainer> getRunningContainers() {
return new ArrayList<RMContainer>(launchedContainers.values());
}
public synchronized RMContainer getReservedContainer() {
return reservedContainer;
}
protected synchronized void
setReservedContainer(RMContainer reservedContainer) {
this.reservedContainer = reservedContainer;
}
public synchronized void recoverContainer(RMContainer rmContainer) {
if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
return;
}
allocateContainer(rmContainer);
}
public Set<String> getLabels() {
return labels;
}
public void updateLabels(Set<String> labels) {
this.labels = labels;
}
/**
   * Get the partition this node belongs to. If the node-labels of this node
   * are empty or null, it belongs to the NO_LABEL partition. And since we only
   * support one partition for each node (YARN-2694), the first label will be
   * its partition.
*/
public String getPartition() {
if (this.labels == null || this.labels.isEmpty()) {
return RMNodeLabelsManager.NO_LABEL;
} else {
return this.labels.iterator().next();
}
}
}
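A small standalone sketch (not from the Hadoop sources) of the bookkeeping invariant the class above maintains: used plus available always equals the node's total capability. It only uses the Resource/Resources calls already used in this file; the capacities are made up.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

class SchedulerNodeInvariantSketch {
  public static void main(String[] args) {
    Resource total = Resource.newInstance(8192, 8); // node capability
    Resource used = Resource.newInstance(0, 0);
    Resource available = Resources.clone(total);

    // allocateContainer(): deductAvailableResource(container.getResource())
    Resource container = Resource.newInstance(2048, 2);
    Resources.subtractFrom(available, container);
    Resources.addTo(used, container);

    // releaseContainer(): addAvailableResource(container.getResource())
    Resources.addTo(available, container);
    Resources.subtractFrom(used, container);

    // At every step: available equals Resources.subtract(total, used).
    System.out.println("available=" + available + ", used=" + used);
  }
}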
| 9,778 | 30.342949 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerDynamicEditException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.exceptions.YarnException;
public class SchedulerDynamicEditException extends YarnException {
private static final long serialVersionUID = 7100374511387193257L;
public SchedulerDynamicEditException(String string) {
super(string);
}
}
| 1,151 | 35 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerAppUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.commons.logging.Log;
public class SchedulerAppUtils {
public static boolean isBlacklisted(SchedulerApplicationAttempt application,
SchedulerNode node, Log LOG) {
if (application.isBlacklisted(node.getNodeName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping 'host' " + node.getNodeName() +
" for " + application.getApplicationId() +
" since it has been blacklisted");
}
return true;
}
if (application.isBlacklisted(node.getRackName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping 'rack' " + node.getRackName() +
" for " + application.getApplicationId() +
" since it has been blacklisted");
}
return true;
}
return false;
}
}
| 1,664 | 32.979592 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
public class Allocation {
final List<Container> containers;
final Resource resourceLimit;
final Set<ContainerId> strictContainers;
final Set<ContainerId> fungibleContainers;
final List<ResourceRequest> fungibleResources;
final List<NMToken> nmTokens;
public Allocation(List<Container> containers, Resource resourceLimit,
Set<ContainerId> strictContainers, Set<ContainerId> fungibleContainers,
List<ResourceRequest> fungibleResources) {
    this(containers, resourceLimit, strictContainers, fungibleContainers,
fungibleResources, null);
}
public Allocation(List<Container> containers, Resource resourceLimit,
Set<ContainerId> strictContainers, Set<ContainerId> fungibleContainers,
List<ResourceRequest> fungibleResources, List<NMToken> nmTokens) {
this.containers = containers;
this.resourceLimit = resourceLimit;
this.strictContainers = strictContainers;
this.fungibleContainers = fungibleContainers;
this.fungibleResources = fungibleResources;
this.nmTokens = nmTokens;
}
public List<Container> getContainers() {
return containers;
}
public Resource getResourceLimit() {
return resourceLimit;
}
public Set<ContainerId> getStrictContainerPreemptions() {
return strictContainers;
}
public Set<ContainerId> getContainerPreemptions() {
return fungibleContainers;
}
public List<ResourceRequest> getResourcePreemptions() {
return fungibleResources;
}
public List<NMToken> getNMTokens() {
return nmTokens;
}
}
| 2,718 | 32.158537 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
 * Resource usage, tracked per node label, for the following fields:
 * - AM resource (to enforce max-am-resource-by-label after YARN-2637)
 * - Used resource (includes AM resource usage)
 * - Reserved resource
 * - Pending resource
 * - Headroom
 *
 * This class can be used to track resource usage in queue/user/app.
 *
 * And it is thread-safe.
*/
public class ResourceUsage {
private ReadLock readLock;
private WriteLock writeLock;
private Map<String, UsageByLabel> usages;
// short for no-label :)
private static final String NL = CommonNodeLabelsManager.NO_LABEL;
public ResourceUsage() {
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
readLock = lock.readLock();
writeLock = lock.writeLock();
usages = new HashMap<String, UsageByLabel>();
usages.put(NL, new UsageByLabel(NL));
}
  // Usage enum here to make the implementation cleaner
private enum ResourceType {
//CACHED_USED and CACHED_PENDING may be read by anyone, but must only
//be written by ordering policies
USED(0), PENDING(1), AMUSED(2), RESERVED(3), CACHED_USED(4),
CACHED_PENDING(5);
private int idx;
private ResourceType(int value) {
this.idx = value;
}
}
private static class UsageByLabel {
// usage by label, contains all UsageType
private Resource[] resArr;
public UsageByLabel(String label) {
resArr = new Resource[ResourceType.values().length];
for (int i = 0; i < resArr.length; i++) {
resArr[i] = Resource.newInstance(0, 0);
      }
}
public Resource getUsed() {
return resArr[ResourceType.USED.idx];
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{used=" + resArr[0] + "%, ");
sb.append("pending=" + resArr[1] + "%, ");
sb.append("am_used=" + resArr[2] + "%, ");
sb.append("reserved=" + resArr[3] + "%}");
return sb.toString();
}
}
/*
* Used
*/
public Resource getUsed() {
return getUsed(NL);
}
public Resource getUsed(String label) {
return _get(label, ResourceType.USED);
}
public Resource getCachedUsed(String label) {
return _get(label, ResourceType.CACHED_USED);
}
public Resource getCachedPending(String label) {
return _get(label, ResourceType.CACHED_PENDING);
}
public void incUsed(String label, Resource res) {
_inc(label, ResourceType.USED, res);
}
public void incUsed(Resource res) {
incUsed(NL, res);
}
public void decUsed(Resource res) {
decUsed(NL, res);
}
public void decUsed(String label, Resource res) {
_dec(label, ResourceType.USED, res);
}
public void setUsed(Resource res) {
setUsed(NL, res);
}
public void copyAllUsed(ResourceUsage other) {
try {
writeLock.lock();
for (Entry<String, UsageByLabel> entry : other.usages.entrySet()) {
setUsed(entry.getKey(), Resources.clone(entry.getValue().getUsed()));
}
} finally {
writeLock.unlock();
}
}
public void setUsed(String label, Resource res) {
_set(label, ResourceType.USED, res);
}
public void setCachedUsed(String label, Resource res) {
_set(label, ResourceType.CACHED_USED, res);
}
public void setCachedPending(String label, Resource res) {
_set(label, ResourceType.CACHED_PENDING, res);
}
/*
* Pending
*/
public Resource getPending() {
return getPending(NL);
}
public Resource getPending(String label) {
return _get(label, ResourceType.PENDING);
}
public void incPending(String label, Resource res) {
_inc(label, ResourceType.PENDING, res);
}
public void incPending(Resource res) {
incPending(NL, res);
}
public void decPending(Resource res) {
decPending(NL, res);
}
public void decPending(String label, Resource res) {
_dec(label, ResourceType.PENDING, res);
}
public void setPending(Resource res) {
setPending(NL, res);
}
public void setPending(String label, Resource res) {
_set(label, ResourceType.PENDING, res);
}
/*
* Reserved
*/
public Resource getReserved() {
return getReserved(NL);
}
public Resource getReserved(String label) {
return _get(label, ResourceType.RESERVED);
}
public void incReserved(String label, Resource res) {
_inc(label, ResourceType.RESERVED, res);
}
public void incReserved(Resource res) {
incReserved(NL, res);
}
public void decReserved(Resource res) {
decReserved(NL, res);
}
public void decReserved(String label, Resource res) {
_dec(label, ResourceType.RESERVED, res);
}
public void setReserved(Resource res) {
setReserved(NL, res);
}
public void setReserved(String label, Resource res) {
_set(label, ResourceType.RESERVED, res);
}
/*
* AM-Used
*/
public Resource getAMUsed() {
return getAMUsed(NL);
}
public Resource getAMUsed(String label) {
return _get(label, ResourceType.AMUSED);
}
public void incAMUsed(String label, Resource res) {
_inc(label, ResourceType.AMUSED, res);
}
public void incAMUsed(Resource res) {
incAMUsed(NL, res);
}
public void decAMUsed(Resource res) {
decAMUsed(NL, res);
}
public void decAMUsed(String label, Resource res) {
_dec(label, ResourceType.AMUSED, res);
}
public void setAMUsed(Resource res) {
setAMUsed(NL, res);
}
public void setAMUsed(String label, Resource res) {
_set(label, ResourceType.AMUSED, res);
}
private static Resource normalize(Resource res) {
if (res == null) {
return Resources.none();
}
return res;
}
private Resource _get(String label, ResourceType type) {
if (label == null) {
label = RMNodeLabelsManager.NO_LABEL;
}
try {
readLock.lock();
UsageByLabel usage = usages.get(label);
if (null == usage) {
return Resources.none();
}
return normalize(usage.resArr[type.idx]);
} finally {
readLock.unlock();
}
}
private Resource _getAll(ResourceType type) {
try {
readLock.lock();
Resource allOfType = Resources.createResource(0);
for (Map.Entry<String, UsageByLabel> usageEntry : usages.entrySet()) {
//all usages types are initialized
Resources.addTo(allOfType, usageEntry.getValue().resArr[type.idx]);
}
return allOfType;
} finally {
readLock.unlock();
}
}
public Resource getAllPending() {
return _getAll(ResourceType.PENDING);
}
public Resource getAllUsed() {
return _getAll(ResourceType.USED);
}
private UsageByLabel getAndAddIfMissing(String label) {
if (label == null) {
label = RMNodeLabelsManager.NO_LABEL;
}
if (!usages.containsKey(label)) {
UsageByLabel u = new UsageByLabel(label);
usages.put(label, u);
return u;
}
return usages.get(label);
}
private void _set(String label, ResourceType type, Resource res) {
try {
writeLock.lock();
UsageByLabel usage = getAndAddIfMissing(label);
usage.resArr[type.idx] = res;
} finally {
writeLock.unlock();
}
}
private void _inc(String label, ResourceType type, Resource res) {
try {
writeLock.lock();
UsageByLabel usage = getAndAddIfMissing(label);
Resources.addTo(usage.resArr[type.idx], res);
} finally {
writeLock.unlock();
}
}
private void _dec(String label, ResourceType type, Resource res) {
try {
writeLock.lock();
UsageByLabel usage = getAndAddIfMissing(label);
Resources.subtractFrom(usage.resArr[type.idx], res);
} finally {
writeLock.unlock();
}
}
public Resource getCachedDemand(String label) {
try {
readLock.lock();
Resource demand = Resources.createResource(0);
Resources.addTo(demand, getCachedUsed(label));
Resources.addTo(demand, getCachedPending(label));
return demand;
} finally {
readLock.unlock();
}
}
@Override
public String toString() {
try {
readLock.lock();
return usages.toString();
} finally {
readLock.unlock();
}
}
public Set<String> getNodePartitionsSet() {
try {
readLock.lock();
return usages.keySet();
} finally {
readLock.unlock();
}
}
}
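A brief usage sketch for the class above (not part of the original file, assumed to sit in the same package as ResourceUsage). The "gpu" label and the sizes are arbitrary examples.

import org.apache.hadoop.yarn.api.records.Resource;

class ResourceUsageSketch {
  public static void main(String[] args) {
    ResourceUsage usage = new ResourceUsage();

    // Default (no-label) partition.
    usage.incUsed(Resource.newInstance(4096, 4));
    usage.incPending(Resource.newInstance(1024, 1));

    // An explicit partition; "gpu" is just an example label name.
    usage.incUsed("gpu", Resource.newInstance(2048, 2));

    System.out.println(usage.getUsed());       // no-label used: 4096 MB, 4 cores
    System.out.println(usage.getUsed("gpu"));  // labelled used: 2048 MB, 2 cores
    System.out.println(usage.getAllUsed());    // across labels: 6144 MB, 6 cores
    System.out.println(usage.getAllPending()); // across labels: 1024 MB, 1 core
  }
}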
| 9,722 | 24.189119 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoOrderingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
/**
* An OrderingPolicy which orders SchedulableEntities by input order
*/
public class FifoOrderingPolicy<S extends SchedulableEntity> extends AbstractComparatorOrderingPolicy<S> {
public FifoOrderingPolicy() {
this.comparator = new FifoComparator();
this.schedulableEntities = new TreeSet<S>(comparator);
}
@Override
public void configure(Map<String, String> conf) {
}
@Override
public void containerAllocated(S schedulableEntity,
RMContainer r) {
}
@Override
public void containerReleased(S schedulableEntity,
RMContainer r) {
}
@Override
public void demandUpdated(S schedulableEntity) {
}
@Override
public String getInfo() {
return "FifoOrderingPolicy";
}
}
| 1,790 | 28.85 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
/**
* A SchedulableEntity is a process to be scheduled,
* for example, an application / application attempt
*/
public interface SchedulableEntity {
/**
* Id - each entity must have a unique id
*/
public String getId();
/**
* Compare the passed SchedulableEntity to this one for input order.
* Input order is implementation defined and should reflect the
* correct ordering for first-in first-out processing
*/
public int compareInputOrderTo(SchedulableEntity other);
/**
* View of Resources wanted and consumed by the entity
*/
public ResourceUsage getSchedulingResourceUsage();
/**
* Get the priority of the application
*/
public Priority getPriority();
}
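To make the contract above concrete, here is a minimal, hypothetical implementation (not from the Hadoop code base, assumed to live in the same package): it orders entities by their id and reports a fixed priority.

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;

class SimpleSchedulableEntity implements SchedulableEntity {
  private final String id;
  private final ResourceUsage usage = new ResourceUsage();

  SimpleSchedulableEntity(String id) {
    this.id = id;
  }

  @Override
  public String getId() {
    return id;
  }

  @Override
  public int compareInputOrderTo(SchedulableEntity other) {
    // Input order here is simply the lexicographic order of the ids.
    return id.compareTo(other.getId());
  }

  @Override
  public ResourceUsage getSchedulingResourceUsage() {
    return usage;
  }

  @Override
  public Priority getPriority() {
    return Priority.newInstance(0);
  }
}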
| 1,807 | 30.719298 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/CompoundComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
//Some policies will use multiple comparators joined together
class CompoundComparator implements Comparator<SchedulableEntity> {
List<Comparator<SchedulableEntity>> comparators;
CompoundComparator(List<Comparator<SchedulableEntity>> comparators) {
this.comparators = comparators;
}
@Override
public int compare(final SchedulableEntity r1, final SchedulableEntity r2) {
for (Comparator<SchedulableEntity> comparator : comparators) {
int result = comparator.compare(r1, r2);
if (result != 0) return result;
}
return 0;
}
}
| 1,642 | 36.340909 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import com.google.common.annotations.VisibleForTesting;
/**
 * An OrderingPolicy which can serve as a base class for policies which can be
 * expressed in terms of comparators.
*/
public abstract class AbstractComparatorOrderingPolicy<S extends SchedulableEntity> implements OrderingPolicy<S> {
private static final Log LOG = LogFactory.getLog(OrderingPolicy.class);
protected TreeSet<S> schedulableEntities;
protected Comparator<SchedulableEntity> comparator;
protected Map<String, S> entitiesToReorder = new HashMap<String, S>();
public AbstractComparatorOrderingPolicy() { }
@Override
public Collection<S> getSchedulableEntities() {
return schedulableEntities;
}
@Override
public Iterator<S> getAssignmentIterator() {
reorderScheduleEntities();
return schedulableEntities.iterator();
}
@Override
public Iterator<S> getPreemptionIterator() {
reorderScheduleEntities();
return schedulableEntities.descendingIterator();
}
public static void updateSchedulingResourceUsage(ResourceUsage ru) {
ru.setCachedUsed(CommonNodeLabelsManager.ANY, ru.getAllUsed());
ru.setCachedPending(CommonNodeLabelsManager.ANY, ru.getAllPending());
}
protected void reorderSchedulableEntity(S schedulableEntity) {
//remove, update comparable data, and reinsert to update position in order
schedulableEntities.remove(schedulableEntity);
updateSchedulingResourceUsage(
schedulableEntity.getSchedulingResourceUsage());
schedulableEntities.add(schedulableEntity);
}
protected void reorderScheduleEntities() {
synchronized (entitiesToReorder) {
for (Map.Entry<String, S> entry :
entitiesToReorder.entrySet()) {
reorderSchedulableEntity(entry.getValue());
}
entitiesToReorder.clear();
}
}
protected void entityRequiresReordering(S schedulableEntity) {
synchronized (entitiesToReorder) {
entitiesToReorder.put(schedulableEntity.getId(), schedulableEntity);
}
}
@VisibleForTesting
public Comparator<SchedulableEntity> getComparator() {
return comparator;
}
@Override
public void addSchedulableEntity(S s) {
schedulableEntities.add(s);
}
@Override
public boolean removeSchedulableEntity(S s) {
synchronized (entitiesToReorder) {
entitiesToReorder.remove(s.getId());
}
return schedulableEntities.remove(s);
}
@Override
public void addAllSchedulableEntities(Collection<S> sc) {
schedulableEntities.addAll(sc);
}
@Override
public int getNumSchedulableEntities() {
return schedulableEntities.size();
}
@Override
public abstract void configure(Map<String, String> conf);
@Override
public abstract void containerAllocated(S schedulableEntity,
RMContainer r);
@Override
public abstract void containerReleased(S schedulableEntity,
RMContainer r);
@Override
public abstract void demandUpdated(S schedulableEntity);
@Override
public abstract String getInfo();
}
| 4,265 | 30.367647 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
/**
* OrderingPolicy is used by the scheduler to order SchedulableEntities for
* container assignment and preemption
*/
public interface OrderingPolicy<S extends SchedulableEntity> {
/*
* Note: OrderingPolicy depends upon external
* synchronization of all use of the SchedulableEntity Collection and
* Iterators for correctness and to avoid concurrent modification issues
*/
/**
* Get the collection of SchedulableEntities which are managed by this
* OrderingPolicy - should include processes returned by the Assignment and
* Preemption iterator with no guarantees regarding order
*/
public Collection<S> getSchedulableEntities();
/**
* Return an iterator over the collection of SchedulableEntities which orders
* them for container assignment
*/
public Iterator<S> getAssignmentIterator();
/**
* Return an iterator over the collection of SchedulableEntities which orders
* them for preemption
*/
public Iterator<S> getPreemptionIterator();
/**
* Add a SchedulableEntity to be managed for allocation and preemption
* ordering
*/
public void addSchedulableEntity(S s);
/**
* Remove a SchedulableEntity from management for allocation and preemption
* ordering
*/
public boolean removeSchedulableEntity(S s);
/**
* Add a collection of SchedulableEntities to be managed for allocation
* and preemption ordering
*/
public void addAllSchedulableEntities(Collection<S> sc);
/**
* Get the number of SchedulableEntities managed for allocation and
* preemption ordering
*/
public int getNumSchedulableEntities();
/**
* Provides configuration information for the policy from the scheduler
* configuration
*/
public void configure(Map<String, String> conf);
/**
* The passed SchedulableEntity has been allocated the passed Container,
* take appropriate action (depending on comparator, a reordering of the
* SchedulableEntity may be required)
*/
public void containerAllocated(S schedulableEntity,
RMContainer r);
/**
* The passed SchedulableEntity has released the passed Container,
* take appropriate action (depending on comparator, a reordering of the
* SchedulableEntity may be required)
*/
public void containerReleased(S schedulableEntity,
RMContainer r);
/**
   * Demand updated for the passed schedulableEntity; reorder if needed.
*/
void demandUpdated(S schedulableEntity);
/**
* Display information regarding configuration & status
*/
public String getInfo();
}
| 3,716 | 31.321739 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
/**
 * An OrderingPolicy which orders SchedulableEntities for fairness (see
 * FairScheduler FairSharePolicy); generally, entities with less usage are
 * ordered first. If sizeBasedWeight is set to true then an application with
 * high demand may be prioritized ahead of an application with less usage. This
 * is to offset the tendency to favor small apps, which could result in
 * starvation for large apps if many small ones enter and leave the queue
 * continuously (optional, default false).
*/
public class FairOrderingPolicy<S extends SchedulableEntity> extends AbstractComparatorOrderingPolicy<S> {
public static final String ENABLE_SIZE_BASED_WEIGHT =
"fair.enable-size-based-weight";
protected class FairComparator implements Comparator<SchedulableEntity> {
@Override
public int compare(final SchedulableEntity r1, final SchedulableEntity r2) {
int res = (int) Math.signum( getMagnitude(r1) - getMagnitude(r2) );
return res;
}
}
private CompoundComparator fairComparator;
private boolean sizeBasedWeight = false;
public FairOrderingPolicy() {
List<Comparator<SchedulableEntity>> comparators =
new ArrayList<Comparator<SchedulableEntity>>();
comparators.add(new FairComparator());
comparators.add(new FifoComparator());
fairComparator = new CompoundComparator(
comparators
);
this.comparator = fairComparator;
this.schedulableEntities = new TreeSet<S>(comparator);
}
private double getMagnitude(SchedulableEntity r) {
double mag = r.getSchedulingResourceUsage().getCachedUsed(
CommonNodeLabelsManager.ANY).getMemory();
if (sizeBasedWeight) {
double weight = Math.log1p(r.getSchedulingResourceUsage().getCachedDemand(
CommonNodeLabelsManager.ANY).getMemory()) / Math.log(2);
mag = mag / weight;
}
return mag;
}
@VisibleForTesting
public boolean getSizeBasedWeight() {
return sizeBasedWeight;
}
@VisibleForTesting
public void setSizeBasedWeight(boolean sizeBasedWeight) {
this.sizeBasedWeight = sizeBasedWeight;
}
@Override
public void configure(Map<String, String> conf) {
if (conf.containsKey(ENABLE_SIZE_BASED_WEIGHT)) {
sizeBasedWeight = Boolean.valueOf(conf.get(ENABLE_SIZE_BASED_WEIGHT));
}
}
@Override
public void containerAllocated(S schedulableEntity,
RMContainer r) {
entityRequiresReordering(schedulableEntity);
}
@Override
public void containerReleased(S schedulableEntity,
RMContainer r) {
entityRequiresReordering(schedulableEntity);
}
@Override
public void demandUpdated(S schedulableEntity) {
if (sizeBasedWeight) {
entityRequiresReordering(schedulableEntity);
}
}
@Override
public String getInfo() {
String sbw = sizeBasedWeight ? " with sizeBasedWeight" : "";
return "FairOrderingPolicy" + sbw;
}
}
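A small driver sketch (not part of the Hadoop sources) showing the policy above in action. It reuses the SimpleSchedulableEntity sketch given after the SchedulableEntity interface, assumes the same package, and the application ids and resource sizes are invented for illustration.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;

class FairOrderingPolicySketch {
  public static void main(String[] args) {
    FairOrderingPolicy<SimpleSchedulableEntity> policy =
        new FairOrderingPolicy<SimpleSchedulableEntity>();

    // Optional: enable size-based weight, as described in the class javadoc.
    Map<String, String> conf = new HashMap<String, String>();
    conf.put(FairOrderingPolicy.ENABLE_SIZE_BASED_WEIGHT, "true");
    policy.configure(conf);

    // The comparator reads the cached used/pending figures for the ANY label.
    SimpleSchedulableEntity light = new SimpleSchedulableEntity("app-1");
    SimpleSchedulableEntity heavy = new SimpleSchedulableEntity("app-2");
    light.getSchedulingResourceUsage().setCachedUsed(
        CommonNodeLabelsManager.ANY, Resource.newInstance(1024, 1));
    heavy.getSchedulingResourceUsage().setCachedUsed(
        CommonNodeLabelsManager.ANY, Resource.newInstance(8192, 8));

    policy.addSchedulableEntity(light);
    policy.addSchedulableEntity(heavy);

    // Assignment order favours the least-used entity ("app-1") ...
    Iterator<SimpleSchedulableEntity> assign = policy.getAssignmentIterator();
    System.out.println("assign first: " + assign.next().getId());

    // ... while preemption starts from the most-used entity ("app-2").
    Iterator<SimpleSchedulableEntity> preempt = policy.getPreemptionIterator();
    System.out.println("preempt first: " + preempt.next().getId());
  }
}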
| 4,051 | 32.213115 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
import java.util.*;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
/**
* A Comparator which orders SchedulableEntities by input order
*/
public class FifoComparator
implements Comparator<SchedulableEntity> {
@Override
public int compare(SchedulableEntity r1, SchedulableEntity r2) {
if (r1.getPriority() != null
&& !r1.getPriority().equals(r2.getPriority())) {
return r1.getPriority().compareTo(r2.getPriority());
}
int res = r1.compareInputOrderTo(r2);
return res;
}
}
| 1,503 | 33.976744 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/QueueEntitlement.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
public class QueueEntitlement {
private float capacity;
private float maxCapacity;
public QueueEntitlement(float capacity, float maxCapacity){
this.setCapacity(capacity);
this.maxCapacity = maxCapacity;
}
public float getMaxCapacity() {
return maxCapacity;
}
public void setMaxCapacity(float maxCapacity) {
this.maxCapacity = maxCapacity;
}
public float getCapacity() {
return capacity;
}
public void setCapacity(float capacity) {
this.capacity = capacity;
}
}
| 1,398 | 28.765957 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/AssignmentInformation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class AssignmentInformation {
public enum Operation {
ALLOCATION, RESERVATION
}
public static class AssignmentDetails {
public ContainerId containerId;
public String queue;
public AssignmentDetails(ContainerId containerId, String queue) {
this.containerId = containerId;
this.queue = queue;
}
}
private final Map<Operation, Integer> operationCounts;
private final Map<Operation, Resource> operationResources;
private final Map<Operation, List<AssignmentDetails>> operationDetails;
public AssignmentInformation() {
this.operationCounts = new HashMap<>();
this.operationResources = new HashMap<>();
this.operationDetails = new HashMap<>();
for (Operation op : Operation.values()) {
operationCounts.put(op, 0);
operationResources.put(op, Resource.newInstance(0, 0));
operationDetails.put(op, new ArrayList<AssignmentDetails>());
}
}
public int getNumAllocations() {
return operationCounts.get(Operation.ALLOCATION);
}
public void incrAllocations() {
increment(Operation.ALLOCATION, 1);
}
public void incrAllocations(int by) {
increment(Operation.ALLOCATION, by);
}
public int getNumReservations() {
return operationCounts.get(Operation.RESERVATION);
}
public void incrReservations() {
increment(Operation.RESERVATION, 1);
}
public void incrReservations(int by) {
increment(Operation.RESERVATION, by);
}
private void increment(Operation op, int by) {
operationCounts.put(op, operationCounts.get(op) + by);
}
public Resource getAllocated() {
return operationResources.get(Operation.ALLOCATION);
}
public Resource getReserved() {
return operationResources.get(Operation.RESERVATION);
}
private void addAssignmentDetails(Operation op, ContainerId containerId,
String queue) {
operationDetails.get(op).add(new AssignmentDetails(containerId, queue));
}
public void addAllocationDetails(ContainerId containerId, String queue) {
addAssignmentDetails(Operation.ALLOCATION, containerId, queue);
}
public void addReservationDetails(ContainerId containerId, String queue) {
addAssignmentDetails(Operation.RESERVATION, containerId, queue);
}
public List<AssignmentDetails> getAllocationDetails() {
return operationDetails.get(Operation.ALLOCATION);
}
public List<AssignmentDetails> getReservationDetails() {
return operationDetails.get(Operation.RESERVATION);
}
private ContainerId getFirstContainerIdFromOperation(Operation op) {
    if (null != operationDetails.get(op)) {
      List<AssignmentDetails> assignDetails =
          operationDetails.get(op);
if (!assignDetails.isEmpty()) {
return assignDetails.get(0).containerId;
}
}
return null;
}
public ContainerId getFirstAllocatedOrReservedContainerId() {
    ContainerId containerId =
        getFirstContainerIdFromOperation(Operation.ALLOCATION);
if (null != containerId) {
return containerId;
}
return getFirstContainerIdFromOperation(Operation.RESERVATION);
}
}
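A short, hypothetical usage sketch for the class above (not part of the original file, assumed to share its package). The application, attempt, and container ids and the queue name are fabricated purely for illustration.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

class AssignmentInformationSketch {
  public static void main(String[] args) {
    AssignmentInformation info = new AssignmentInformation();

    ApplicationAttemptId attempt = ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(System.currentTimeMillis(), 1), 1);
    ContainerId container = ContainerId.newContainerId(attempt, 1);

    // Record one allocation against the (made-up) queue "root.default".
    info.incrAllocations();
    info.addAllocationDetails(container, "root.default");

    System.out.println("allocations: " + info.getNumAllocations());
    System.out.println("first container: "
        + info.getFirstAllocatedOrReservedContainerId());
  }
}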
| 4,488 | 30.612676 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFinishedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSAssignment;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityHeadroomProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.AllocationState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.ContainerAllocator;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.RegularContainerAllocator;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.ContainerAllocation;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
/**
* Represents an application attempt from the viewpoint of the FIFO or Capacity
* scheduler.
*/
@Private
@Unstable
public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
private static final Log LOG = LogFactory.getLog(FiCaSchedulerApp.class);
private final Set<ContainerId> containersToPreempt =
new HashSet<ContainerId>();
private CapacityHeadroomProvider headroomProvider;
private ResourceCalculator rc = new DefaultResourceCalculator();
private ResourceScheduler scheduler;
private ContainerAllocator containerAllocator;
public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId,
String user, Queue queue, ActiveUsersManager activeUsersManager,
RMContext rmContext) {
this(applicationAttemptId, user, queue, activeUsersManager, rmContext,
Priority.newInstance(0));
}
public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId,
String user, Queue queue, ActiveUsersManager activeUsersManager,
RMContext rmContext, Priority appPriority) {
super(applicationAttemptId, user, queue, activeUsersManager, rmContext);
RMApp rmApp = rmContext.getRMApps().get(getApplicationId());
Resource amResource;
if (rmApp == null || rmApp.getAMResourceRequest() == null) {
//the rmApp may be undefined (the resource manager checks for this too)
//and unmanaged applications do not provide an amResource request
//in these cases, provide a default using the scheduler
amResource = rmContext.getScheduler().getMinimumResourceCapability();
} else {
amResource = rmApp.getAMResourceRequest().getCapability();
}
setAMResource(amResource);
setPriority(appPriority);
scheduler = rmContext.getScheduler();
if (scheduler.getResourceCalculator() != null) {
rc = scheduler.getResourceCalculator();
}
containerAllocator = new RegularContainerAllocator(this, rc, rmContext);
}
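  /**
   * Handles completion of a container for this attempt: removes it from the
   * live and newly-allocated container lists, informs the RMContainer of the
   * finished event, and releases its resources from the queue metrics and the
   * attempt's resource usage. Returns false if the container was not live.
   */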
synchronized public boolean containerCompleted(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event,
String partition) {
// Remove from the list of containers
if (null == liveContainers.remove(rmContainer.getContainerId())) {
return false;
}
// Remove from the list of newly allocated containers if found
newlyAllocatedContainers.remove(rmContainer);
Container container = rmContainer.getContainer();
ContainerId containerId = container.getId();
// Inform the container
rmContainer.handle(
new RMContainerFinishedEvent(
containerId,
containerStatus,
event)
);
LOG.info("Completed container: " + rmContainer.getContainerId() +
" in state: " + rmContainer.getState() + " event:" + event);
containersToPreempt.remove(rmContainer.getContainerId());
RMAuditLogger.logSuccess(getUser(),
AuditConstants.RELEASE_CONTAINER, "SchedulerApp",
getApplicationId(), containerId);
// Update usage metrics
Resource containerResource = rmContainer.getContainer().getResource();
queue.getMetrics().releaseResources(getUser(), 1, containerResource);
attemptResourceUsage.decUsed(partition, containerResource);
// Clear resource utilization metrics cache.
lastMemoryAggregateAllocationUpdateTime = -1;
return true;
}
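  /**
   * Creates and tracks an RMContainer for a container granted to this attempt
   * on the given node, updates the attempt's resource usage and outstanding
   * requests, and fires the START event. Returns null if the application is
   * stopped or no longer needs resources at this priority.
   */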
synchronized public RMContainer allocate(NodeType type, FiCaSchedulerNode node,
Priority priority, ResourceRequest request,
Container container) {
if (isStopped) {
return null;
}
// Required sanity check - AM can call 'allocate' to update resource
// request without locking the scheduler, hence we need to check
if (getTotalRequiredResources(priority) <= 0) {
return null;
}
// Create RMContainer
RMContainer rmContainer =
new RMContainerImpl(container, this.getApplicationAttemptId(),
node.getNodeID(), appSchedulingInfo.getUser(), this.rmContext,
request.getNodeLabelExpression());
// Add it to allContainers list.
newlyAllocatedContainers.add(rmContainer);
liveContainers.put(container.getId(), rmContainer);
// Update consumption and track allocations
List<ResourceRequest> resourceRequestList = appSchedulingInfo.allocate(
type, node, priority, request, container);
attemptResourceUsage.incUsed(node.getPartition(),
container.getResource());
// Update resource requests related to "request" and store in RMContainer
((RMContainerImpl)rmContainer).setResourceRequests(resourceRequestList);
// Inform the container
rmContainer.handle(
new RMContainerEvent(container.getId(), RMContainerEventType.START));
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: applicationAttemptId="
+ container.getId().getApplicationAttemptId()
+ " container=" + container.getId() + " host="
+ container.getNodeId().getHost() + " type=" + type);
}
RMAuditLogger.logSuccess(getUser(),
AuditConstants.ALLOC_CONTAINER, "SchedulerApp",
getApplicationId(), container.getId());
return rmContainer;
}
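  /**
   * Releases the reservation this attempt holds on the given node at the
   * given priority, updating both the node and the queue's reserved metrics.
   * Returns true if a reservation was actually removed.
   */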
public boolean unreserve(Priority priority,
FiCaSchedulerNode node, RMContainer rmContainer) {
// Done with the reservation?
if (unreserve(node, priority)) {
node.unreserveResource(this);
// Update reserved metrics
queue.getMetrics().unreserveResource(getUser(),
rmContainer.getContainer().getResource());
return true;
}
return false;
}
@VisibleForTesting
public synchronized boolean unreserve(FiCaSchedulerNode node, Priority priority) {
Map<NodeId, RMContainer> reservedContainers =
this.reservedContainers.get(priority);
if (reservedContainers != null) {
RMContainer reservedContainer = reservedContainers.remove(node.getNodeID());
      // unreserve is now triggered in new scenarios (e.g. preemption); as a
      // consequence the reserved container might be null, so add null checks
if (reservedContainer != null
&& reservedContainer.getContainer() != null
&& reservedContainer.getContainer().getResource() != null) {
if (reservedContainers.isEmpty()) {
this.reservedContainers.remove(priority);
}
// Reset the re-reservation count
resetReReservations(priority);
Resource resource = reservedContainer.getContainer().getResource();
this.attemptResourceUsage.decReserved(node.getPartition(), resource);
LOG.info("Application " + getApplicationId() + " unreserved "
+ " on node " + node + ", currently has "
+ reservedContainers.size() + " at priority " + priority
+ "; currentReservation " + this.attemptResourceUsage.getReserved()
+ " on node-label=" + node.getPartition());
return true;
}
}
return false;
}
public synchronized float getLocalityWaitFactor(
Priority priority, int clusterNodes) {
// Estimate: Required unique resources (i.e. hosts + racks)
int requiredResources =
Math.max(this.getResourceRequests(priority).size() - 1, 0);
// waitFactor can't be more than '1'
// i.e. no point skipping more than clustersize opportunities
return Math.min(((float)requiredResources / clusterNodes), 1.0f);
}
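  /**
   * Sums the pending resources across all outstanding requests, counting only
   * ANY (off-switch) requests to avoid double counting host and rack entries.
   */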
public synchronized Resource getTotalPendingRequests() {
Resource ret = Resource.newInstance(0, 0);
for (ResourceRequest rr : appSchedulingInfo.getAllResourceRequests()) {
// to avoid double counting we count only "ANY" resource requests
if (ResourceRequest.isAnyLocation(rr.getResourceName())){
Resources.addTo(ret,
Resources.multiply(rr.getCapability(), rr.getNumContainers()));
}
}
return ret;
}
public synchronized void addPreemptContainer(ContainerId cont){
// ignore already completed containers
if (liveContainers.containsKey(cont)) {
containersToPreempt.add(cont);
}
}
/**
* This method produces an Allocation that includes the current view
* of the resources that will be allocated to and preempted from this
* application.
*
* @param rc
* @param clusterResource
* @param minimumAllocation
* @return an allocation
*/
public synchronized Allocation getAllocation(ResourceCalculator rc,
Resource clusterResource, Resource minimumAllocation) {
Set<ContainerId> currentContPreemption = Collections.unmodifiableSet(
new HashSet<ContainerId>(containersToPreempt));
containersToPreempt.clear();
Resource tot = Resource.newInstance(0, 0);
for(ContainerId c : currentContPreemption){
Resources.addTo(tot,
liveContainers.get(c).getContainer().getResource());
}
int numCont = (int) Math.ceil(
Resources.divide(rc, clusterResource, tot, minimumAllocation));
ResourceRequest rr = ResourceRequest.newInstance(
Priority.UNDEFINED, ResourceRequest.ANY,
minimumAllocation, numCont);
ContainersAndNMTokensAllocation allocation =
pullNewlyAllocatedContainersAndNMTokens();
Resource headroom = getHeadroom();
setApplicationHeadroomForMetrics(headroom);
return new Allocation(allocation.getContainerList(), headroom, null,
currentContPreemption, Collections.singletonList(rr),
allocation.getNMTokenList());
}
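  /**
   * Picks a node whose reservation at the given priority is at least as large
   * as the resource that needs to be freed; returns null if no such
   * reservation exists. The first matching reservation found is used.
   */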
synchronized public NodeId getNodeIdToUnreserve(Priority priority,
Resource resourceNeedUnreserve, ResourceCalculator rc,
Resource clusterResource) {
    // First pass: keep the algorithm simple and just grab the first
    // reservation that has enough resources
Map<NodeId, RMContainer> reservedContainers = this.reservedContainers
.get(priority);
if ((reservedContainers != null) && (!reservedContainers.isEmpty())) {
for (Map.Entry<NodeId, RMContainer> entry : reservedContainers.entrySet()) {
NodeId nodeId = entry.getKey();
Resource containerResource = entry.getValue().getContainer().getResource();
// make sure we unreserve one with at least the same amount of
// resources, otherwise could affect capacity limits
if (Resources.lessThanOrEqual(rc, clusterResource,
resourceNeedUnreserve, containerResource)) {
if (LOG.isDebugEnabled()) {
LOG.debug("unreserving node with reservation size: "
+ containerResource
+ " in order to allocate container with size: " + resourceNeedUnreserve);
}
return nodeId;
}
}
}
return null;
}
public synchronized void setHeadroomProvider(
CapacityHeadroomProvider headroomProvider) {
this.headroomProvider = headroomProvider;
}
public synchronized CapacityHeadroomProvider getHeadroomProvider() {
return headroomProvider;
}
@Override
public synchronized Resource getHeadroom() {
if (headroomProvider != null) {
return headroomProvider.getHeadroom();
}
return super.getHeadroom();
}
@Override
public synchronized void transferStateFromPreviousAttempt(
SchedulerApplicationAttempt appAttempt) {
super.transferStateFromPreviousAttempt(appAttempt);
this.headroomProvider =
((FiCaSchedulerApp) appAttempt).getHeadroomProvider();
}
public void reserve(Priority priority,
FiCaSchedulerNode node, RMContainer rmContainer, Container container) {
// Update reserved metrics if this is the first reservation
if (rmContainer == null) {
queue.getMetrics().reserveResource(
getUser(), container.getResource());
}
// Inform the application
rmContainer = super.reserve(node, priority, rmContainer, container);
// Update the node
node.reserveResource(this, priority, rmContainer);
}
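  /**
   * Finds an existing reservation of this application at the given priority
   * that is at least as large as the needed resource, adds its reserved
   * resource back into the headroom, and returns the reserved container so it
   * can be unreserved; returns null if nothing matches.
   */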
@VisibleForTesting
public RMContainer findNodeToUnreserve(Resource clusterResource,
FiCaSchedulerNode node, Priority priority,
Resource minimumUnreservedResource) {
// need to unreserve some other container first
NodeId idToUnreserve =
getNodeIdToUnreserve(priority, minimumUnreservedResource,
rc, clusterResource);
if (idToUnreserve == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("checked to see if could unreserve for app but nothing "
+ "reserved that matches for this app");
}
return null;
}
FiCaSchedulerNode nodeToUnreserve =
((CapacityScheduler) scheduler).getNode(idToUnreserve);
if (nodeToUnreserve == null) {
LOG.error("node to unreserve doesn't exist, nodeid: " + idToUnreserve);
return null;
}
if (LOG.isDebugEnabled()) {
LOG.debug("unreserving for app: " + getApplicationId()
+ " on nodeId: " + idToUnreserve
+ " in order to replace reserved application and place it on node: "
+ node.getNodeID() + " needing: " + minimumUnreservedResource);
}
// headroom
Resources.addTo(getHeadroom(), nodeToUnreserve
.getReservedContainer().getReservedResource());
return nodeToUnreserve.getReservedContainer();
}
public LeafQueue getCSLeafQueue() {
return (LeafQueue)queue;
}
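  /**
   * Converts a ContainerAllocation result into a CSAssignment, recording
   * either reservation or allocation details (and ordering-policy updates)
   * depending on the allocation state.
   */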
private CSAssignment getCSAssignmentFromAllocateResult(
Resource clusterResource, ContainerAllocation result) {
// Handle skipped
boolean skipped =
(result.getAllocationState() == AllocationState.APP_SKIPPED);
CSAssignment assignment = new CSAssignment(skipped);
assignment.setApplication(this);
// Handle excess reservation
assignment.setExcessReservation(result.getContainerToBeUnreserved());
// If we allocated something
if (Resources.greaterThan(rc, clusterResource,
result.getResourceToBeAllocated(), Resources.none())) {
Resource allocatedResource = result.getResourceToBeAllocated();
Container updatedContainer = result.getUpdatedContainer();
assignment.setResource(allocatedResource);
assignment.setType(result.getContainerNodeType());
if (result.getAllocationState() == AllocationState.RESERVED) {
// This is a reserved container
LOG.info("Reserved container " + " application=" + getApplicationId()
+ " resource=" + allocatedResource + " queue="
+ this.toString() + " cluster=" + clusterResource);
assignment.getAssignmentInformation().addReservationDetails(
updatedContainer.getId(), getCSLeafQueue().getQueuePath());
assignment.getAssignmentInformation().incrReservations();
Resources.addTo(assignment.getAssignmentInformation().getReserved(),
allocatedResource);
assignment.setFulfilledReservation(true);
} else {
// This is a new container
// Inform the ordering policy
LOG.info("assignedContainer" + " application attempt="
+ getApplicationAttemptId() + " container="
+ updatedContainer.getId() + " queue=" + this + " clusterResource="
+ clusterResource);
getCSLeafQueue().getOrderingPolicy().containerAllocated(this,
getRMContainer(updatedContainer.getId()));
assignment.getAssignmentInformation().addAllocationDetails(
updatedContainer.getId(), getCSLeafQueue().getQueuePath());
assignment.getAssignmentInformation().incrAllocations();
Resources.addTo(assignment.getAssignmentInformation().getAllocated(),
allocatedResource);
}
}
return assignment;
}
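  /**
   * Attempts to assign containers to this application on the given node,
   * iterating its priorities in order and delegating to the container
   * allocator; returns SKIP_ASSIGNMENT if the application needs no further
   * resources or every priority was skipped.
   */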
public CSAssignment assignContainers(Resource clusterResource,
FiCaSchedulerNode node, ResourceLimits currentResourceLimits,
SchedulingMode schedulingMode) {
if (LOG.isDebugEnabled()) {
LOG.debug("pre-assignContainers for application "
+ getApplicationId());
showRequests();
}
// Check if application needs more resource, skip if it doesn't need more.
if (!hasPendingResourceRequest(rc,
node.getPartition(), clusterResource, schedulingMode)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip app_attempt=" + getApplicationAttemptId()
+ ", because it doesn't need more resource, schedulingMode="
+ schedulingMode.name() + " node-label=" + node.getPartition());
}
return CSAssignment.SKIP_ASSIGNMENT;
}
synchronized (this) {
// Schedule in priority order
for (Priority priority : getPriorities()) {
ContainerAllocation allocationResult =
containerAllocator.allocate(clusterResource, node,
schedulingMode, currentResourceLimits, priority, null);
// If it's a skipped allocation
AllocationState allocationState = allocationResult.getAllocationState();
if (allocationState == AllocationState.PRIORITY_SKIPPED) {
continue;
}
return getCSAssignmentFromAllocateResult(clusterResource,
allocationResult);
}
}
// We will reach here if we skipped all priorities of the app, so we will
// skip the app.
return CSAssignment.SKIP_ASSIGNMENT;
}
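  /**
   * Tries to fulfil an existing reservation on the given node by running the
   * allocator against the reserved container at its reserved priority.
   */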
public synchronized CSAssignment assignReservedContainer(
FiCaSchedulerNode node, RMContainer rmContainer,
Resource clusterResource, SchedulingMode schedulingMode) {
ContainerAllocation result =
containerAllocator.allocate(clusterResource, node,
schedulingMode, new ResourceLimits(Resources.none()),
rmContainer.getReservedPriority(), rmContainer);
return getCSAssignmentFromAllocateResult(clusterResource, result);
}
}
| 21,698 | 38.596715 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica;
import org.apache.commons.logging.Log;
public class FiCaSchedulerUtils {
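  /**
   * Returns true if the application has blacklisted either the node's host or
   * its rack, logging the reason at debug level.
   */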
public static boolean isBlacklisted(FiCaSchedulerApp application,
FiCaSchedulerNode node, Log LOG) {
if (application.isBlacklisted(node.getNodeName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping 'host' " + node.getNodeName() +
" for " + application.getApplicationId() +
" since it has been blacklisted");
}
return true;
}
if (application.isBlacklisted(node.getRackName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping 'rack' " + node.getRackName() +
" for " + application.getApplicationId() +
" since it has been blacklisted");
}
return true;
}
return false;
}
}
| 1,674 | 33.183673 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
public class FiCaSchedulerNode extends SchedulerNode {
private static final Log LOG = LogFactory.getLog(FiCaSchedulerNode.class);
public FiCaSchedulerNode(RMNode node, boolean usePortForNodeName,
Set<String> nodeLabels) {
super(node, usePortForNodeName, nodeLabels);
}
public FiCaSchedulerNode(RMNode node, boolean usePortForNodeName) {
this(node, usePortForNodeName, CommonNodeLabelsManager.EMPTY_STRING_SET);
}
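  /**
   * Records a reservation for the given attempt on this node, enforcing that
   * only one application attempt may hold a reservation on a node at a time.
   */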
@Override
public synchronized void reserveResource(
SchedulerApplicationAttempt application, Priority priority,
RMContainer container) {
// Check if it's already reserved
RMContainer reservedContainer = getReservedContainer();
if (reservedContainer != null) {
// Sanity check
if (!container.getContainer().getNodeId().equals(getNodeID())) {
throw new IllegalStateException("Trying to reserve" +
" container " + container +
" on node " + container.getReservedNode() +
" when currently" + " reserved resource " + reservedContainer +
" on node " + reservedContainer.getReservedNode());
}
// Cannot reserve more than one application attempt on a given node!
// Reservation is still against attempt.
if (!reservedContainer.getContainer().getId().getApplicationAttemptId()
.equals(container.getContainer().getId().getApplicationAttemptId())) {
throw new IllegalStateException("Trying to reserve" +
" container " + container +
" for application " + application.getApplicationAttemptId() +
" when currently" +
" reserved container " + reservedContainer +
" on node " + this);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Updated reserved container "
+ container.getContainer().getId() + " on node " + this
+ " for application attempt "
+ application.getApplicationAttemptId());
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Reserved container "
+ container.getContainer().getId() + " on node " + this
+ " for application attempt "
+ application.getApplicationAttemptId());
}
}
setReservedContainer(container);
}
@Override
public synchronized void unreserveResource(
SchedulerApplicationAttempt application) {
    // null checks needed because this can now be called during preemption
if (getReservedContainer() != null
&& getReservedContainer().getContainer() != null
&& getReservedContainer().getContainer().getId() != null
&& getReservedContainer().getContainer().getId()
.getApplicationAttemptId() != null) {
// Cannot unreserve for wrong application...
ApplicationAttemptId reservedApplication =
getReservedContainer().getContainer().getId()
.getApplicationAttemptId();
if (!reservedApplication.equals(
application.getApplicationAttemptId())) {
throw new IllegalStateException("Trying to unreserve " +
" for application " + application.getApplicationAttemptId() +
" when currently reserved " +
" for application " + reservedApplication.getApplicationId() +
" on node " + this);
}
}
setReservedContainer(null);
}
}
| 4,851 | 39.773109 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.ContainersAndNMTokensAllocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerRescheduledEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
@LimitedPrivate("yarn")
@Evolving
@SuppressWarnings("unchecked")
public class FifoScheduler extends
AbstractYarnScheduler<FiCaSchedulerApp, FiCaSchedulerNode> implements
Configurable {
private static final Log LOG = LogFactory.getLog(FifoScheduler.class);
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
Configuration conf;
private boolean usePortForNodeName;
private ActiveUsersManager activeUsersManager;
private static final String DEFAULT_QUEUE_NAME = "default";
private QueueMetrics metrics;
private final ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
private final Queue DEFAULT_QUEUE = new Queue() {
@Override
public String getQueueName() {
return DEFAULT_QUEUE_NAME;
}
@Override
public QueueMetrics getMetrics() {
return metrics;
}
@Override
public QueueInfo getQueueInfo(
boolean includeChildQueues, boolean recursive) {
QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
queueInfo.setCapacity(1.0f);
if (clusterResource.getMemory() == 0) {
queueInfo.setCurrentCapacity(0.0f);
} else {
queueInfo.setCurrentCapacity((float) usedResource.getMemory()
/ clusterResource.getMemory());
}
queueInfo.setMaximumCapacity(1.0f);
queueInfo.setChildQueues(new ArrayList<QueueInfo>());
queueInfo.setQueueState(QueueState.RUNNING);
return queueInfo;
}
public Map<QueueACL, AccessControlList> getQueueAcls() {
Map<QueueACL, AccessControlList> acls =
new HashMap<QueueACL, AccessControlList>();
for (QueueACL acl : QueueACL.values()) {
acls.put(acl, new AccessControlList("*"));
}
return acls;
}
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo(
UserGroupInformation unused) {
QueueUserACLInfo queueUserAclInfo =
recordFactory.newRecordInstance(QueueUserACLInfo.class);
queueUserAclInfo.setQueueName(DEFAULT_QUEUE_NAME);
queueUserAclInfo.setUserAcls(Arrays.asList(QueueACL.values()));
return Collections.singletonList(queueUserAclInfo);
}
@Override
public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
return getQueueAcls().get(acl).isUserAllowed(user);
}
@Override
public ActiveUsersManager getActiveUsersManager() {
return activeUsersManager;
}
@Override
public void recoverContainer(Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer) {
if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
return;
}
increaseUsedResources(rmContainer);
updateAppHeadRoom(schedulerAttempt);
updateAvailableResourcesMetrics();
}
@Override
public Set<String> getAccessibleNodeLabels() {
// TODO add implementation for FIFO scheduler
return null;
}
@Override
public String getDefaultNodeLabelExpression() {
// TODO add implementation for FIFO scheduler
return null;
}
@Override
public void incPendingResource(String nodeLabel, Resource resourceToInc) {
}
@Override
public void decPendingResource(String nodeLabel, Resource resourceToDec) {
}
@Override
public Priority getDefaultApplicationPriority() {
// TODO add implementation for FIFO scheduler
return null;
}
};
public FifoScheduler() {
super(FifoScheduler.class.getName());
}
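  /**
   * Initializes scheduler state from configuration: the application map,
   * minimum and maximum allocation sizes, node-name handling, queue metrics
   * and the active users manager.
   */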
private synchronized void initScheduler(Configuration conf) {
validateConf(conf);
//Use ConcurrentSkipListMap because applications need to be ordered
this.applications =
new ConcurrentSkipListMap<ApplicationId, SchedulerApplication<FiCaSchedulerApp>>();
this.minimumAllocation =
Resources.createResource(conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
initMaximumResourceCapability(
Resources.createResource(conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)));
this.usePortForNodeName = conf.getBoolean(
YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false,
conf);
this.activeUsersManager = new ActiveUsersManager(metrics);
}
@Override
public void serviceInit(Configuration conf) throws Exception {
initScheduler(conf);
super.serviceInit(conf);
}
@Override
public void serviceStart() throws Exception {
super.serviceStart();
}
@Override
public void serviceStop() throws Exception {
super.serviceStop();
}
@Override
public synchronized void setConf(Configuration conf) {
this.conf = conf;
}
private void validateConf(Configuration conf) {
// validate scheduler memory allocation setting
int minMem = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int maxMem = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
if (minMem <= 0 || minMem > maxMem) {
throw new YarnRuntimeException("Invalid resource scheduler memory"
+ " allocation configuration"
+ ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB
+ "=" + minMem
+ ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB
+ "=" + maxMem + ", min and max should be greater than 0"
+ ", max should be no smaller than min.");
}
}
@Override
public synchronized Configuration getConf() {
return conf;
}
@Override
public int getNumClusterNodes() {
return nodes.size();
}
@Override
public synchronized void setRMContext(RMContext rmContext) {
this.rmContext = rmContext;
}
@Override
public synchronized void
reinitialize(Configuration conf, RMContext rmContext) throws IOException
{
setConf(conf);
}
@Override
public Allocation allocate(
ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
List<ContainerId> release, List<String> blacklistAdditions, List<String> blacklistRemovals) {
FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
if (application == null) {
LOG.error("Calling allocate on removed " +
"or non existant application " + applicationAttemptId);
return EMPTY_ALLOCATION;
}
// Sanity check
SchedulerUtils.normalizeRequests(ask, resourceCalculator,
clusterResource, minimumAllocation, getMaximumResourceCapability());
// Release containers
releaseContainers(release, application);
synchronized (application) {
// make sure we aren't stopping/removing the application
// when the allocate comes in
if (application.isStopped()) {
LOG.info("Calling allocate on a stopped " +
"application " + applicationAttemptId);
return EMPTY_ALLOCATION;
}
if (!ask.isEmpty()) {
LOG.debug("allocate: pre-update" +
" applicationId=" + applicationAttemptId +
" application=" + application);
application.showRequests();
// Update application requests
application.updateResourceRequests(ask);
LOG.debug("allocate: post-update" +
" applicationId=" + applicationAttemptId +
" application=" + application);
application.showRequests();
LOG.debug("allocate:" +
" applicationId=" + applicationAttemptId +
" #ask=" + ask.size());
}
application.updateBlacklist(blacklistAdditions, blacklistRemovals);
ContainersAndNMTokensAllocation allocation =
application.pullNewlyAllocatedContainersAndNMTokens();
Resource headroom = application.getHeadroom();
application.setApplicationHeadroomForMetrics(headroom);
return new Allocation(allocation.getContainerList(), headroom, null,
null, null, allocation.getNMTokenList());
}
}
private FiCaSchedulerNode getNode(NodeId nodeId) {
return nodes.get(nodeId);
}
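  /**
   * Registers a newly submitted application with the scheduler's single
   * default queue and, unless the app is recovering, notifies the RMApp that
   * it has been accepted.
   */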
@VisibleForTesting
public synchronized void addApplication(ApplicationId applicationId,
String queue, String user, boolean isAppRecovering) {
SchedulerApplication<FiCaSchedulerApp> application =
new SchedulerApplication<FiCaSchedulerApp>(DEFAULT_QUEUE, user);
applications.put(applicationId, application);
metrics.submitApp(user);
LOG.info("Accepted application " + applicationId + " from user: " + user
+ ", currently num of applications: " + applications.size());
if (isAppRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
}
} else {
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
}
}
@VisibleForTesting
public synchronized void
addApplicationAttempt(ApplicationAttemptId appAttemptId,
boolean transferStateFromPreviousAttempt,
boolean isAttemptRecovering) {
SchedulerApplication<FiCaSchedulerApp> application =
applications.get(appAttemptId.getApplicationId());
String user = application.getUser();
// TODO: Fix store
FiCaSchedulerApp schedulerApp =
new FiCaSchedulerApp(appAttemptId, user, DEFAULT_QUEUE,
activeUsersManager, this.rmContext);
if (transferStateFromPreviousAttempt) {
schedulerApp.transferStateFromPreviousAttempt(application
.getCurrentAppAttempt());
}
application.setCurrentAppAttempt(schedulerApp);
metrics.submitAppAttempt(user);
LOG.info("Added Application Attempt " + appAttemptId
+ " to scheduler from user " + application.getUser());
if (isAttemptRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(appAttemptId
+ " is recovering. Skipping notifying ATTEMPT_ADDED");
}
} else {
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(appAttemptId,
RMAppAttemptEventType.ATTEMPT_ADDED));
}
}
private synchronized void doneApplication(ApplicationId applicationId,
RMAppState finalState) {
SchedulerApplication<FiCaSchedulerApp> application =
applications.get(applicationId);
if (application == null){
LOG.warn("Couldn't find application " + applicationId);
return;
}
// Inform the activeUsersManager
activeUsersManager.deactivateApplication(application.getUser(),
applicationId);
application.stop(finalState);
applications.remove(applicationId);
}
private synchronized void doneApplicationAttempt(
ApplicationAttemptId applicationAttemptId,
RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers)
throws IOException {
FiCaSchedulerApp attempt = getApplicationAttempt(applicationAttemptId);
SchedulerApplication<FiCaSchedulerApp> application =
applications.get(applicationAttemptId.getApplicationId());
if (application == null || attempt == null) {
throw new IOException("Unknown application " + applicationAttemptId +
" has completed!");
}
// Kill all 'live' containers
for (RMContainer container : attempt.getLiveContainers()) {
if (keepContainers
&& container.getState().equals(RMContainerState.RUNNING)) {
// do not kill the running container in the case of work-preserving AM
// restart.
LOG.info("Skip killing " + container.getContainerId());
continue;
}
completedContainer(container,
SchedulerUtils.createAbnormalContainerStatus(
container.getContainerId(), SchedulerUtils.COMPLETED_APPLICATION),
RMContainerEventType.KILL);
}
// Clean up pending requests, metrics etc.
attempt.stop(rmAppAttemptFinalState);
}
/**
* Heart of the scheduler...
*
* @param node node on which resources are available to be allocated
*/
private void assignContainers(FiCaSchedulerNode node) {
LOG.debug("assignContainers:" +
" node=" + node.getRMNode().getNodeAddress() +
" #applications=" + applications.size());
// Try to assign containers to applications in fifo order
for (Map.Entry<ApplicationId, SchedulerApplication<FiCaSchedulerApp>> e : applications
.entrySet()) {
FiCaSchedulerApp application = e.getValue().getCurrentAppAttempt();
if (application == null) {
continue;
}
LOG.debug("pre-assignContainers");
application.showRequests();
synchronized (application) {
// Check if this resource is on the blacklist
if (SchedulerAppUtils.isBlacklisted(application, node, LOG)) {
continue;
}
for (Priority priority : application.getPriorities()) {
int maxContainers =
getMaxAllocatableContainers(application, priority, node,
NodeType.OFF_SWITCH);
// Ensure the application needs containers of this priority
if (maxContainers > 0) {
int assignedContainers =
assignContainersOnNode(node, application, priority);
// Do not assign out of order w.r.t priorities
if (assignedContainers == 0) {
break;
}
}
}
}
LOG.debug("post-assignContainers");
application.showRequests();
// Done
if (Resources.lessThan(resourceCalculator, clusterResource,
node.getAvailableResource(), minimumAllocation)) {
break;
}
}
// Update the applications' headroom to correctly take into
// account the containers assigned in this update.
for (SchedulerApplication<FiCaSchedulerApp> application : applications.values()) {
FiCaSchedulerApp attempt =
(FiCaSchedulerApp) application.getCurrentAppAttempt();
if (attempt == null) {
continue;
}
updateAppHeadRoom(attempt);
}
}
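  /**
   * Computes how many containers of the given priority can still be allocated
   * to the application for the given locality type, capped by the matching
   * off-switch, rack-local and node-local requests.
   */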
private int getMaxAllocatableContainers(FiCaSchedulerApp application,
Priority priority, FiCaSchedulerNode node, NodeType type) {
int maxContainers = 0;
ResourceRequest offSwitchRequest =
application.getResourceRequest(priority, ResourceRequest.ANY);
if (offSwitchRequest != null) {
maxContainers = offSwitchRequest.getNumContainers();
}
if (type == NodeType.OFF_SWITCH) {
return maxContainers;
}
if (type == NodeType.RACK_LOCAL) {
ResourceRequest rackLocalRequest =
application.getResourceRequest(priority, node.getRMNode().getRackName());
if (rackLocalRequest == null) {
return maxContainers;
}
maxContainers = Math.min(maxContainers, rackLocalRequest.getNumContainers());
}
if (type == NodeType.NODE_LOCAL) {
ResourceRequest nodeLocalRequest =
application.getResourceRequest(priority, node.getRMNode().getNodeAddress());
if (nodeLocalRequest != null) {
maxContainers = Math.min(maxContainers, nodeLocalRequest.getNumContainers());
}
}
return maxContainers;
}
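  /**
   * Assigns node-local, then rack-local, then off-switch containers for the
   * given priority on this node, returning the total number assigned.
   */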
private int assignContainersOnNode(FiCaSchedulerNode node,
FiCaSchedulerApp application, Priority priority
) {
// Data-local
int nodeLocalContainers =
assignNodeLocalContainers(node, application, priority);
// Rack-local
int rackLocalContainers =
assignRackLocalContainers(node, application, priority);
// Off-switch
int offSwitchContainers =
assignOffSwitchContainers(node, application, priority);
LOG.debug("assignContainersOnNode:" +
" node=" + node.getRMNode().getNodeAddress() +
" application=" + application.getApplicationId().getId() +
" priority=" + priority.getPriority() +
" #assigned=" +
(nodeLocalContainers + rackLocalContainers + offSwitchContainers));
return (nodeLocalContainers + rackLocalContainers + offSwitchContainers);
}
private int assignNodeLocalContainers(FiCaSchedulerNode node,
FiCaSchedulerApp application, Priority priority) {
int assignedContainers = 0;
ResourceRequest request =
application.getResourceRequest(priority, node.getNodeName());
if (request != null) {
// Don't allocate on this node if we don't need containers on this rack
ResourceRequest rackRequest =
application.getResourceRequest(priority,
node.getRMNode().getRackName());
if (rackRequest == null || rackRequest.getNumContainers() <= 0) {
return 0;
}
int assignableContainers =
Math.min(
getMaxAllocatableContainers(application, priority, node,
NodeType.NODE_LOCAL),
request.getNumContainers());
assignedContainers =
assignContainer(node, application, priority,
assignableContainers, request, NodeType.NODE_LOCAL);
}
return assignedContainers;
}
private int assignRackLocalContainers(FiCaSchedulerNode node,
FiCaSchedulerApp application, Priority priority) {
int assignedContainers = 0;
ResourceRequest request =
application.getResourceRequest(priority, node.getRMNode().getRackName());
if (request != null) {
      // Don't allocate on this rack if the application doesn't need containers
ResourceRequest offSwitchRequest =
application.getResourceRequest(priority, ResourceRequest.ANY);
if (offSwitchRequest.getNumContainers() <= 0) {
return 0;
}
int assignableContainers =
Math.min(
getMaxAllocatableContainers(application, priority, node,
NodeType.RACK_LOCAL),
request.getNumContainers());
assignedContainers =
assignContainer(node, application, priority,
assignableContainers, request, NodeType.RACK_LOCAL);
}
return assignedContainers;
}
private int assignOffSwitchContainers(FiCaSchedulerNode node,
FiCaSchedulerApp application, Priority priority) {
int assignedContainers = 0;
ResourceRequest request =
application.getResourceRequest(priority, ResourceRequest.ANY);
if (request != null) {
assignedContainers =
assignContainer(node, application, priority,
request.getNumContainers(), request, NodeType.OFF_SWITCH);
}
return assignedContainers;
}
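  /**
   * Allocates up to assignableContainers containers of the requested
   * capability on the node, capped by the node's available memory, creating
   * an RMContainer for each, informing the application and node, and updating
   * cluster usage.
   */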
private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application,
Priority priority, int assignableContainers,
ResourceRequest request, NodeType type) {
LOG.debug("assignContainers:" +
" node=" + node.getRMNode().getNodeAddress() +
" application=" + application.getApplicationId().getId() +
" priority=" + priority.getPriority() +
" assignableContainers=" + assignableContainers +
" request=" + request + " type=" + type);
Resource capability = request.getCapability();
int availableContainers =
        node.getAvailableResource().getMemory() / capability.getMemory();
    // TODO: A buggy application requesting a zero-memory capability would
    // cause a divide-by-zero here and crash the scheduler.
int assignedContainers =
Math.min(assignableContainers, availableContainers);
if (assignedContainers > 0) {
for (int i=0; i < assignedContainers; ++i) {
NodeId nodeId = node.getRMNode().getNodeID();
ContainerId containerId = BuilderUtils.newContainerId(application
.getApplicationAttemptId(), application.getNewContainerId());
// Create the container
Container container =
BuilderUtils.newContainer(containerId, nodeId, node.getRMNode()
.getHttpAddress(), capability, priority, null);
// Allocate!
// Inform the application
RMContainer rmContainer =
application.allocate(type, node, priority, request, container);
// Inform the node
node.allocateContainer(rmContainer);
// Update usage for this container
increaseUsedResources(rmContainer);
}
}
return assignedContainers;
}
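  /**
   * Processes a node heartbeat: handles newly launched and completed
   * containers, then, if the node has at least the minimum allocation free
   * (and the scheduler is ready after recovery), tries to assign containers.
   */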
private synchronized void nodeUpdate(RMNode rmNode) {
FiCaSchedulerNode node = getNode(rmNode.getNodeID());
List<UpdatedContainerInfo> containerInfoList = rmNode.pullContainerUpdates();
List<ContainerStatus> newlyLaunchedContainers = new ArrayList<ContainerStatus>();
List<ContainerStatus> completedContainers = new ArrayList<ContainerStatus>();
for(UpdatedContainerInfo containerInfo : containerInfoList) {
newlyLaunchedContainers.addAll(containerInfo.getNewlyLaunchedContainers());
completedContainers.addAll(containerInfo.getCompletedContainers());
}
// Processing the newly launched containers
for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
containerLaunchedOnNode(launchedContainer.getContainerId(), node);
}
// Process completed containers
for (ContainerStatus completedContainer : completedContainers) {
ContainerId containerId = completedContainer.getContainerId();
LOG.debug("Container FINISHED: " + containerId);
completedContainer(getRMContainer(containerId),
completedContainer, RMContainerEventType.FINISHED);
}
if (rmContext.isWorkPreservingRecoveryEnabled()
&& !rmContext.isSchedulerReadyForAllocatingContainers()) {
return;
}
if (Resources.greaterThanOrEqual(resourceCalculator, clusterResource,
node.getAvailableResource(),minimumAllocation)) {
LOG.debug("Node heartbeat " + rmNode.getNodeID() +
" available resource = " + node.getAvailableResource());
assignContainers(node);
LOG.debug("Node after allocation " + rmNode.getNodeID() + " resource = "
+ node.getAvailableResource());
}
updateAvailableResourcesMetrics();
}
private void increaseUsedResources(RMContainer rmContainer) {
Resources.addTo(usedResource, rmContainer.getAllocatedResource());
}
private void updateAppHeadRoom(SchedulerApplicationAttempt schedulerAttempt) {
schedulerAttempt.setHeadroom(Resources.subtract(clusterResource,
usedResource));
}
private void updateAvailableResourcesMetrics() {
metrics.setAvailableResourcesToQueue(Resources.subtract(clusterResource,
usedResource));
}
@Override
public void handle(SchedulerEvent event) {
switch(event.getType()) {
case NODE_ADDED:
{
NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event;
addNode(nodeAddedEvent.getAddedRMNode());
recoverContainersOnNode(nodeAddedEvent.getContainerReports(),
nodeAddedEvent.getAddedRMNode());
}
break;
case NODE_REMOVED:
{
NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event;
removeNode(nodeRemovedEvent.getRemovedRMNode());
}
break;
case NODE_RESOURCE_UPDATE:
{
NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent =
(NodeResourceUpdateSchedulerEvent)event;
updateNodeResource(nodeResourceUpdatedEvent.getRMNode(),
nodeResourceUpdatedEvent.getResourceOption());
}
break;
case NODE_UPDATE:
{
NodeUpdateSchedulerEvent nodeUpdatedEvent =
(NodeUpdateSchedulerEvent)event;
nodeUpdate(nodeUpdatedEvent.getRMNode());
}
break;
case APP_ADDED:
{
AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event;
addApplication(appAddedEvent.getApplicationId(),
appAddedEvent.getQueue(), appAddedEvent.getUser(),
appAddedEvent.getIsAppRecovering());
}
break;
case APP_REMOVED:
{
AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event;
doneApplication(appRemovedEvent.getApplicationID(),
appRemovedEvent.getFinalState());
}
break;
case APP_ATTEMPT_ADDED:
{
AppAttemptAddedSchedulerEvent appAttemptAddedEvent =
(AppAttemptAddedSchedulerEvent) event;
addApplicationAttempt(appAttemptAddedEvent.getApplicationAttemptId(),
appAttemptAddedEvent.getTransferStateFromPreviousAttempt(),
appAttemptAddedEvent.getIsAttemptRecovering());
}
break;
case APP_ATTEMPT_REMOVED:
{
AppAttemptRemovedSchedulerEvent appAttemptRemovedEvent =
(AppAttemptRemovedSchedulerEvent) event;
try {
doneApplicationAttempt(
appAttemptRemovedEvent.getApplicationAttemptID(),
appAttemptRemovedEvent.getFinalAttemptState(),
appAttemptRemovedEvent.getKeepContainersAcrossAppAttempts());
} catch(IOException ie) {
LOG.error("Unable to remove application "
+ appAttemptRemovedEvent.getApplicationAttemptID(), ie);
}
}
break;
case CONTAINER_EXPIRED:
{
ContainerExpiredSchedulerEvent containerExpiredEvent =
(ContainerExpiredSchedulerEvent) event;
ContainerId containerid = containerExpiredEvent.getContainerId();
completedContainer(getRMContainer(containerid),
SchedulerUtils.createAbnormalContainerStatus(
containerid,
SchedulerUtils.EXPIRED_CONTAINER),
RMContainerEventType.EXPIRE);
}
break;
case CONTAINER_RESCHEDULED:
{
ContainerRescheduledEvent containerRescheduledEvent =
(ContainerRescheduledEvent) event;
RMContainer container = containerRescheduledEvent.getContainer();
recoverResourceRequestForContainer(container);
}
break;
default:
LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!");
}
}
@Lock(FifoScheduler.class)
@Override
protected synchronized void completedContainer(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event) {
if (rmContainer == null) {
LOG.info("Null container completed...");
return;
}
// Get the application for the finished container
Container container = rmContainer.getContainer();
FiCaSchedulerApp application =
getCurrentAttemptForContainer(container.getId());
ApplicationId appId =
container.getId().getApplicationAttemptId().getApplicationId();
// Get the node on which the container was allocated
FiCaSchedulerNode node = getNode(container.getNodeId());
if (application == null) {
LOG.info("Unknown application: " + appId +
" released container " + container.getId() +
" on node: " + node +
" with event: " + event);
return;
}
// Inform the application
application.containerCompleted(rmContainer, containerStatus, event,
RMNodeLabelsManager.NO_LABEL);
// Inform the node
node.releaseContainer(container);
// Update total usage
Resources.subtractFrom(usedResource, container.getResource());
LOG.info("Application attempt " + application.getApplicationAttemptId() +
" released container " + container.getId() +
" on node: " + node +
" with event: " + event);
}
private Resource usedResource = recordFactory.newRecordInstance(Resource.class);
private synchronized void removeNode(RMNode nodeInfo) {
FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
if (node == null) {
return;
}
// Kill running containers
for(RMContainer container : node.getRunningContainers()) {
completedContainer(container,
SchedulerUtils.createAbnormalContainerStatus(
container.getContainerId(),
SchedulerUtils.LOST_CONTAINER),
RMContainerEventType.KILL);
}
//Remove the node
this.nodes.remove(nodeInfo.getNodeID());
updateMaximumAllocation(node, false);
// Update cluster metrics
Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
}
@Override
public QueueInfo getQueueInfo(String queueName,
boolean includeChildQueues, boolean recursive) {
return DEFAULT_QUEUE.getQueueInfo(false, false);
}
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo() {
return DEFAULT_QUEUE.getQueueUserAclInfo(null);
}
@Override
public ResourceCalculator getResourceCalculator() {
return resourceCalculator;
}
private synchronized void addNode(RMNode nodeManager) {
FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
usePortForNodeName);
this.nodes.put(nodeManager.getNodeID(), schedulerNode);
Resources.addTo(clusterResource, nodeManager.getTotalCapability());
updateMaximumAllocation(schedulerNode, true);
}
@Override
public void recover(RMState state) {
// NOT IMPLEMENTED
}
@Override
public RMContainer getRMContainer(ContainerId containerId) {
FiCaSchedulerApp attempt = getCurrentAttemptForContainer(containerId);
return (attempt == null) ? null : attempt.getRMContainer(containerId);
}
@Override
public QueueMetrics getRootQueueMetrics() {
return DEFAULT_QUEUE.getMetrics();
}
@Override
public synchronized boolean checkAccess(UserGroupInformation callerUGI,
QueueACL acl, String queueName) {
return DEFAULT_QUEUE.hasAccess(acl, callerUGI);
}
@Override
public synchronized List<ApplicationAttemptId>
getAppsInQueue(String queueName) {
if (queueName.equals(DEFAULT_QUEUE.getQueueName())) {
List<ApplicationAttemptId> attempts =
new ArrayList<ApplicationAttemptId>(applications.size());
for (SchedulerApplication<FiCaSchedulerApp> app : applications.values()) {
attempts.add(app.getCurrentAppAttempt().getApplicationAttemptId());
}
return attempts;
} else {
return null;
}
}
public Resource getUsedResource() {
return usedResource;
}
}
| 37,090 | 36.016966 | 123 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FifoAppComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.Serializable;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* Order {@link FSAppAttempt} objects by priority and then by submit time, as
* in the default scheduler in Hadoop.
*/
@Private
@Unstable
public class FifoAppComparator implements Comparator<FSAppAttempt>, Serializable {
private static final long serialVersionUID = 3428835083489547918L;
public int compare(FSAppAttempt a1, FSAppAttempt a2) {
int res = a1.getPriority().compareTo(a2.getPriority());
if (res == 0) {
if (a1.getStartTime() < a2.getStartTime()) {
res = -1;
} else {
res = (a1.getStartTime() == a2.getStartTime() ? 0 : 1);
}
}
if (res == 0) {
// If there is a tie, break it by app ID to get a deterministic order
res = a1.getApplicationId().compareTo(a2.getApplicationId());
}
return res;
}
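  // Illustrative tie-breaking (hypothetical values): if two attempts have
  // equal priority, the one with the smaller start time (e.g. 100 vs. 200)
  // sorts first; if the start times are also equal, the ApplicationId
  // comparison gives a stable, deterministic order.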
}
| 1,863 | 34.846154 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfigurationException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* Thrown when the allocation file for {@link QueueManager} is malformed.
*/
@Private
@Unstable
public class AllocationConfigurationException extends Exception {
private static final long serialVersionUID = 4046517047810854249L;
public AllocationConfigurationException(String message) {
super(message);
}
public AllocationConfigurationException(String message, Throwable t) {
super(message, t);
}
}
| 1,442 | 35.075 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementRule.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import com.google.common.annotations.VisibleForTesting;
@Private
@Unstable
public abstract class QueuePlacementRule {
protected boolean create;
public static final Log LOG =
LogFactory.getLog(QueuePlacementRule.class.getName());
/**
* Initializes the rule with any arguments.
*
   * @param create
   *    Whether the rule is allowed to place an app in a queue that is not
   *    already configured.
   * @param args
   *    Additional attributes of the rule's xml element other than create.
*/
public QueuePlacementRule initialize(boolean create, Map<String, String> args) {
this.create = create;
return this;
}
/**
*
* @param requestedQueue
* The queue explicitly requested.
* @param user
* The user submitting the app.
* @param groups
* The groups of the user submitting the app.
* @param configuredQueues
* The queues specified in the scheduler configuration.
* @return
* The queue to place the app into. An empty string indicates that we should
* continue to the next rule, and null indicates that the app should be rejected.
*/
public String assignAppToQueue(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues)
throws IOException {
String queue = getQueueForApp(requestedQueue, user, groups,
configuredQueues);
if (create || configuredQueues.get(FSQueueType.LEAF).contains(queue)
|| configuredQueues.get(FSQueueType.PARENT).contains(queue)) {
return queue;
} else {
return "";
}
}
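  // A minimal sketch (not part of this class; the real driver lives in
  // QueuePlacementPolicy) of how a caller could interpret the contract above,
  // where "" means fall through to the next rule and null means reject:
  //
  //   for (QueuePlacementRule rule : rules) {
  //     String queue = rule.assignAppToQueue(requested, user, groups, queues);
  //     if (queue == null) {
  //       throw new IOException("Application rejected by queue placement policy");
  //     }
  //     if (!queue.isEmpty()) {
  //       return queue;     // this rule produced a concrete queue name
  //     }
  //     // empty string: continue with the next rule
  //   }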
public void initializeFromXml(Element el)
throws AllocationConfigurationException {
boolean create = true;
NamedNodeMap attributes = el.getAttributes();
Map<String, String> args = new HashMap<String, String>();
for (int i = 0; i < attributes.getLength(); i++) {
Node node = attributes.item(i);
String key = node.getNodeName();
String value = node.getNodeValue();
if (key.equals("create")) {
create = Boolean.parseBoolean(value);
} else {
args.put(key, value);
}
}
initialize(create, args);
}
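  // Example of a rule element this method can parse (illustrative snippet of
  // an allocation file; any attribute other than "create" is passed through
  // to initialize() in the args map):
  //
  //   <rule name="specified" create="false" />
  //
  // Here create is parsed as false and "name" ends up in args.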
/**
* Returns true if this rule never tells the policy to continue.
*/
public abstract boolean isTerminal();
/**
* Applies this rule to an app with the given requested queue and user/group
* information.
*
* @param requestedQueue
* The queue specified in the ApplicationSubmissionContext
* @param user
* The user submitting the app.
* @param groups
* The groups of the user submitting the app.
   * @return
   *    The name of the queue to assign the app to, or an empty string to
   *    continue to the next rule.
*/
protected abstract String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues)
throws IOException;
/**
* Places apps in queues by username of the submitter
*/
public static class User extends QueuePlacementRule {
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues) {
return "root." + cleanName(user);
}
@Override
public boolean isTerminal() {
return create;
}
}
/**
* Places apps in queues by primary group of the submitter
*/
public static class PrimaryGroup extends QueuePlacementRule {
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues)
throws IOException {
return "root." + cleanName(groups.getGroups(user).get(0));
}
@Override
public boolean isTerminal() {
return create;
}
}
/**
* Places apps in queues by secondary group of the submitter
*
   * The match is made on the first secondary group that exists among the
   * configured queues.
*/
public static class SecondaryGroupExistingQueue extends QueuePlacementRule {
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues)
throws IOException {
List<String> groupNames = groups.getGroups(user);
for (int i = 1; i < groupNames.size(); i++) {
String group = cleanName(groupNames.get(i));
if (configuredQueues.get(FSQueueType.LEAF).contains("root." + group)
|| configuredQueues.get(FSQueueType.PARENT).contains(
"root." + group)) {
return "root." + group;
}
}
return "";
}
@Override
public boolean isTerminal() {
return false;
}
}
/**
   * Places apps in a queue named after the submitting user, nested under the
   * queue returned by the nested rule.
*/
public static class NestedUserQueue extends QueuePlacementRule {
@VisibleForTesting
QueuePlacementRule nestedRule;
/**
* Parse xml and instantiate the nested rule
*/
@Override
public void initializeFromXml(Element el)
throws AllocationConfigurationException {
NodeList elements = el.getChildNodes();
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
Element element = (Element) node;
if ("rule".equals(element.getTagName())) {
QueuePlacementRule rule = QueuePlacementPolicy
.createAndInitializeRule(node);
if (rule == null) {
throw new AllocationConfigurationException(
"Unable to create nested rule in nestedUserQueue rule");
}
this.nestedRule = rule;
break;
} else {
continue;
}
}
}
if (this.nestedRule == null) {
throw new AllocationConfigurationException(
"No nested rule specified in <nestedUserQueue> rule");
}
super.initializeFromXml(el);
}
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues)
throws IOException {
// Apply the nested rule
String queueName = nestedRule.assignAppToQueue(requestedQueue, user,
groups, configuredQueues);
if (queueName != null && queueName.length() != 0) {
if (!queueName.startsWith("root.")) {
queueName = "root." + queueName;
}
        // Check whether the queue returned by the nested rule is a configured
        // leaf queue; if so, skip to the next rule in the queue placement policy
if (configuredQueues.get(FSQueueType.LEAF).contains(queueName)) {
return "";
}
return queueName + "." + cleanName(user);
}
return queueName;
}
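    // Worked example (hypothetical queues): if the nested rule returns
    // "root.eng" and "root.eng" is not a configured leaf queue, a submission
    // by user "alice" is placed in "root.eng.alice". If the nested rule
    // returns a configured leaf queue instead, "" is returned so the policy
    // moves on to the next rule.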
@Override
public boolean isTerminal() {
return false;
}
}
/**
* Places apps in queues by requested queue of the submitter
*/
public static class Specified extends QueuePlacementRule {
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues) {
if (requestedQueue.equals(YarnConfiguration.DEFAULT_QUEUE_NAME)) {
return "";
} else {
if (!requestedQueue.startsWith("root.")) {
requestedQueue = "root." + requestedQueue;
}
return requestedQueue;
}
}
@Override
public boolean isTerminal() {
return false;
}
}
/**
* Places apps in the specified default queue. If no default queue is
* specified the app is placed in root.default queue.
*/
public static class Default extends QueuePlacementRule {
@VisibleForTesting
String defaultQueueName;
@Override
public QueuePlacementRule initialize(boolean create,
Map<String, String> args) {
if (defaultQueueName == null) {
defaultQueueName = "root." + YarnConfiguration.DEFAULT_QUEUE_NAME;
}
return super.initialize(create, args);
}
@Override
public void initializeFromXml(Element el)
throws AllocationConfigurationException {
defaultQueueName = el.getAttribute("queue");
if (defaultQueueName != null && !defaultQueueName.isEmpty()) {
if (!defaultQueueName.startsWith("root.")) {
defaultQueueName = "root." + defaultQueueName;
}
} else {
defaultQueueName = "root." + YarnConfiguration.DEFAULT_QUEUE_NAME;
}
super.initializeFromXml(el);
}
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues) {
return defaultQueueName;
}
@Override
public boolean isTerminal() {
return true;
}
}
/**
* Rejects all apps
*/
public static class Reject extends QueuePlacementRule {
@Override
public String assignAppToQueue(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues) {
return null;
}
@Override
protected String getQueueForApp(String requestedQueue, String user,
Groups groups, Map<FSQueueType, Set<String>> configuredQueues) {
throw new UnsupportedOperationException();
}
@Override
public boolean isTerminal() {
return true;
}
}
/**
* Replace the periods in the username or groupname with "_dot_" and
* remove trailing and leading whitespace.
*/
protected String cleanName(String name) {
name = name.trim();
if (name.contains(".")) {
String converted = name.replaceAll("\\.", "_dot_");
LOG.warn("Name " + name + " is converted to " + converted
+ " when it is used as a queue name.");
return converted;
} else {
return name;
}
}
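  // Example: cleanName(" first.last ") trims the whitespace and returns
  // "first_dot_last", so a user rule would place the app in
  // "root.first_dot_last".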
}
| 11,310 | 30.15978 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerEventLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.DailyRollingFileAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.spi.LoggingEvent;
/**
* Event log used by the fair scheduler for machine-readable debug info.
* This class uses a log4j rolling file appender to write the log, but uses
* a custom tab-separated event format of the form:
* <pre>
* DATE EVENT_TYPE PARAM_1 PARAM_2 ...
* </pre>
* Various event types are used by the fair scheduler. The purpose of logging
* in this format is to enable tools to parse the history log easily and read
* internal scheduler variables, rather than trying to make the log human
* readable. The fair scheduler also logs human readable messages in the
* JobTracker's main log.
*
* Constructing this class creates a disabled log. It must be initialized
 * using {@link FairSchedulerEventLog#init(FairSchedulerConfiguration)} to begin
* writing to the file.
*/
@Private
@Unstable
class FairSchedulerEventLog {
private static final Log LOG = LogFactory.getLog(FairSchedulerEventLog.class.getName());
/** Set to true if logging is disabled due to an error. */
private boolean logDisabled = true;
/**
* Log directory, set by mapred.fairscheduler.eventlog.location in conf file;
* defaults to {hadoop.log.dir}/fairscheduler.
*/
private String logDir;
/**
* Active log file, which is {LOG_DIR}/hadoop-{user}-fairscheduler.log.
* Older files are also stored as {LOG_FILE}.date (date format YYYY-MM-DD).
*/
private String logFile;
/** Log4j appender used to write to the log file */
private DailyRollingFileAppender appender;
boolean init(FairSchedulerConfiguration conf) {
if (conf.isEventLogEnabled()) {
try {
logDir = conf.getEventlogDir();
File logDirFile = new File(logDir);
if (!logDirFile.exists()) {
if (!logDirFile.mkdirs()) {
throw new IOException(
"Mkdirs failed to create " + logDirFile.toString());
}
}
String username = System.getProperty("user.name");
logFile = String.format("%s%shadoop-%s-fairscheduler.log",
logDir, File.separator, username);
logDisabled = false;
PatternLayout layout = new PatternLayout("%d{ISO8601}\t%m%n");
appender = new DailyRollingFileAppender(layout, logFile, "'.'yyyy-MM-dd");
appender.activateOptions();
LOG.info("Initialized fair scheduler event log, logging to " + logFile);
} catch (IOException e) {
LOG.error(
"Failed to initialize fair scheduler event log. Disabling it.", e);
logDisabled = true;
}
} else {
logDisabled = true;
}
return !(logDisabled);
}
/**
* Log an event, writing a line in the log file of the form
* <pre>
* DATE EVENT_TYPE PARAM_1 PARAM_2 ...
* </pre>
*/
synchronized void log(String eventType, Object... params) {
try {
if (logDisabled)
return;
StringBuffer buffer = new StringBuffer();
buffer.append(eventType);
for (Object param: params) {
buffer.append("\t");
buffer.append(param);
}
String message = buffer.toString();
Logger logger = Logger.getLogger(getClass());
appender.append(new LoggingEvent("", logger, Level.INFO, message, null));
} catch (Exception e) {
LOG.error("Failed to append to fair scheduler event log", e);
logDisabled = true;
}
}
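  /*
   * Illustrative usage, assuming the log was successfully initialized via
   * init() (event type and parameter values below are hypothetical):
   *
   *   eventLog.log("ASSIGN", nodeId, containerId, memoryMb);
   *
   * With the ISO8601 pattern layout above this appends a tab-separated line
   * roughly of the form:
   *
   *   2015-01-30 12:34:56,789 <TAB> ASSIGN <TAB> node1:8041 <TAB> container_... <TAB> 1024
   */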
/**
* Flush and close the log.
*/
synchronized void shutdown() {
try {
if (appender != null)
appender.close();
} catch (Exception e) {
LOG.error("Failed to close fair scheduler event log", e);
logDisabled = true;
}
}
synchronized boolean isEnabled() {
return !logDisabled;
}
public String getLogFile() {
return logFile;
}
}
| 5,185 | 32.895425 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
/**
* A Schedulable represents an entity that can be scheduled such as an
* application or a queue. It provides a common interface so that algorithms
* such as fair sharing can be applied both within a queue and across queues.
*
* A Schedulable is responsible for three roles:
* 1) Assign resources through {@link #assignContainer}.
* 2) It provides information about the app/queue to the scheduler, including:
* - Demand (maximum number of tasks required)
* - Minimum share (for queues)
* - Job/queue weight (for fair sharing)
* - Start time and priority (for FIFO)
* 3) It can be assigned a fair share, for use with fair scheduling.
*
* Schedulable also contains two methods for performing scheduling computations:
* - updateDemand() is called periodically to compute the demand of the various
* jobs and queues, which may be expensive (e.g. jobs must iterate through all
* their tasks to count failed tasks, tasks that can be speculated, etc).
* - redistributeShare() is called after demands are updated and a Schedulable's
* fair share has been set by its parent to let it distribute its share among
* the other Schedulables within it (e.g. for queues that want to perform fair
* sharing among their jobs).
*/
@Private
@Unstable
public interface Schedulable {
/**
* Name of job/queue, used for debugging as well as for breaking ties in
* scheduling order deterministically.
*/
public String getName();
/**
   * Maximum amount of resources required by this Schedulable. This is defined
   * as the resources currently utilized plus the resources requested for
   * containers that have not yet been launched (or that need to be speculated).
*/
public Resource getDemand();
/** Get the aggregate amount of resources consumed by the schedulable. */
public Resource getResourceUsage();
/** Minimum Resource share assigned to the schedulable. */
public Resource getMinShare();
/** Maximum Resource share assigned to the schedulable. */
public Resource getMaxShare();
/** Job/queue weight in fair sharing. */
public ResourceWeights getWeights();
/** Start time for jobs in FIFO queues; meaningless for QueueSchedulables.*/
public long getStartTime();
/** Job priority for jobs in FIFO queues; meaningless for QueueSchedulables. */
public Priority getPriority();
/** Refresh the Schedulable's demand and those of its children if any. */
public void updateDemand();
/**
* Assign a container on this node if possible, and return the amount of
* resources assigned.
*/
public Resource assignContainer(FSSchedulerNode node);
/**
* Preempt a container from this Schedulable if possible.
*/
public RMContainer preemptContainer();
/** Get the fair share assigned to this Schedulable. */
public Resource getFairShare();
/** Assign a fair share to this Schedulable. */
public void setFairShare(Resource fairShare);
}
| 4,207 | 39.07619 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerFinishedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* Represents an application attempt from the viewpoint of the Fair Scheduler.
*/
@Private
@Unstable
public class FSAppAttempt extends SchedulerApplicationAttempt
implements Schedulable {
private static final Log LOG = LogFactory.getLog(FSAppAttempt.class);
private static final DefaultResourceCalculator RESOURCE_CALCULATOR
= new DefaultResourceCalculator();
private long startTime;
private Priority priority;
private ResourceWeights resourceWeights;
private Resource demand = Resources.createResource(0);
private FairScheduler scheduler;
private Resource fairShare = Resources.createResource(0, 0);
private Resource preemptedResources = Resources.createResource(0);
private RMContainerComparator comparator = new RMContainerComparator();
private final Map<RMContainer, Long> preemptionMap = new HashMap<RMContainer, Long>();
/**
* Delay scheduling: We often want to prioritize scheduling of node-local
* containers over rack-local or off-switch containers. To achieve this
* we first only allow node-local assignments for a given priority level,
* then relax the locality threshold once we've had a long enough period
* without successfully scheduling. We measure both the number of "missed"
* scheduling opportunities since the last container was scheduled
* at the current allowed level and the time since the last container
* was scheduled. Currently we use only the former.
*/
private final Map<Priority, NodeType> allowedLocalityLevel =
new HashMap<Priority, NodeType>();
public FSAppAttempt(FairScheduler scheduler,
ApplicationAttemptId applicationAttemptId, String user, FSLeafQueue queue,
ActiveUsersManager activeUsersManager, RMContext rmContext) {
super(applicationAttemptId, user, queue, activeUsersManager, rmContext);
this.scheduler = scheduler;
this.startTime = scheduler.getClock().getTime();
this.priority = Priority.newInstance(1);
this.resourceWeights = new ResourceWeights();
}
public ResourceWeights getResourceWeights() {
return resourceWeights;
}
/**
* Get metrics reference from containing queue.
*/
public QueueMetrics getMetrics() {
return queue.getMetrics();
}
synchronized public void containerCompleted(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event) {
Container container = rmContainer.getContainer();
ContainerId containerId = container.getId();
// Remove from the list of newly allocated containers if found
newlyAllocatedContainers.remove(rmContainer);
// Inform the container
rmContainer.handle(
new RMContainerFinishedEvent(
containerId,
containerStatus,
event)
);
LOG.info("Completed container: " + rmContainer.getContainerId() +
" in state: " + rmContainer.getState() + " event:" + event);
// Remove from the list of containers
liveContainers.remove(rmContainer.getContainerId());
RMAuditLogger.logSuccess(getUser(),
AuditConstants.RELEASE_CONTAINER, "SchedulerApp",
getApplicationId(), containerId);
// Update usage metrics
Resource containerResource = rmContainer.getContainer().getResource();
queue.getMetrics().releaseResources(getUser(), 1, containerResource);
this.attemptResourceUsage.decUsed(containerResource);
// remove from preemption map if it is completed
preemptionMap.remove(rmContainer);
// Clear resource utilization metrics cache.
lastMemoryAggregateAllocationUpdateTime = -1;
}
private synchronized void unreserveInternal(
Priority priority, FSSchedulerNode node) {
Map<NodeId, RMContainer> reservedContainers =
this.reservedContainers.get(priority);
RMContainer reservedContainer = reservedContainers.remove(node.getNodeID());
if (reservedContainers.isEmpty()) {
this.reservedContainers.remove(priority);
}
// Reset the re-reservation count
resetReReservations(priority);
Resource resource = reservedContainer.getContainer().getResource();
this.attemptResourceUsage.decReserved(resource);
LOG.info("Application " + getApplicationId() + " unreserved " + " on node "
+ node + ", currently has " + reservedContainers.size()
+ " at priority " + priority + "; currentReservation "
+ this.attemptResourceUsage.getReserved());
}
/**
* Headroom depends on resources in the cluster, current usage of the
* queue, queue's fair-share and queue's max-resources.
*/
@Override
public Resource getHeadroom() {
final FSQueue queue = (FSQueue) this.queue;
SchedulingPolicy policy = queue.getPolicy();
Resource queueFairShare = queue.getFairShare();
Resource queueUsage = queue.getResourceUsage();
Resource clusterResource = this.scheduler.getClusterResource();
Resource clusterUsage = this.scheduler.getRootQueueMetrics()
.getAllocatedResources();
Resource clusterAvailableResources =
Resources.subtract(clusterResource, clusterUsage);
Resource queueMaxAvailableResources =
Resources.subtract(queue.getMaxShare(), queueUsage);
Resource maxAvailableResource = Resources.componentwiseMin(
clusterAvailableResources, queueMaxAvailableResources);
Resource headroom = policy.getHeadroom(queueFairShare,
queueUsage, maxAvailableResource);
if (LOG.isDebugEnabled()) {
LOG.debug("Headroom calculation for " + this.getName() + ":" +
"Min(" +
"(queueFairShare=" + queueFairShare +
" - queueUsage=" + queueUsage + ")," +
" maxAvailableResource=" + maxAvailableResource +
"Headroom=" + headroom);
}
return headroom;
}
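  /*
   * Worked example with hypothetical numbers: if the queue's fair share is
   * <memory:8192, vCores:8>, its usage is <memory:2048, vCores:2>, the cluster
   * has <memory:4096, vCores:4> unallocated and the queue's max-share leaves
   * plenty of room, then maxAvailableResource is the component-wise minimum
   * <memory:4096, vCores:4>, and the policy-specific getHeadroom() bounds the
   * (fair share - usage) slack of <memory:6144, vCores:6> by that amount.
   */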
public synchronized float getLocalityWaitFactor(
Priority priority, int clusterNodes) {
// Estimate: Required unique resources (i.e. hosts + racks)
int requiredResources =
Math.max(this.getResourceRequests(priority).size() - 1, 0);
// waitFactor can't be more than '1'
// i.e. no point skipping more than clustersize opportunities
return Math.min(((float)requiredResources / clusterNodes), 1.0f);
}
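  // Example with hypothetical numbers: requests on 4 specific hosts/racks plus
  // the ANY request give getResourceRequests(priority).size() == 5, so
  // requiredResources == 4; on a 100-node cluster the wait factor is
  // min(4 / 100f, 1.0f) = 0.04.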
/**
* Return the level at which we are allowed to schedule containers, given the
   * current size of the cluster and thresholds indicating how many scheduling
   * opportunities (as a fraction of cluster size) to pass up before relaxing
   * scheduling constraints.
*/
public synchronized NodeType getAllowedLocalityLevel(Priority priority,
int numNodes, double nodeLocalityThreshold, double rackLocalityThreshold) {
// upper limit on threshold
if (nodeLocalityThreshold > 1.0) { nodeLocalityThreshold = 1.0; }
if (rackLocalityThreshold > 1.0) { rackLocalityThreshold = 1.0; }
// If delay scheduling is not being used, can schedule anywhere
if (nodeLocalityThreshold < 0.0 || rackLocalityThreshold < 0.0) {
return NodeType.OFF_SWITCH;
}
// Default level is NODE_LOCAL
if (!allowedLocalityLevel.containsKey(priority)) {
allowedLocalityLevel.put(priority, NodeType.NODE_LOCAL);
return NodeType.NODE_LOCAL;
}
NodeType allowed = allowedLocalityLevel.get(priority);
// If level is already most liberal, we're done
if (allowed.equals(NodeType.OFF_SWITCH)) return NodeType.OFF_SWITCH;
double threshold = allowed.equals(NodeType.NODE_LOCAL) ? nodeLocalityThreshold :
rackLocalityThreshold;
// Relax locality constraints once we've surpassed threshold.
if (getSchedulingOpportunities(priority) > (numNodes * threshold)) {
if (allowed.equals(NodeType.NODE_LOCAL)) {
allowedLocalityLevel.put(priority, NodeType.RACK_LOCAL);
resetSchedulingOpportunities(priority);
}
else if (allowed.equals(NodeType.RACK_LOCAL)) {
allowedLocalityLevel.put(priority, NodeType.OFF_SWITCH);
resetSchedulingOpportunities(priority);
}
}
return allowedLocalityLevel.get(priority);
}
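  // Worked example (hypothetical values): with numNodes = 100 and
  // nodeLocalityThreshold = 0.1, the level is relaxed from NODE_LOCAL to
  // RACK_LOCAL only after more than 100 * 0.1 = 10 missed scheduling
  // opportunities at that priority; rackLocalityThreshold then governs the
  // step from RACK_LOCAL to OFF_SWITCH in the same way.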
/**
   * Return the level at which we are allowed to schedule containers, given
   * the delay thresholds that determine how much time must pass before
   * relaxing scheduling constraints.
*/
public synchronized NodeType getAllowedLocalityLevelByTime(Priority priority,
long nodeLocalityDelayMs, long rackLocalityDelayMs,
long currentTimeMs) {
// if not being used, can schedule anywhere
if (nodeLocalityDelayMs < 0 || rackLocalityDelayMs < 0) {
return NodeType.OFF_SWITCH;
}
// default level is NODE_LOCAL
if (! allowedLocalityLevel.containsKey(priority)) {
allowedLocalityLevel.put(priority, NodeType.NODE_LOCAL);
return NodeType.NODE_LOCAL;
}
NodeType allowed = allowedLocalityLevel.get(priority);
// if level is already most liberal, we're done
if (allowed.equals(NodeType.OFF_SWITCH)) {
return NodeType.OFF_SWITCH;
}
// check waiting time
long waitTime = currentTimeMs;
if (lastScheduledContainer.containsKey(priority)) {
waitTime -= lastScheduledContainer.get(priority);
} else {
waitTime -= getStartTime();
}
long thresholdTime = allowed.equals(NodeType.NODE_LOCAL) ?
nodeLocalityDelayMs : rackLocalityDelayMs;
if (waitTime > thresholdTime) {
if (allowed.equals(NodeType.NODE_LOCAL)) {
allowedLocalityLevel.put(priority, NodeType.RACK_LOCAL);
resetSchedulingOpportunities(priority, currentTimeMs);
} else if (allowed.equals(NodeType.RACK_LOCAL)) {
allowedLocalityLevel.put(priority, NodeType.OFF_SWITCH);
resetSchedulingOpportunities(priority, currentTimeMs);
}
}
return allowedLocalityLevel.get(priority);
}
synchronized public RMContainer allocate(NodeType type, FSSchedulerNode node,
Priority priority, ResourceRequest request,
Container container) {
// Update allowed locality level
NodeType allowed = allowedLocalityLevel.get(priority);
if (allowed != null) {
if (allowed.equals(NodeType.OFF_SWITCH) &&
(type.equals(NodeType.NODE_LOCAL) ||
type.equals(NodeType.RACK_LOCAL))) {
this.resetAllowedLocalityLevel(priority, type);
}
else if (allowed.equals(NodeType.RACK_LOCAL) &&
type.equals(NodeType.NODE_LOCAL)) {
this.resetAllowedLocalityLevel(priority, type);
}
}
// Required sanity check - AM can call 'allocate' to update resource
// request without locking the scheduler, hence we need to check
if (getTotalRequiredResources(priority) <= 0) {
return null;
}
// Create RMContainer
RMContainer rmContainer = new RMContainerImpl(container,
getApplicationAttemptId(), node.getNodeID(),
appSchedulingInfo.getUser(), rmContext);
// Add it to allContainers list.
newlyAllocatedContainers.add(rmContainer);
liveContainers.put(container.getId(), rmContainer);
// Update consumption and track allocations
List<ResourceRequest> resourceRequestList = appSchedulingInfo.allocate(
type, node, priority, request, container);
this.attemptResourceUsage.incUsed(container.getResource());
// Update resource requests related to "request" and store in RMContainer
((RMContainerImpl) rmContainer).setResourceRequests(resourceRequestList);
// Inform the container
rmContainer.handle(
new RMContainerEvent(container.getId(), RMContainerEventType.START));
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: applicationAttemptId="
+ container.getId().getApplicationAttemptId()
+ " container=" + container.getId() + " host="
+ container.getNodeId().getHost() + " type=" + type);
}
RMAuditLogger.logSuccess(getUser(),
AuditConstants.ALLOC_CONTAINER, "SchedulerApp",
getApplicationId(), container.getId());
return rmContainer;
}
/**
* Should be called when the scheduler assigns a container at a higher
* degree of locality than the current threshold. Reset the allowed locality
* level to a higher degree of locality.
*/
public synchronized void resetAllowedLocalityLevel(Priority priority,
NodeType level) {
NodeType old = allowedLocalityLevel.get(priority);
LOG.info("Raising locality level from " + old + " to " + level + " at " +
" priority " + priority);
allowedLocalityLevel.put(priority, level);
}
  // Preemption related methods
public void addPreemption(RMContainer container, long time) {
assert preemptionMap.get(container) == null;
preemptionMap.put(container, time);
Resources.addTo(preemptedResources, container.getAllocatedResource());
}
public Long getContainerPreemptionTime(RMContainer container) {
return preemptionMap.get(container);
}
public Set<RMContainer> getPreemptionContainers() {
return preemptionMap.keySet();
}
@Override
public FSLeafQueue getQueue() {
return (FSLeafQueue)super.getQueue();
}
public Resource getPreemptedResources() {
return preemptedResources;
}
public void resetPreemptedResources() {
preemptedResources = Resources.createResource(0);
for (RMContainer container : getPreemptionContainers()) {
Resources.addTo(preemptedResources, container.getAllocatedResource());
}
}
public void clearPreemptedResources() {
preemptedResources.setMemory(0);
preemptedResources.setVirtualCores(0);
}
/**
* Create and return a container object reflecting an allocation for the
   * given application on the given node with the given capability and
* priority.
*/
public Container createContainer(
FSSchedulerNode node, Resource capability, Priority priority) {
NodeId nodeId = node.getRMNode().getNodeID();
ContainerId containerId = BuilderUtils.newContainerId(
getApplicationAttemptId(), getNewContainerId());
// Create the container
Container container =
BuilderUtils.newContainer(containerId, nodeId, node.getRMNode()
.getHttpAddress(), capability, priority, null);
return container;
}
/**
* Reserve a spot for {@code container} on this {@code node}. If
* the container is {@code alreadyReserved} on the node, simply
   * update the relevant bookkeeping. This dispatches to the relevant handlers
   * in {@link FSSchedulerNode}.
*/
private void reserve(Priority priority, FSSchedulerNode node,
Container container, boolean alreadyReserved) {
LOG.info("Making reservation: node=" + node.getNodeName() +
" app_id=" + getApplicationId());
if (!alreadyReserved) {
getMetrics().reserveResource(getUser(), container.getResource());
RMContainer rmContainer =
super.reserve(node, priority, null, container);
node.reserveResource(this, priority, rmContainer);
} else {
RMContainer rmContainer = node.getReservedContainer();
super.reserve(node, priority, rmContainer, container);
node.reserveResource(this, priority, rmContainer);
}
}
/**
* Remove the reservation on {@code node} at the given {@link Priority}.
* This dispatches SchedulerNode handlers as well.
*/
public void unreserve(Priority priority, FSSchedulerNode node) {
RMContainer rmContainer = node.getReservedContainer();
unreserveInternal(priority, node);
node.unreserveResource(this);
getMetrics().unreserveResource(
getUser(), rmContainer.getContainer().getResource());
}
/**
   * Assign a container to this node to facilitate {@code request}. If the node
   * does not have enough resources, create a reservation. This is called once we are
* sure the particular request should be facilitated by this node.
*
* @param node
* The node to try placing the container on.
* @param request
* The ResourceRequest we're trying to satisfy.
* @param type
* The locality of the assignment.
* @param reserved
* Whether there's already a container reserved for this app on the node.
* @return
* If an assignment was made, returns the resources allocated to the
* container. If a reservation was made, returns
* FairScheduler.CONTAINER_RESERVED. If no assignment or reservation was
* made, returns an empty resource.
*/
private Resource assignContainer(
FSSchedulerNode node, ResourceRequest request, NodeType type,
boolean reserved) {
// How much does this request need?
Resource capability = request.getCapability();
// How much does the node have?
Resource available = node.getAvailableResource();
Container container = null;
if (reserved) {
container = node.getReservedContainer().getContainer();
} else {
container = createContainer(node, capability, request.getPriority());
}
// Can we allocate a container on this node?
if (Resources.fitsIn(capability, available)) {
// Inform the application of the new container for this request
RMContainer allocatedContainer =
allocate(type, node, request.getPriority(), request, container);
if (allocatedContainer == null) {
// Did the application need this resource?
if (reserved) {
unreserve(request.getPriority(), node);
}
return Resources.none();
}
// If we had previously made a reservation, delete it
if (reserved) {
unreserve(request.getPriority(), node);
}
// Inform the node
node.allocateContainer(allocatedContainer);
// If not running unmanaged, the first container we allocate is always
// the AM. Set the amResource for this app and update the leaf queue's AM
// usage
if (!isAmRunning() && !getUnmanagedAM()) {
setAMResource(container.getResource());
getQueue().addAMResourceUsage(container.getResource());
setAmRunning(true);
}
return container.getResource();
}
// The desired container won't fit here, so reserve
reserve(request.getPriority(), node, container, reserved);
return FairScheduler.CONTAINER_RESERVED;
}
private boolean hasNodeOrRackLocalRequests(Priority priority) {
return getResourceRequests(priority).size() > 1;
}
/**
* Whether the AM container for this app is over maxAMShare limit.
*/
private boolean isOverAMShareLimit() {
// Check the AM resource usage for the leaf queue
if (!isAmRunning() && !getUnmanagedAM()) {
List<ResourceRequest> ask = appSchedulingInfo.getAllResourceRequests();
if (ask.isEmpty() || !getQueue().canRunAppAM(
ask.get(0).getCapability())) {
return true;
}
}
return false;
}
private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
if (LOG.isDebugEnabled()) {
LOG.debug("Node offered to app: " + getName() + " reserved: " + reserved);
}
Collection<Priority> prioritiesToTry = (reserved) ?
Arrays.asList(node.getReservedContainer().getReservedPriority()) :
getPriorities();
// For each priority, see if we can schedule a node local, rack local
    // or off-switch request. Rack or off-switch requests may be delayed
// (not scheduled) in order to promote better locality.
synchronized (this) {
for (Priority priority : prioritiesToTry) {
// Skip it for reserved container, since
// we already check it in isValidReservation.
if (!reserved && !hasContainerForNode(priority, node)) {
continue;
}
addSchedulingOpportunity(priority);
ResourceRequest rackLocalRequest = getResourceRequest(priority,
node.getRackName());
ResourceRequest localRequest = getResourceRequest(priority,
node.getNodeName());
if (localRequest != null && !localRequest.getRelaxLocality()) {
LOG.warn("Relax locality off is not supported on local request: "
+ localRequest);
}
NodeType allowedLocality;
if (scheduler.isContinuousSchedulingEnabled()) {
allowedLocality = getAllowedLocalityLevelByTime(priority,
scheduler.getNodeLocalityDelayMs(),
scheduler.getRackLocalityDelayMs(),
scheduler.getClock().getTime());
} else {
allowedLocality = getAllowedLocalityLevel(priority,
scheduler.getNumClusterNodes(),
scheduler.getNodeLocalityThreshold(),
scheduler.getRackLocalityThreshold());
}
if (rackLocalRequest != null && rackLocalRequest.getNumContainers() != 0
&& localRequest != null && localRequest.getNumContainers() != 0) {
return assignContainer(node, localRequest,
NodeType.NODE_LOCAL, reserved);
}
if (rackLocalRequest != null && !rackLocalRequest.getRelaxLocality()) {
continue;
}
if (rackLocalRequest != null && rackLocalRequest.getNumContainers() != 0
&& (allowedLocality.equals(NodeType.RACK_LOCAL) ||
allowedLocality.equals(NodeType.OFF_SWITCH))) {
return assignContainer(node, rackLocalRequest,
NodeType.RACK_LOCAL, reserved);
}
ResourceRequest offSwitchRequest =
getResourceRequest(priority, ResourceRequest.ANY);
if (offSwitchRequest != null && !offSwitchRequest.getRelaxLocality()) {
continue;
}
if (offSwitchRequest != null &&
offSwitchRequest.getNumContainers() != 0) {
if (!hasNodeOrRackLocalRequests(priority) ||
allowedLocality.equals(NodeType.OFF_SWITCH)) {
return assignContainer(
node, offSwitchRequest, NodeType.OFF_SWITCH, reserved);
}
}
}
}
return Resources.none();
}
/**
* Whether this app has containers requests that could be satisfied on the
* given node, if the node had full space.
*/
private boolean hasContainerForNode(Priority prio, FSSchedulerNode node) {
ResourceRequest anyRequest = getResourceRequest(prio, ResourceRequest.ANY);
ResourceRequest rackRequest = getResourceRequest(prio, node.getRackName());
ResourceRequest nodeRequest = getResourceRequest(prio, node.getNodeName());
return
// There must be outstanding requests at the given priority:
anyRequest != null && anyRequest.getNumContainers() > 0 &&
// If locality relaxation is turned off at *-level, there must be a
// non-zero request for the node's rack:
(anyRequest.getRelaxLocality() ||
(rackRequest != null && rackRequest.getNumContainers() > 0)) &&
// If locality relaxation is turned off at rack-level, there must be a
// non-zero request at the node:
(rackRequest == null || rackRequest.getRelaxLocality() ||
(nodeRequest != null && nodeRequest.getNumContainers() > 0)) &&
// The requested container must be able to fit on the node:
Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null,
anyRequest.getCapability(),
node.getRMNode().getTotalCapability()) &&
// The requested container must fit in queue maximum share:
getQueue().fitsInMaxShare(anyRequest.getCapability());
}
private boolean isValidReservation(FSSchedulerNode node) {
Priority reservedPriority = node.getReservedContainer().
getReservedPriority();
return hasContainerForNode(reservedPriority, node) &&
!isOverAMShareLimit();
}
/**
* Called when this application already has an existing reservation on the
* given node. Sees whether we can turn the reservation into an allocation.
* Also checks whether the application needs the reservation anymore, and
* releases it if not.
*
* @param node
* Node that the application has an existing reservation on
* @return whether the reservation on the given node is valid.
*/
public boolean assignReservedContainer(FSSchedulerNode node) {
RMContainer rmContainer = node.getReservedContainer();
Priority reservedPriority = rmContainer.getReservedPriority();
if (!isValidReservation(node)) {
// Don't hold the reservation if app can no longer use it
LOG.info("Releasing reservation that cannot be satisfied for " +
"application " + getApplicationAttemptId() + " on node " + node);
unreserve(reservedPriority, node);
return false;
}
// Reservation valid; try to fulfill the reservation
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to fulfill reservation for application "
+ getApplicationAttemptId() + " on node: " + node);
}
    // Only try to fulfill the reservation if the reserved container currently
    // fits in the node's available space. Note that we assume there is only
    // one container size per priority.
if (Resources.fitsIn(node.getReservedContainer().getReservedResource(),
node.getAvailableResource())) {
assignContainer(node, true);
}
return true;
}
static class RMContainerComparator implements Comparator<RMContainer>,
Serializable {
@Override
public int compare(RMContainer c1, RMContainer c2) {
int ret = c1.getContainer().getPriority().compareTo(
c2.getContainer().getPriority());
if (ret == 0) {
return c2.getContainerId().compareTo(c1.getContainerId());
}
return ret;
}
}
/* Schedulable methods implementation */
@Override
public String getName() {
return getApplicationId().toString();
}
@Override
public Resource getDemand() {
return demand;
}
@Override
public long getStartTime() {
return startTime;
}
@Override
public Resource getMinShare() {
return Resources.none();
}
@Override
public Resource getMaxShare() {
return Resources.unbounded();
}
@Override
public Resource getResourceUsage() {
// Here the getPreemptedResources() always return zero, except in
// a preemption round
return Resources.subtract(getCurrentConsumption(), getPreemptedResources());
}
@Override
public ResourceWeights getWeights() {
return scheduler.getAppWeight(this);
}
@Override
public Priority getPriority() {
// Right now per-app priorities are not passed to scheduler,
// so everyone has the same priority.
return priority;
}
@Override
public Resource getFairShare() {
return this.fairShare;
}
@Override
public void setFairShare(Resource fairShare) {
this.fairShare = fairShare;
}
@Override
public void updateDemand() {
demand = Resources.createResource(0);
// Demand is current consumption plus outstanding requests
Resources.addTo(demand, getCurrentConsumption());
// Add up outstanding resource requests
synchronized (this) {
for (Priority p : getPriorities()) {
for (ResourceRequest r : getResourceRequests(p).values()) {
Resources.multiplyAndAddTo(demand,
r.getCapability(), r.getNumContainers());
}
}
}
}
@Override
public Resource assignContainer(FSSchedulerNode node) {
if (isOverAMShareLimit()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping allocation because maxAMShare limit would " +
"be exceeded");
}
return Resources.none();
}
return assignContainer(node, false);
}
/**
* Preempt a running container according to the priority
*/
@Override
public RMContainer preemptContainer() {
if (LOG.isDebugEnabled()) {
LOG.debug("App " + getName() + " is going to preempt a running " +
"container");
}
RMContainer toBePreempted = null;
for (RMContainer container : getLiveContainers()) {
if (!getPreemptionContainers().contains(container) &&
(toBePreempted == null ||
comparator.compare(toBePreempted, container) > 0)) {
toBePreempted = container;
}
}
return toBePreempted;
}
}
| 31,032 | 35.768957 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
public class AllocationConfiguration extends ReservationSchedulerConfiguration {
private static final AccessControlList EVERYBODY_ACL = new AccessControlList("*");
private static final AccessControlList NOBODY_ACL = new AccessControlList(" ");
// Minimum resource allocation for each queue
private final Map<String, Resource> minQueueResources;
// Maximum amount of resources per queue
@VisibleForTesting
final Map<String, Resource> maxQueueResources;
// Sharing weights for each queue
private final Map<String, ResourceWeights> queueWeights;
// Max concurrent running applications for each queue and for each user; in addition,
// for users that have no max specified, we use the userMaxJobsDefault.
@VisibleForTesting
final Map<String, Integer> queueMaxApps;
@VisibleForTesting
final Map<String, Integer> userMaxApps;
private final int userMaxAppsDefault;
private final int queueMaxAppsDefault;
// Maximum resource share for each leaf queue that can be used to run AMs
final Map<String, Float> queueMaxAMShares;
private final float queueMaxAMShareDefault;
// ACL's for each queue. Only specifies non-default ACL's from configuration.
private final Map<String, Map<QueueACL, AccessControlList>> queueAcls;
// Min share preemption timeout for each queue in seconds. If a job in the queue
// waits this long without receiving its guaranteed share, it is allowed to
// preempt other jobs' tasks.
private final Map<String, Long> minSharePreemptionTimeouts;
// Fair share preemption timeout for each queue in seconds. If a job in the
// queue waits this long without receiving its fair share threshold, it is
// allowed to preempt other jobs' tasks.
private final Map<String, Long> fairSharePreemptionTimeouts;
// The fair share preemption threshold for each queue. If a queue waits
// fairSharePreemptionTimeout without receiving
// fairshare * fairSharePreemptionThreshold resources, it is allowed to
// preempt other queues' tasks.
private final Map<String, Float> fairSharePreemptionThresholds;
private final Set<String> reservableQueues;
private final Map<String, SchedulingPolicy> schedulingPolicies;
private final SchedulingPolicy defaultSchedulingPolicy;
// Policy for mapping apps to queues
@VisibleForTesting
QueuePlacementPolicy placementPolicy;
//Configured queues in the alloc xml
@VisibleForTesting
Map<FSQueueType, Set<String>> configuredQueues;
// Reservation system configuration
private ReservationQueueConfiguration globalReservationQueueConfig;
public AllocationConfiguration(Map<String, Resource> minQueueResources,
Map<String, Resource> maxQueueResources,
Map<String, Integer> queueMaxApps, Map<String, Integer> userMaxApps,
Map<String, ResourceWeights> queueWeights,
Map<String, Float> queueMaxAMShares, int userMaxAppsDefault,
int queueMaxAppsDefault, float queueMaxAMShareDefault,
Map<String, SchedulingPolicy> schedulingPolicies,
SchedulingPolicy defaultSchedulingPolicy,
Map<String, Long> minSharePreemptionTimeouts,
Map<String, Long> fairSharePreemptionTimeouts,
Map<String, Float> fairSharePreemptionThresholds,
Map<String, Map<QueueACL, AccessControlList>> queueAcls,
QueuePlacementPolicy placementPolicy,
Map<FSQueueType, Set<String>> configuredQueues,
ReservationQueueConfiguration globalReservationQueueConfig,
Set<String> reservableQueues) {
this.minQueueResources = minQueueResources;
this.maxQueueResources = maxQueueResources;
this.queueMaxApps = queueMaxApps;
this.userMaxApps = userMaxApps;
this.queueMaxAMShares = queueMaxAMShares;
this.queueWeights = queueWeights;
this.userMaxAppsDefault = userMaxAppsDefault;
this.queueMaxAppsDefault = queueMaxAppsDefault;
this.queueMaxAMShareDefault = queueMaxAMShareDefault;
this.defaultSchedulingPolicy = defaultSchedulingPolicy;
this.schedulingPolicies = schedulingPolicies;
this.minSharePreemptionTimeouts = minSharePreemptionTimeouts;
this.fairSharePreemptionTimeouts = fairSharePreemptionTimeouts;
this.fairSharePreemptionThresholds = fairSharePreemptionThresholds;
this.queueAcls = queueAcls;
this.reservableQueues = reservableQueues;
this.globalReservationQueueConfig = globalReservationQueueConfig;
this.placementPolicy = placementPolicy;
this.configuredQueues = configuredQueues;
}
public AllocationConfiguration(Configuration conf) {
minQueueResources = new HashMap<String, Resource>();
maxQueueResources = new HashMap<String, Resource>();
queueWeights = new HashMap<String, ResourceWeights>();
queueMaxApps = new HashMap<String, Integer>();
userMaxApps = new HashMap<String, Integer>();
queueMaxAMShares = new HashMap<String, Float>();
userMaxAppsDefault = Integer.MAX_VALUE;
queueMaxAppsDefault = Integer.MAX_VALUE;
queueMaxAMShareDefault = 0.5f;
queueAcls = new HashMap<String, Map<QueueACL, AccessControlList>>();
minSharePreemptionTimeouts = new HashMap<String, Long>();
fairSharePreemptionTimeouts = new HashMap<String, Long>();
fairSharePreemptionThresholds = new HashMap<String, Float>();
schedulingPolicies = new HashMap<String, SchedulingPolicy>();
defaultSchedulingPolicy = SchedulingPolicy.DEFAULT_POLICY;
reservableQueues = new HashSet<>();
configuredQueues = new HashMap<FSQueueType, Set<String>>();
for (FSQueueType queueType : FSQueueType.values()) {
configuredQueues.put(queueType, new HashSet<String>());
}
placementPolicy = QueuePlacementPolicy.fromConfiguration(conf,
configuredQueues);
}
/**
* Get the ACLs associated with this queue. If a given ACL is not explicitly
* configured, include the default value for that ACL. The default for the
* root queue is everybody ("*") and the default for all other queues is
* nobody ("")
*/
public AccessControlList getQueueAcl(String queue, QueueACL operation) {
Map<QueueACL, AccessControlList> queueAcls = this.queueAcls.get(queue);
if (queueAcls != null) {
AccessControlList operationAcl = queueAcls.get(operation);
if (operationAcl != null) {
return operationAcl;
}
}
return (queue.equals("root")) ? EVERYBODY_ACL : NOBODY_ACL;
}
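  // Editor's illustrative sketch (hypothetical queue names): with an
  // allocation file that configures aclSubmitApps only for "root.marketing",
  //   getQueueAcl("root.marketing", QueueACL.SUBMIT_APPLICATIONS)
  // returns the configured list,
  //   getQueueAcl("root.sales", QueueACL.SUBMIT_APPLICATIONS)
  // falls back to NOBODY_ACL (""), and
  //   getQueueAcl("root", QueueACL.SUBMIT_APPLICATIONS)
  // falls back to EVERYBODY_ACL ("*").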
/**
* Get a queue's min share preemption timeout configured in the allocation
* file, in milliseconds. Return -1 if not set.
*/
public long getMinSharePreemptionTimeout(String queueName) {
Long minSharePreemptionTimeout = minSharePreemptionTimeouts.get(queueName);
return (minSharePreemptionTimeout == null) ? -1 : minSharePreemptionTimeout;
}
/**
* Get a queue's fair share preemption timeout configured in the allocation
* file, in milliseconds. Return -1 if not set.
*/
public long getFairSharePreemptionTimeout(String queueName) {
Long fairSharePreemptionTimeout = fairSharePreemptionTimeouts.get(queueName);
return (fairSharePreemptionTimeout == null) ?
-1 : fairSharePreemptionTimeout;
}
/**
* Get a queue's fair share preemption threshold in the allocation file.
* Return -1f if not set.
*/
public float getFairSharePreemptionThreshold(String queueName) {
Float fairSharePreemptionThreshold =
fairSharePreemptionThresholds.get(queueName);
return (fairSharePreemptionThreshold == null) ?
-1f : fairSharePreemptionThreshold;
}
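  // Editor's note, a minimal usage sketch (hypothetical queue name and
  // variable): a typical caller treats the negative sentinels returned by the
  // three getters above as "not configured, inherit from the parent queue",
  // e.g.
  //   long timeout = allocConf.getFairSharePreemptionTimeout("root.adhoc");
  //   if (timeout < 0) { /* fall back to the parent queue's timeout */ }
  // where allocConf is an AllocationConfiguration instance.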
public ResourceWeights getQueueWeight(String queue) {
ResourceWeights weight = queueWeights.get(queue);
return (weight == null) ? ResourceWeights.NEUTRAL : weight;
}
public void setQueueWeight(String queue, ResourceWeights weight) {
queueWeights.put(queue, weight);
}
public int getUserMaxApps(String user) {
Integer maxApps = userMaxApps.get(user);
return (maxApps == null) ? userMaxAppsDefault : maxApps;
}
public int getQueueMaxApps(String queue) {
Integer maxApps = queueMaxApps.get(queue);
return (maxApps == null) ? queueMaxAppsDefault : maxApps;
}
public float getQueueMaxAMShare(String queue) {
Float maxAMShare = queueMaxAMShares.get(queue);
return (maxAMShare == null) ? queueMaxAMShareDefault : maxAMShare;
}
/**
* Get the minimum resource allocation for the given queue.
   * @return the minimum allocation configured for this queue, or
   * Resources.none() if not set.
*/
public Resource getMinResources(String queue) {
Resource minQueueResource = minQueueResources.get(queue);
return (minQueueResource == null) ? Resources.none() : minQueueResource;
}
/**
* Get the maximum resource allocation for the given queue.
   * @return the maximum allocation configured for this queue, or
   * Resources.unbounded() if not set.
*/
public Resource getMaxResources(String queueName) {
Resource maxQueueResource = maxQueueResources.get(queueName);
return (maxQueueResource == null) ? Resources.unbounded() : maxQueueResource;
}
public boolean hasAccess(String queueName, QueueACL acl,
UserGroupInformation user) {
int lastPeriodIndex = queueName.length();
while (lastPeriodIndex != -1) {
String queue = queueName.substring(0, lastPeriodIndex);
if (getQueueAcl(queue, acl).isUserAllowed(user)) {
return true;
}
lastPeriodIndex = queueName.lastIndexOf('.', lastPeriodIndex - 1);
}
return false;
}
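  // Editor's illustrative sketch: for a hypothetical queue name
  // "root.eng.builds", hasAccess() checks the ACL of "root.eng.builds",
  // then "root.eng", then "root", and returns true as soon as any queue in
  // that chain grants the requested operation to the user.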
public SchedulingPolicy getSchedulingPolicy(String queueName) {
SchedulingPolicy policy = schedulingPolicies.get(queueName);
return (policy == null) ? defaultSchedulingPolicy : policy;
}
public SchedulingPolicy getDefaultSchedulingPolicy() {
return defaultSchedulingPolicy;
}
public Map<FSQueueType, Set<String>> getConfiguredQueues() {
return configuredQueues;
}
public QueuePlacementPolicy getPlacementPolicy() {
return placementPolicy;
}
@Override
public boolean isReservable(String queue) {
return reservableQueues.contains(queue);
}
@Override
public long getReservationWindow(String queue) {
return globalReservationQueueConfig.getReservationWindowMsec();
}
@Override
public float getAverageCapacity(String queue) {
return globalReservationQueueConfig.getAvgOverTimeMultiplier() * 100;
}
@Override
public float getInstantaneousMaxCapacity(String queue) {
return globalReservationQueueConfig.getMaxOverTimeMultiplier() * 100;
}
@Override
public String getReservationAdmissionPolicy(String queue) {
return globalReservationQueueConfig.getReservationAdmissionPolicy();
}
@Override
public String getReservationAgent(String queue) {
return globalReservationQueueConfig.getReservationAgent();
}
@Override
public boolean getShowReservationAsQueues(String queue) {
return globalReservationQueueConfig.shouldShowReservationAsQueues();
}
@Override
public String getReplanner(String queue) {
return globalReservationQueueConfig.getPlanner();
}
@Override
public boolean getMoveOnExpiry(String queue) {
return globalReservationQueueConfig.shouldMoveOnExpiry();
}
@Override
public long getEnforcementWindow(String queue) {
return globalReservationQueueConfig.getEnforcementWindowMsec();
}
@VisibleForTesting
public void setReservationWindow(long window) {
globalReservationQueueConfig.setReservationWindow(window);
}
@VisibleForTesting
public void setAverageCapacity(int avgCapacity) {
globalReservationQueueConfig.setAverageCapacity(avgCapacity);
}
}
| 13,005 | 37.252941 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/InvalidQueueNameException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* Thrown when Queue Name is malformed.
*/
@Private
@Unstable
public class InvalidQueueNameException extends IllegalArgumentException {
private static final long serialVersionUID = -7306320927804540011L;
public InvalidQueueNameException(String message) {
super(message);
}
public InvalidQueueNameException(String message, Throwable t) {
super(message, t);
}
}
| 1,401 | 34.05 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@Private
@Unstable
public class FSSchedulerNode extends SchedulerNode {
private static final Log LOG = LogFactory.getLog(FSSchedulerNode.class);
private FSAppAttempt reservedAppSchedulable;
public FSSchedulerNode(RMNode node, boolean usePortForNodeName) {
super(node, usePortForNodeName);
}
@Override
public synchronized void reserveResource(
SchedulerApplicationAttempt application, Priority priority,
RMContainer container) {
// Check if it's already reserved
RMContainer reservedContainer = getReservedContainer();
if (reservedContainer != null) {
// Sanity check
if (!container.getContainer().getNodeId().equals(getNodeID())) {
throw new IllegalStateException("Trying to reserve" +
" container " + container +
" on node " + container.getReservedNode() +
" when currently" + " reserved resource " + reservedContainer +
" on node " + reservedContainer.getReservedNode());
}
// Cannot reserve more than one application on a given node!
if (!reservedContainer.getContainer().getId().getApplicationAttemptId()
.equals(container.getContainer().getId().getApplicationAttemptId())) {
throw new IllegalStateException("Trying to reserve" +
" container " + container +
" for application " + application.getApplicationId() +
" when currently" +
" reserved container " + reservedContainer +
" on node " + this);
}
LOG.info("Updated reserved container " + container.getContainer().getId()
+ " on node " + this + " for application "
+ application.getApplicationId());
} else {
LOG.info("Reserved container " + container.getContainer().getId()
+ " on node " + this + " for application "
+ application.getApplicationId());
}
setReservedContainer(container);
this.reservedAppSchedulable = (FSAppAttempt) application;
}
@Override
public synchronized void unreserveResource(
SchedulerApplicationAttempt application) {
// Cannot unreserve for wrong application...
ApplicationAttemptId reservedApplication =
getReservedContainer().getContainer().getId().getApplicationAttemptId();
if (!reservedApplication.equals(
application.getApplicationAttemptId())) {
throw new IllegalStateException("Trying to unreserve " +
" for application " + application.getApplicationId() +
" when currently reserved " +
" for application " + reservedApplication.getApplicationId() +
" on node " + this);
}
setReservedContainer(null);
this.reservedAppSchedulable = null;
}
public synchronized FSAppAttempt getReservedAppSchedulable() {
return reservedAppSchedulable;
}
}
| 4,378 | 40.311321 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.xml.sax.SAXException;
import com.google.common.annotations.VisibleForTesting;
/**
* Maintains a list of queues as well as scheduling parameters for each queue,
* such as guaranteed share allocations, from the fair scheduler config file.
*
*/
@Private
@Unstable
public class QueueManager {
public static final Log LOG = LogFactory.getLog(
QueueManager.class.getName());
public static final String ROOT_QUEUE = "root";
private final FairScheduler scheduler;
private final Collection<FSLeafQueue> leafQueues =
new CopyOnWriteArrayList<FSLeafQueue>();
private final Map<String, FSQueue> queues = new HashMap<String, FSQueue>();
private FSParentQueue rootQueue;
public QueueManager(FairScheduler scheduler) {
this.scheduler = scheduler;
}
public FSParentQueue getRootQueue() {
return rootQueue;
}
public void initialize(Configuration conf) throws IOException,
SAXException, AllocationConfigurationException, ParserConfigurationException {
rootQueue = new FSParentQueue("root", scheduler, null);
queues.put(rootQueue.getName(), rootQueue);
// Create the default queue
getLeafQueue(YarnConfiguration.DEFAULT_QUEUE_NAME, true);
}
/**
   * Get a leaf queue by name, creating it if the create param is true and it
   * does not already exist. Null is returned if the queue cannot be a leaf
   * queue, i.e. it already exists as a parent queue or one of its ancestors
   * is already a leaf queue, or if create is false and the queue does not
   * exist.
*
* The root part of the name is optional, so a queue underneath the root
* named "queue1" could be referred to as just "queue1", and a queue named
* "queue2" underneath a parent named "parent1" that is underneath the root
* could be referred to as just "parent1.queue2".
*/
public FSLeafQueue getLeafQueue(String name, boolean create) {
FSQueue queue = getQueue(name, create, FSQueueType.LEAF);
if (queue instanceof FSParentQueue) {
return null;
}
return (FSLeafQueue) queue;
}
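  // Editor's illustrative sketch (hypothetical names):
  // getLeafQueue("parent1.queue2", true) creates "root.parent1" as a parent
  // queue and "root.parent1.queue2" as a leaf queue if they do not exist; it
  // returns null if "root.parent1" is already a leaf queue, or if
  // "root.parent1.queue2" already exists as a parent queue.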
/**
* Remove a leaf queue if empty
* @param name name of the queue
* @return true if queue was removed or false otherwise
*/
public boolean removeLeafQueue(String name) {
name = ensureRootPrefix(name);
return removeEmptyIncompatibleQueues(name, FSQueueType.PARENT);
}
/**
   * Get a parent queue by name, creating it if the create param is true and
   * it does not already exist. Null is returned if the queue cannot be a
   * parent queue, i.e. it already exists as a leaf queue or one of its
   * ancestors is already a leaf queue, or if create is false and the queue
   * does not exist.
*
* The root part of the name is optional, so a queue underneath the root
* named "queue1" could be referred to as just "queue1", and a queue named
* "queue2" underneath a parent named "parent1" that is underneath the root
* could be referred to as just "parent1.queue2".
*/
public FSParentQueue getParentQueue(String name, boolean create) {
FSQueue queue = getQueue(name, create, FSQueueType.PARENT);
if (queue instanceof FSLeafQueue) {
return null;
}
return (FSParentQueue) queue;
}
private FSQueue getQueue(String name, boolean create, FSQueueType queueType) {
name = ensureRootPrefix(name);
synchronized (queues) {
FSQueue queue = queues.get(name);
if (queue == null && create) {
        // if the queue doesn't exist, create it and return
queue = createQueue(name, queueType);
// Update steady fair share for all queues
if (queue != null) {
rootQueue.recomputeSteadyShares();
}
}
return queue;
}
}
/**
* Creates a leaf or parent queue based on what is specified in 'queueType'
* and places it in the tree. Creates any parents that don't already exist.
*
* @return
* the created queue, if successful. null if not allowed (one of the parent
* queues in the queue name is already a leaf queue)
*/
private FSQueue createQueue(String name, FSQueueType queueType) {
List<String> newQueueNames = new ArrayList<String>();
newQueueNames.add(name);
int sepIndex = name.length();
FSParentQueue parent = null;
// Move up the queue tree until we reach one that exists.
while (sepIndex != -1) {
int prevSepIndex = sepIndex;
sepIndex = name.lastIndexOf('.', sepIndex-1);
String node = name.substring(sepIndex+1, prevSepIndex);
if (!isQueueNameValid(node)) {
throw new InvalidQueueNameException("Illegal node name at offset " +
(sepIndex+1) + " for queue name " + name);
}
FSQueue queue;
String curName = null;
curName = name.substring(0, sepIndex);
queue = queues.get(curName);
if (queue == null) {
newQueueNames.add(curName);
} else {
if (queue instanceof FSParentQueue) {
parent = (FSParentQueue)queue;
break;
} else {
return null;
}
}
}
// At this point, parent refers to the deepest existing parent of the
// queue to create.
// Now that we know everything worked out, make all the queues
// and add them to the map.
AllocationConfiguration queueConf = scheduler.getAllocationConfiguration();
FSLeafQueue leafQueue = null;
for (int i = newQueueNames.size()-1; i >= 0; i--) {
String queueName = newQueueNames.get(i);
if (i == 0 && queueType != FSQueueType.PARENT) {
leafQueue = new FSLeafQueue(name, scheduler, parent);
try {
leafQueue.setPolicy(queueConf.getDefaultSchedulingPolicy());
} catch (AllocationConfigurationException ex) {
LOG.warn("Failed to set default scheduling policy "
+ queueConf.getDefaultSchedulingPolicy() + " on new leaf queue.", ex);
}
parent.addChildQueue(leafQueue);
queues.put(leafQueue.getName(), leafQueue);
leafQueues.add(leafQueue);
leafQueue.updatePreemptionVariables();
return leafQueue;
} else {
FSParentQueue newParent = new FSParentQueue(queueName, scheduler, parent);
try {
newParent.setPolicy(queueConf.getDefaultSchedulingPolicy());
} catch (AllocationConfigurationException ex) {
LOG.warn("Failed to set default scheduling policy "
+ queueConf.getDefaultSchedulingPolicy() + " on new parent queue.", ex);
}
parent.addChildQueue(newParent);
queues.put(newParent.getName(), newParent);
newParent.updatePreemptionVariables();
parent = newParent;
}
}
return parent;
}
/**
* Make way for the given queue if possible, by removing incompatible
* queues with no apps in them. Incompatibility could be due to
* (1) queueToCreate being currently a parent but needs to change to leaf
* (2) queueToCreate being currently a leaf but needs to change to parent
* (3) an existing leaf queue in the ancestry of queueToCreate.
*
* We will never remove the root queue or the default queue in this way.
*
* @return true if we can create queueToCreate or it already exists.
*/
private boolean removeEmptyIncompatibleQueues(String queueToCreate,
FSQueueType queueType) {
queueToCreate = ensureRootPrefix(queueToCreate);
// Ensure queueToCreate is not root and doesn't have the default queue in its
// ancestry.
if (queueToCreate.equals(ROOT_QUEUE) ||
queueToCreate.startsWith(
ROOT_QUEUE + "." + YarnConfiguration.DEFAULT_QUEUE_NAME + ".")) {
return false;
}
FSQueue queue = queues.get(queueToCreate);
// Queue exists already.
if (queue != null) {
if (queue instanceof FSLeafQueue) {
if (queueType == FSQueueType.LEAF) {
// if queue is already a leaf then return true
return true;
}
// remove incompatibility since queue is a leaf currently
// needs to change to a parent.
return removeQueueIfEmpty(queue);
} else {
if (queueType == FSQueueType.PARENT) {
return true;
}
// If it's an existing parent queue and needs to change to leaf,
// remove it if it's empty.
return removeQueueIfEmpty(queue);
}
}
// Queue doesn't exist already. Check if the new queue would be created
// under an existing leaf queue. If so, try removing that leaf queue.
int sepIndex = queueToCreate.length();
sepIndex = queueToCreate.lastIndexOf('.', sepIndex-1);
while (sepIndex != -1) {
String prefixString = queueToCreate.substring(0, sepIndex);
FSQueue prefixQueue = queues.get(prefixString);
if (prefixQueue != null && prefixQueue instanceof FSLeafQueue) {
return removeQueueIfEmpty(prefixQueue);
}
sepIndex = queueToCreate.lastIndexOf('.', sepIndex-1);
}
return true;
}
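  // Editor's illustrative sketch (hypothetical names): if "root.a.b" is an
  // existing, empty leaf queue, removeEmptyIncompatibleQueues("root.a.b.c",
  // FSQueueType.LEAF) removes "root.a.b" and returns true so "root.a.b.c"
  // can be created underneath it; if "root.a.b" still has apps it returns
  // false, and callers such as updateAllocationConfiguration() then skip
  // creating the new queue.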
/**
* Remove the queue if it and its descendents are all empty.
* @param queue
* @return true if removed, false otherwise
*/
private boolean removeQueueIfEmpty(FSQueue queue) {
if (isEmpty(queue)) {
removeQueue(queue);
return true;
}
return false;
}
/**
* Remove a queue and all its descendents.
*/
private void removeQueue(FSQueue queue) {
if (queue instanceof FSLeafQueue) {
leafQueues.remove(queue);
} else {
List<FSQueue> childQueues = queue.getChildQueues();
while (!childQueues.isEmpty()) {
removeQueue(childQueues.get(0));
}
}
queues.remove(queue.getName());
FSParentQueue parent = queue.getParent();
parent.removeChildQueue(queue);
}
/**
* Returns true if there are no applications, running or not, in the given
* queue or any of its descendents.
*/
protected boolean isEmpty(FSQueue queue) {
if (queue instanceof FSLeafQueue) {
FSLeafQueue leafQueue = (FSLeafQueue)queue;
return queue.getNumRunnableApps() == 0 &&
leafQueue.getNumNonRunnableApps() == 0;
} else {
for (FSQueue child : queue.getChildQueues()) {
if (!isEmpty(child)) {
return false;
}
}
return true;
}
}
/**
* Gets a queue by name.
*/
public FSQueue getQueue(String name) {
name = ensureRootPrefix(name);
synchronized (queues) {
return queues.get(name);
}
}
/**
* Return whether a queue exists already.
*/
public boolean exists(String name) {
name = ensureRootPrefix(name);
synchronized (queues) {
return queues.containsKey(name);
}
}
/**
* Get a collection of all leaf queues
*/
public Collection<FSLeafQueue> getLeafQueues() {
synchronized (queues) {
return leafQueues;
}
}
/**
* Get a collection of all queues
*/
public Collection<FSQueue> getQueues() {
return queues.values();
}
private String ensureRootPrefix(String name) {
if (!name.startsWith(ROOT_QUEUE + ".") && !name.equals(ROOT_QUEUE)) {
name = ROOT_QUEUE + "." + name;
}
return name;
}
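  // Editor's note: ensureRootPrefix("q1") yields "root.q1", while "root.q1"
  // and "root" are returned unchanged, so lookups in the 'queues' map always
  // use fully qualified names.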
public void updateAllocationConfiguration(AllocationConfiguration queueConf) {
// Create leaf queues and the parent queues in a leaf's ancestry if they do not exist
for (String name : queueConf.getConfiguredQueues().get(FSQueueType.LEAF)) {
if (removeEmptyIncompatibleQueues(name, FSQueueType.LEAF)) {
getLeafQueue(name, true);
}
}
// At this point all leaves and 'parents with at least one child' would have been created.
// Now create parents with no configured leaf.
for (String name : queueConf.getConfiguredQueues().get(
FSQueueType.PARENT)) {
if (removeEmptyIncompatibleQueues(name, FSQueueType.PARENT)) {
getParentQueue(name, true);
}
}
for (FSQueue queue : queues.values()) {
// Update queue metrics
FSQueueMetrics queueMetrics = queue.getMetrics();
queueMetrics.setMinShare(queue.getMinShare());
queueMetrics.setMaxShare(queue.getMaxShare());
// Set scheduling policies
try {
SchedulingPolicy policy = queueConf.getSchedulingPolicy(queue.getName());
policy.initialize(scheduler.getClusterResource());
queue.setPolicy(policy);
} catch (AllocationConfigurationException ex) {
LOG.warn("Cannot apply configured scheduling policy to queue "
+ queue.getName(), ex);
}
}
// Update steady fair shares for all queues
rootQueue.recomputeSteadyShares();
// Update the fair share preemption timeouts and preemption for all queues
// recursively
rootQueue.updatePreemptionVariables();
}
/**
* Check whether queue name is valid,
* return true if it is valid, otherwise return false.
*/
@VisibleForTesting
boolean isQueueNameValid(String node) {
return !node.isEmpty() && node.equals(node.trim());
}
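  // Editor's illustrative sketch: isQueueNameValid("data science") is true
  // (internal spaces are allowed), while isQueueNameValid("") and
  // isQueueNameValid(" data ") are false because the name is empty or has
  // leading/trailing whitespace.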
}
| 14,349 | 33.004739 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewAppWeightBooster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
/**
* A {@link WeightAdjuster} implementation that gives a weight boost to new jobs
 * for a certain amount of time -- by default, a 3x weight boost for the
 * first five minutes.
* This can be used to make shorter jobs finish faster, emulating Shortest Job
* First scheduling while not starving long jobs.
*/
@Private
@Unstable
public class NewAppWeightBooster extends Configured implements WeightAdjuster {
private static final float DEFAULT_FACTOR = 3;
private static final long DEFAULT_DURATION = 5 * 60 * 1000;
private float factor;
private long duration;
public void setConf(Configuration conf) {
if (conf != null) {
factor = conf.getFloat("mapred.newjobweightbooster.factor",
DEFAULT_FACTOR);
duration = conf.getLong("mapred.newjobweightbooster.duration",
DEFAULT_DURATION);
}
super.setConf(conf);
}
public double adjustWeight(FSAppAttempt app, double curWeight) {
long start = app.getStartTime();
long now = System.currentTimeMillis();
if (now - start < duration) {
return curWeight * factor;
} else {
return curWeight;
}
}
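  // Editor's illustrative sketch, assuming the defaults above: an app whose
  // start time is within the last five minutes has its weight multiplied by
  // 3 (e.g. 1.0 becomes 3.0); after that window adjustWeight() returns the
  // weight unchanged.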
}
| 2,221 | 35.42623 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
import com.google.common.annotations.VisibleForTesting;
@Public
@Unstable
public class AllocationFileLoaderService extends AbstractService {
public static final Log LOG = LogFactory.getLog(
AllocationFileLoaderService.class.getName());
/** Time to wait between checks of the allocation file */
public static final long ALLOC_RELOAD_INTERVAL_MS = 10 * 1000;
/**
* Time to wait after the allocation has been modified before reloading it
* (this is done to prevent loading a file that hasn't been fully written).
*/
public static final long ALLOC_RELOAD_WAIT_MS = 5 * 1000;
public static final long THREAD_JOIN_TIMEOUT_MS = 1000;
private final Clock clock;
private long lastSuccessfulReload; // Last time we successfully reloaded queues
private boolean lastReloadAttemptFailed = false;
// Path to XML file containing allocations.
private File allocFile;
private Listener reloadListener;
@VisibleForTesting
long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS;
private Thread reloadThread;
private volatile boolean running = true;
public AllocationFileLoaderService() {
this(new SystemClock());
}
public AllocationFileLoaderService(Clock clock) {
super(AllocationFileLoaderService.class.getName());
this.clock = clock;
}
@Override
public void serviceInit(Configuration conf) throws Exception {
this.allocFile = getAllocationFile(conf);
if (allocFile != null) {
reloadThread = new Thread() {
@Override
public void run() {
while (running) {
long time = clock.getTime();
long lastModified = allocFile.lastModified();
if (lastModified > lastSuccessfulReload &&
time > lastModified + ALLOC_RELOAD_WAIT_MS) {
try {
reloadAllocations();
} catch (Exception ex) {
if (!lastReloadAttemptFailed) {
LOG.error("Failed to reload fair scheduler config file - " +
"will use existing allocations.", ex);
}
lastReloadAttemptFailed = true;
}
          } else if (lastModified == 0L) {
if (!lastReloadAttemptFailed) {
LOG.warn("Failed to reload fair scheduler config file because" +
" last modified returned 0. File exists: "
+ allocFile.exists());
}
lastReloadAttemptFailed = true;
}
try {
Thread.sleep(reloadIntervalMs);
} catch (InterruptedException ex) {
LOG.info(
"Interrupted while waiting to reload alloc configuration");
}
}
}
};
reloadThread.setName("AllocationFileReloader");
reloadThread.setDaemon(true);
}
super.serviceInit(conf);
}
@Override
public void serviceStart() throws Exception {
if (reloadThread != null) {
reloadThread.start();
}
super.serviceStart();
}
@Override
public void serviceStop() throws Exception {
running = false;
if (reloadThread != null) {
reloadThread.interrupt();
try {
reloadThread.join(THREAD_JOIN_TIMEOUT_MS);
} catch (InterruptedException e) {
LOG.warn("reloadThread fails to join.");
}
}
super.serviceStop();
}
/**
* Path to XML file containing allocations. If the
* path is relative, it is searched for in the
* classpath, but loaded like a regular File.
*/
public File getAllocationFile(Configuration conf) {
String allocFilePath = conf.get(FairSchedulerConfiguration.ALLOCATION_FILE,
FairSchedulerConfiguration.DEFAULT_ALLOCATION_FILE);
File allocFile = new File(allocFilePath);
if (!allocFile.isAbsolute()) {
URL url = Thread.currentThread().getContextClassLoader()
.getResource(allocFilePath);
if (url == null) {
LOG.warn(allocFilePath + " not found on the classpath.");
allocFile = null;
} else if (!url.getProtocol().equalsIgnoreCase("file")) {
throw new RuntimeException("Allocation file " + url
+ " found on the classpath is not on the local filesystem.");
} else {
allocFile = new File(url.getPath());
}
}
return allocFile;
}
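  // Editor's illustrative sketch (hypothetical paths): with
  //   conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,
  //       "/etc/hadoop/fair-scheduler.xml")
  // the absolute path is used directly, while a relative value such as
  // "fair-scheduler.xml" is resolved against the classpath and must live on
  // the local filesystem.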
public synchronized void setReloadListener(Listener reloadListener) {
this.reloadListener = reloadListener;
}
/**
* Updates the allocation list from the allocation config file. This file is
* expected to be in the XML format specified in the design doc.
*
* @throws IOException if the config file cannot be read.
* @throws AllocationConfigurationException if allocations are invalid.
* @throws ParserConfigurationException if XML parser is misconfigured.
* @throws SAXException if config file is malformed.
*/
public synchronized void reloadAllocations() throws IOException,
ParserConfigurationException, SAXException, AllocationConfigurationException {
if (allocFile == null) {
return;
}
LOG.info("Loading allocation file " + allocFile);
// Create some temporary hashmaps to hold the new allocs, and we only save
// them in our fields if we have parsed the entire allocs file successfully.
Map<String, Resource> minQueueResources = new HashMap<String, Resource>();
Map<String, Resource> maxQueueResources = new HashMap<String, Resource>();
Map<String, Integer> queueMaxApps = new HashMap<String, Integer>();
Map<String, Integer> userMaxApps = new HashMap<String, Integer>();
Map<String, Float> queueMaxAMShares = new HashMap<String, Float>();
Map<String, ResourceWeights> queueWeights = new HashMap<String, ResourceWeights>();
Map<String, SchedulingPolicy> queuePolicies = new HashMap<String, SchedulingPolicy>();
Map<String, Long> minSharePreemptionTimeouts = new HashMap<String, Long>();
Map<String, Long> fairSharePreemptionTimeouts = new HashMap<String, Long>();
Map<String, Float> fairSharePreemptionThresholds =
new HashMap<String, Float>();
Map<String, Map<QueueACL, AccessControlList>> queueAcls =
new HashMap<String, Map<QueueACL, AccessControlList>>();
Set<String> reservableQueues = new HashSet<String>();
int userMaxAppsDefault = Integer.MAX_VALUE;
int queueMaxAppsDefault = Integer.MAX_VALUE;
float queueMaxAMShareDefault = 0.5f;
long defaultFairSharePreemptionTimeout = Long.MAX_VALUE;
long defaultMinSharePreemptionTimeout = Long.MAX_VALUE;
float defaultFairSharePreemptionThreshold = 0.5f;
SchedulingPolicy defaultSchedPolicy = SchedulingPolicy.DEFAULT_POLICY;
// Reservation global configuration knobs
String planner = null;
String reservationAgent = null;
String reservationAdmissionPolicy = null;
QueuePlacementPolicy newPlacementPolicy = null;
// Remember all queue names so we can display them on web UI, etc.
// configuredQueues is segregated based on whether it is a leaf queue
// or a parent queue. This information is used for creating queues
    // and also for making queue placement decisions (QueuePlacementRule.java).
Map<FSQueueType, Set<String>> configuredQueues =
new HashMap<FSQueueType, Set<String>>();
for (FSQueueType queueType : FSQueueType.values()) {
configuredQueues.put(queueType, new HashSet<String>());
}
// Read and parse the allocations file.
DocumentBuilderFactory docBuilderFactory =
DocumentBuilderFactory.newInstance();
docBuilderFactory.setIgnoringComments(true);
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document doc = builder.parse(allocFile);
Element root = doc.getDocumentElement();
if (!"allocations".equals(root.getTagName()))
throw new AllocationConfigurationException("Bad fair scheduler config " +
"file: top-level element not <allocations>");
NodeList elements = root.getChildNodes();
List<Element> queueElements = new ArrayList<Element>();
Element placementPolicyElement = null;
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
Element element = (Element)node;
if ("queue".equals(element.getTagName()) ||
"pool".equals(element.getTagName())) {
queueElements.add(element);
} else if ("user".equals(element.getTagName())) {
String userName = element.getAttribute("name");
NodeList fields = element.getChildNodes();
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element))
continue;
Element field = (Element) fieldNode;
if ("maxRunningApps".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
int val = Integer.parseInt(text);
userMaxApps.put(userName, val);
}
}
} else if ("userMaxAppsDefault".equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
int val = Integer.parseInt(text);
userMaxAppsDefault = val;
} else if ("defaultFairSharePreemptionTimeout"
.equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
long val = Long.parseLong(text) * 1000L;
defaultFairSharePreemptionTimeout = val;
} else if ("fairSharePreemptionTimeout".equals(element.getTagName())) {
if (defaultFairSharePreemptionTimeout == Long.MAX_VALUE) {
String text = ((Text)element.getFirstChild()).getData().trim();
long val = Long.parseLong(text) * 1000L;
defaultFairSharePreemptionTimeout = val;
}
} else if ("defaultMinSharePreemptionTimeout"
.equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
long val = Long.parseLong(text) * 1000L;
defaultMinSharePreemptionTimeout = val;
} else if ("defaultFairSharePreemptionThreshold"
.equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
float val = Float.parseFloat(text);
val = Math.max(Math.min(val, 1.0f), 0.0f);
defaultFairSharePreemptionThreshold = val;
} else if ("queueMaxAppsDefault".equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
int val = Integer.parseInt(text);
queueMaxAppsDefault = val;
} else if ("queueMaxAMShareDefault".equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
float val = Float.parseFloat(text);
val = Math.min(val, 1.0f);
queueMaxAMShareDefault = val;
} else if ("defaultQueueSchedulingPolicy".equals(element.getTagName())
|| "defaultQueueSchedulingMode".equals(element.getTagName())) {
String text = ((Text)element.getFirstChild()).getData().trim();
defaultSchedPolicy = SchedulingPolicy.parse(text);
} else if ("queuePlacementPolicy".equals(element.getTagName())) {
placementPolicyElement = element;
} else if ("reservation-planner".equals(element.getTagName())) {
String text = ((Text) element.getFirstChild()).getData().trim();
planner = text;
} else if ("reservation-agent".equals(element.getTagName())) {
String text = ((Text) element.getFirstChild()).getData().trim();
reservationAgent = text;
} else if ("reservation-policy".equals(element.getTagName())) {
String text = ((Text) element.getFirstChild()).getData().trim();
reservationAdmissionPolicy = text;
} else {
LOG.warn("Bad element in allocations file: " + element.getTagName());
}
}
}
// Load queue elements. A root queue can either be included or omitted. If
// it's included, all other queues must be inside it.
for (Element element : queueElements) {
String parent = "root";
if (element.getAttribute("name").equalsIgnoreCase("root")) {
if (queueElements.size() > 1) {
throw new AllocationConfigurationException("If configuring root queue,"
+ " no other queues can be placed alongside it.");
}
parent = null;
}
loadQueue(parent, element, minQueueResources, maxQueueResources,
queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights,
queuePolicies, minSharePreemptionTimeouts, fairSharePreemptionTimeouts,
fairSharePreemptionThresholds, queueAcls, configuredQueues,
reservableQueues);
}
// Load placement policy and pass it configured queues
Configuration conf = getConfig();
if (placementPolicyElement != null) {
newPlacementPolicy = QueuePlacementPolicy.fromXml(placementPolicyElement,
configuredQueues, conf);
} else {
newPlacementPolicy = QueuePlacementPolicy.fromConfiguration(conf,
configuredQueues);
}
// Set the min/fair share preemption timeout for the root queue
if (!minSharePreemptionTimeouts.containsKey(QueueManager.ROOT_QUEUE)){
minSharePreemptionTimeouts.put(QueueManager.ROOT_QUEUE,
defaultMinSharePreemptionTimeout);
}
if (!fairSharePreemptionTimeouts.containsKey(QueueManager.ROOT_QUEUE)) {
fairSharePreemptionTimeouts.put(QueueManager.ROOT_QUEUE,
defaultFairSharePreemptionTimeout);
}
// Set the fair share preemption threshold for the root queue
if (!fairSharePreemptionThresholds.containsKey(QueueManager.ROOT_QUEUE)) {
fairSharePreemptionThresholds.put(QueueManager.ROOT_QUEUE,
defaultFairSharePreemptionThreshold);
}
ReservationQueueConfiguration globalReservationQueueConfig = new
ReservationQueueConfiguration();
if (planner != null) {
globalReservationQueueConfig.setPlanner(planner);
}
if (reservationAdmissionPolicy != null) {
globalReservationQueueConfig.setReservationAdmissionPolicy
(reservationAdmissionPolicy);
}
if (reservationAgent != null) {
globalReservationQueueConfig.setReservationAgent(reservationAgent);
}
AllocationConfiguration info = new AllocationConfiguration(minQueueResources,
maxQueueResources, queueMaxApps, userMaxApps, queueWeights,
queueMaxAMShares, userMaxAppsDefault, queueMaxAppsDefault,
queueMaxAMShareDefault, queuePolicies, defaultSchedPolicy,
minSharePreemptionTimeouts, fairSharePreemptionTimeouts,
fairSharePreemptionThresholds, queueAcls,
newPlacementPolicy, configuredQueues, globalReservationQueueConfig,
reservableQueues);
lastSuccessfulReload = clock.getTime();
lastReloadAttemptFailed = false;
reloadListener.onReload(info);
}
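  /*
   * Editor's note: a minimal allocation file that exercises the elements
   * parsed above might look like the following (all values are hypothetical):
   *
   * <allocations>
   *   <queue name="engineering">
   *     <minResources>2048 mb, 2 vcores</minResources>
   *     <maxRunningApps>20</maxRunningApps>
   *     <weight>2.0</weight>
   *     <schedulingPolicy>fair</schedulingPolicy>
   *   </queue>
   *   <queueMaxAppsDefault>50</queueMaxAppsDefault>
   *   <defaultFairSharePreemptionThreshold>0.6</defaultFairSharePreemptionThreshold>
   * </allocations>
   */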
/**
* Loads a queue from a queue element in the configuration file
*/
private void loadQueue(String parentName, Element element,
Map<String, Resource> minQueueResources,
Map<String, Resource> maxQueueResources, Map<String, Integer> queueMaxApps,
Map<String, Integer> userMaxApps, Map<String, Float> queueMaxAMShares,
Map<String, ResourceWeights> queueWeights,
Map<String, SchedulingPolicy> queuePolicies,
Map<String, Long> minSharePreemptionTimeouts,
Map<String, Long> fairSharePreemptionTimeouts,
Map<String, Float> fairSharePreemptionThresholds,
Map<String, Map<QueueACL, AccessControlList>> queueAcls,
Map<FSQueueType, Set<String>> configuredQueues,
Set<String> reservableQueues)
throws AllocationConfigurationException {
String queueName = element.getAttribute("name").trim();
if (queueName.contains(".")) {
throw new AllocationConfigurationException("Bad fair scheduler config "
+ "file: queue name (" + queueName + ") shouldn't contain period.");
}
if (queueName.isEmpty()) {
throw new AllocationConfigurationException("Bad fair scheduler config "
+ "file: queue name shouldn't be empty or "
+ "consist only of whitespace.");
}
if (parentName != null) {
queueName = parentName + "." + queueName;
}
Map<QueueACL, AccessControlList> acls =
new HashMap<QueueACL, AccessControlList>();
NodeList fields = element.getChildNodes();
boolean isLeaf = true;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element))
continue;
Element field = (Element) fieldNode;
if ("minResources".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
Resource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
minQueueResources.put(queueName, val);
} else if ("maxResources".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
Resource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
maxQueueResources.put(queueName, val);
} else if ("maxRunningApps".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
int val = Integer.parseInt(text);
queueMaxApps.put(queueName, val);
} else if ("maxAMShare".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
float val = Float.parseFloat(text);
val = Math.min(val, 1.0f);
queueMaxAMShares.put(queueName, val);
} else if ("weight".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
double val = Double.parseDouble(text);
queueWeights.put(queueName, new ResourceWeights((float)val));
} else if ("minSharePreemptionTimeout".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
long val = Long.parseLong(text) * 1000L;
minSharePreemptionTimeouts.put(queueName, val);
} else if ("fairSharePreemptionTimeout".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
long val = Long.parseLong(text) * 1000L;
fairSharePreemptionTimeouts.put(queueName, val);
} else if ("fairSharePreemptionThreshold".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
float val = Float.parseFloat(text);
val = Math.max(Math.min(val, 1.0f), 0.0f);
fairSharePreemptionThresholds.put(queueName, val);
} else if ("schedulingPolicy".equals(field.getTagName())
|| "schedulingMode".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
SchedulingPolicy policy = SchedulingPolicy.parse(text);
queuePolicies.put(queueName, policy);
} else if ("aclSubmitApps".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData();
acls.put(QueueACL.SUBMIT_APPLICATIONS, new AccessControlList(text));
} else if ("aclAdministerApps".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData();
acls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(text));
} else if ("reservation".equals(field.getTagName())) {
isLeaf = false;
reservableQueues.add(queueName);
configuredQueues.get(FSQueueType.PARENT).add(queueName);
} else if ("queue".endsWith(field.getTagName()) ||
"pool".equals(field.getTagName())) {
loadQueue(queueName, field, minQueueResources, maxQueueResources,
queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights,
queuePolicies, minSharePreemptionTimeouts,
fairSharePreemptionTimeouts, fairSharePreemptionThresholds,
queueAcls, configuredQueues, reservableQueues);
isLeaf = false;
}
}
if (isLeaf) {
// if a leaf in the alloc file is marked as type='parent'
// then store it under 'parent'
if ("parent".equals(element.getAttribute("type"))) {
configuredQueues.get(FSQueueType.PARENT).add(queueName);
} else {
configuredQueues.get(FSQueueType.LEAF).add(queueName);
}
} else {
if ("parent".equals(element.getAttribute("type"))) {
throw new AllocationConfigurationException("Both <reservation> and " +
"type=\"parent\" found for queue " + queueName + " which is " +
"unsupported");
}
configuredQueues.get(FSQueueType.PARENT).add(queueName);
}
queueAcls.put(queueName, acls);
if (maxQueueResources.containsKey(queueName) &&
minQueueResources.containsKey(queueName)
&& !Resources.fitsIn(minQueueResources.get(queueName),
maxQueueResources.get(queueName))) {
LOG.warn(
String.format(
"Queue %s has max resources %s less than min resources %s",
queueName, maxQueueResources.get(queueName),
minQueueResources.get(queueName)));
}
}
public interface Listener {
public void onReload(AllocationConfiguration info);
}
}
| 23,610 | 42.164534 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.TreeSet;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.util.resource.Resources;
@Private
@Unstable
public class FSLeafQueue extends FSQueue {
private static final Log LOG = LogFactory.getLog(
FSLeafQueue.class.getName());
private final List<FSAppAttempt> runnableApps = // apps that are runnable
new ArrayList<FSAppAttempt>();
private final List<FSAppAttempt> nonRunnableApps =
new ArrayList<FSAppAttempt>();
// get a lock with fair distribution for app list updates
private final ReadWriteLock rwl = new ReentrantReadWriteLock(true);
private final Lock readLock = rwl.readLock();
private final Lock writeLock = rwl.writeLock();
private Resource demand = Resources.createResource(0);
// Variables used for preemption
private long lastTimeAtMinShare;
private long lastTimeAtFairShareThreshold;
// Track the AM resource usage for this queue
private Resource amResourceUsage;
private final ActiveUsersManager activeUsersManager;
public FSLeafQueue(String name, FairScheduler scheduler,
FSParentQueue parent) {
super(name, scheduler, parent);
this.lastTimeAtMinShare = scheduler.getClock().getTime();
this.lastTimeAtFairShareThreshold = scheduler.getClock().getTime();
activeUsersManager = new ActiveUsersManager(getMetrics());
amResourceUsage = Resource.newInstance(0, 0);
}
public void addApp(FSAppAttempt app, boolean runnable) {
writeLock.lock();
try {
if (runnable) {
runnableApps.add(app);
} else {
nonRunnableApps.add(app);
}
} finally {
writeLock.unlock();
}
}
// for testing
void addAppSchedulable(FSAppAttempt appSched) {
writeLock.lock();
try {
runnableApps.add(appSched);
} finally {
writeLock.unlock();
}
}
/**
* Removes the given app from this queue.
* @return whether or not the app was runnable
*/
public boolean removeApp(FSAppAttempt app) {
boolean runnable = false;
// Remove app from runnable/nonRunnable list while holding the write lock
writeLock.lock();
try {
runnable = runnableApps.remove(app);
if (!runnable) {
// removeNonRunnableApp acquires the write lock again, which is fine
if (!removeNonRunnableApp(app)) {
throw new IllegalStateException("Given app to remove " + app +
" does not exist in queue " + this);
}
}
} finally {
writeLock.unlock();
}
// Update AM resource usage if needed. If isAMRunning is true, we're not
// running an unmanaged AM.
if (runnable && app.isAmRunning()) {
Resources.subtractFrom(amResourceUsage, app.getAMResource());
}
return runnable;
}
/**
* Removes the given app if it is non-runnable and belongs to this queue
* @return true if the app is removed, false otherwise
*/
public boolean removeNonRunnableApp(FSAppAttempt app) {
writeLock.lock();
try {
return nonRunnableApps.remove(app);
} finally {
writeLock.unlock();
}
}
public boolean isRunnableApp(FSAppAttempt attempt) {
readLock.lock();
try {
return runnableApps.contains(attempt);
} finally {
readLock.unlock();
}
}
public boolean isNonRunnableApp(FSAppAttempt attempt) {
readLock.lock();
try {
return nonRunnableApps.contains(attempt);
} finally {
readLock.unlock();
}
}
public void resetPreemptedResources() {
readLock.lock();
try {
for (FSAppAttempt attempt : runnableApps) {
attempt.resetPreemptedResources();
}
} finally {
readLock.unlock();
}
}
public void clearPreemptedResources() {
readLock.lock();
try {
for (FSAppAttempt attempt : runnableApps) {
attempt.clearPreemptedResources();
}
} finally {
readLock.unlock();
}
}
public List<FSAppAttempt> getCopyOfNonRunnableAppSchedulables() {
List<FSAppAttempt> appsToReturn = new ArrayList<FSAppAttempt>();
readLock.lock();
try {
appsToReturn.addAll(nonRunnableApps);
} finally {
readLock.unlock();
}
return appsToReturn;
}
@Override
public void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps) {
readLock.lock();
try {
for (FSAppAttempt appSched : runnableApps) {
apps.add(appSched.getApplicationAttemptId());
}
for (FSAppAttempt appSched : nonRunnableApps) {
apps.add(appSched.getApplicationAttemptId());
}
} finally {
readLock.unlock();
}
}
@Override
public void setPolicy(SchedulingPolicy policy)
throws AllocationConfigurationException {
if (!SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_LEAF)) {
throwPolicyDoesnotApplyException(policy);
}
super.policy = policy;
}
@Override
public void recomputeShares() {
readLock.lock();
try {
policy.computeShares(runnableApps, getFairShare());
} finally {
readLock.unlock();
}
}
@Override
public Resource getDemand() {
return demand;
}
@Override
public Resource getResourceUsage() {
Resource usage = Resources.createResource(0);
readLock.lock();
try {
for (FSAppAttempt app : runnableApps) {
Resources.addTo(usage, app.getResourceUsage());
}
for (FSAppAttempt app : nonRunnableApps) {
Resources.addTo(usage, app.getResourceUsage());
}
} finally {
readLock.unlock();
}
return usage;
}
public Resource getAmResourceUsage() {
return amResourceUsage;
}
@Override
public void updateDemand() {
// Compute demand by iterating through apps in the queue
// Limit demand to maxResources
Resource maxRes = scheduler.getAllocationConfiguration()
.getMaxResources(getName());
demand = Resources.createResource(0);
readLock.lock();
try {
for (FSAppAttempt sched : runnableApps) {
if (Resources.equals(demand, maxRes)) {
break;
}
updateDemandForApp(sched, maxRes);
}
for (FSAppAttempt sched : nonRunnableApps) {
if (Resources.equals(demand, maxRes)) {
break;
}
updateDemandForApp(sched, maxRes);
}
} finally {
readLock.unlock();
}
if (LOG.isDebugEnabled()) {
LOG.debug("The updated demand for " + getName() + " is " + demand
+ "; the max is " + maxRes);
LOG.debug("The updated fairshare for " + getName() + " is "
+ getFairShare());
}
}
private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
sched.updateDemand();
Resource toAdd = sched.getDemand();
if (LOG.isDebugEnabled()) {
LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
+ "; Total resource consumption for " + getName() + " now "
+ demand);
}
demand = Resources.add(demand, toAdd);
demand = Resources.componentwiseMin(demand, maxRes);
}
@Override
public Resource assignContainer(FSSchedulerNode node) {
Resource assigned = Resources.none();
if (LOG.isDebugEnabled()) {
LOG.debug("Node " + node.getNodeName() + " offered to queue: " +
getName() + " fairShare: " + getFairShare());
}
if (!assignContainerPreCheck(node)) {
return assigned;
}
// Apps that have resource demands.
TreeSet<FSAppAttempt> pendingForResourceApps =
new TreeSet<FSAppAttempt>(policy.getComparator());
readLock.lock();
try {
for (FSAppAttempt app : runnableApps) {
Resource pending = app.getAppAttemptResourceUsage().getPending();
if (!pending.equals(Resources.none())) {
pendingForResourceApps.add(app);
}
}
} finally {
readLock.unlock();
}
for (FSAppAttempt sched : pendingForResourceApps) {
if (SchedulerAppUtils.isBlacklisted(sched, node, LOG)) {
continue;
}
assigned = sched.assignContainer(node);
if (!assigned.equals(Resources.none())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Assigned container in queue:" + getName() + " " +
"container:" + assigned);
}
break;
}
}
return assigned;
}
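  // Editor's note: assignContainer() above works in two phases: it first
  // snapshots the runnable apps that still have pending resource requests
  // into a set ordered by the queue's scheduling policy comparator, then
  // offers the node to each candidate in that order and stops at the first
  // non-empty assignment.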
@Override
public RMContainer preemptContainer() {
RMContainer toBePreempted = null;
// If this queue is not over its fair share, reject
if (!preemptContainerPreCheck()) {
return toBePreempted;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Queue " + getName() + " is going to preempt a container " +
"from its applications.");
}
// Choose the app that is most over fair share
Comparator<Schedulable> comparator = policy.getComparator();
FSAppAttempt candidateSched = null;
readLock.lock();
try {
for (FSAppAttempt sched : runnableApps) {
if (candidateSched == null ||
comparator.compare(sched, candidateSched) > 0) {
candidateSched = sched;
}
}
} finally {
readLock.unlock();
}
// Preempt from the selected app
if (candidateSched != null) {
toBePreempted = candidateSched.preemptContainer();
}
return toBePreempted;
}
@Override
public List<FSQueue> getChildQueues() {
return new ArrayList<FSQueue>(1);
}
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
QueueUserACLInfo userAclInfo =
recordFactory.newRecordInstance(QueueUserACLInfo.class);
List<QueueACL> operations = new ArrayList<QueueACL>();
for (QueueACL operation : QueueACL.values()) {
if (hasAccess(operation, user)) {
operations.add(operation);
}
}
userAclInfo.setQueueName(getQueueName());
userAclInfo.setUserAcls(operations);
return Collections.singletonList(userAclInfo);
}
public long getLastTimeAtMinShare() {
return lastTimeAtMinShare;
}
private void setLastTimeAtMinShare(long lastTimeAtMinShare) {
this.lastTimeAtMinShare = lastTimeAtMinShare;
}
public long getLastTimeAtFairShareThreshold() {
return lastTimeAtFairShareThreshold;
}
private void setLastTimeAtFairShareThreshold(
long lastTimeAtFairShareThreshold) {
this.lastTimeAtFairShareThreshold = lastTimeAtFairShareThreshold;
}
@Override
public int getNumRunnableApps() {
readLock.lock();
try {
return runnableApps.size();
} finally {
readLock.unlock();
}
}
public int getNumNonRunnableApps() {
readLock.lock();
try {
return nonRunnableApps.size();
} finally {
readLock.unlock();
}
}
public int getNumPendingApps() {
int numPendingApps = 0;
readLock.lock();
try {
for (FSAppAttempt attempt : runnableApps) {
if (attempt.isPending()) {
numPendingApps++;
}
}
numPendingApps += nonRunnableApps.size();
} finally {
readLock.unlock();
}
return numPendingApps;
}
/**
   * TODO: Based on how frequently this is called, we might want to combine
   * counting pending and active apps into a single method.
*/
public int getNumActiveApps() {
int numActiveApps = 0;
readLock.lock();
try {
for (FSAppAttempt attempt : runnableApps) {
if (!attempt.isPending()) {
numActiveApps++;
}
}
} finally {
readLock.unlock();
}
return numActiveApps;
}
@Override
public ActiveUsersManager getActiveUsersManager() {
return activeUsersManager;
}
/**
   * Check whether this queue can run the given application master without
   * exceeding the queue's maxAMShare limit.
   *
   * @param amResource resources required by the application master
   * @return true if this queue can run the application master
*/
public boolean canRunAppAM(Resource amResource) {
float maxAMShare =
scheduler.getAllocationConfiguration().getQueueMaxAMShare(getName());
if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
return true;
}
Resource maxAMResource = Resources.multiply(getFairShare(), maxAMShare);
Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
return !policy
.checkIfAMResourceUsageOverLimit(ifRunAMResource, maxAMResource);
}
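  /*
   * Illustrative sketch (added for exposition; not part of the original
   * source): the AM limit that canRunAppAM() applies, factored out with
   * hypothetical numbers. With a fair share of <8192 MB, 8 vcores> and a
   * maxAMShare of 0.5f the limit is <4096 MB, 4 vcores>; a maxAMShare of
   * -1.0f (within epsilon) disables the limit entirely.
   */
  Resource exampleMaxAMResource(float maxAMShare) {
    if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
      return Resources.unbounded(); // no AM limit configured
    }
    return Resources.multiply(getFairShare(), maxAMShare);
  }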
public void addAMResourceUsage(Resource amResource) {
if (amResource != null) {
Resources.addTo(amResourceUsage, amResource);
}
}
@Override
public void recoverContainer(Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer) {
// TODO Auto-generated method stub
}
/**
   * Update the preemption bookkeeping for the queue, i.e. the last times at
   * which it was at its min share and at its fair share preemption threshold.
*/
public void updateStarvationStats() {
long now = scheduler.getClock().getTime();
if (!isStarvedForMinShare()) {
setLastTimeAtMinShare(now);
}
if (!isStarvedForFairShare()) {
setLastTimeAtFairShareThreshold(now);
}
}
  /** Allows setting the weight for a dynamically created queue.
   * Currently only used for reservation-based queues.
   * @param weight queue weight
*/
public void setWeights(float weight) {
scheduler.getAllocationConfiguration().setQueueWeight(getName(),
new ResourceWeights(weight));
}
/**
* Helper method to check if the queue should preempt containers
*
* @return true if check passes (can preempt) or false otherwise
*/
private boolean preemptContainerPreCheck() {
return parent.getPolicy().checkIfUsageOverFairShare(getResourceUsage(),
getFairShare());
}
/**
   * Returns true if the queue is being starved for its min share.
*/
@VisibleForTesting
boolean isStarvedForMinShare() {
return isStarved(getMinShare());
}
/**
   * Returns true if the queue is being starved for its fair share threshold.
*/
@VisibleForTesting
boolean isStarvedForFairShare() {
return isStarved(
Resources.multiply(getFairShare(), getFairSharePreemptionThreshold()));
}
private boolean isStarved(Resource share) {
Resource desiredShare = Resources.min(policy.getResourceCalculator(),
scheduler.getClusterResource(), share, getDemand());
Resource resourceUsage = getResourceUsage();
return Resources.lessThan(policy.getResourceCalculator(),
scheduler.getClusterResource(), resourceUsage, desiredShare);
}
}
| 16,496 | 27.942105 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import java.util.Collection;
import java.util.Comparator;
import java.util.concurrent.ConcurrentHashMap;
@Public
@Evolving
public abstract class SchedulingPolicy {
private static final ConcurrentHashMap<Class<? extends SchedulingPolicy>, SchedulingPolicy> instances =
new ConcurrentHashMap<Class<? extends SchedulingPolicy>, SchedulingPolicy>();
public static final SchedulingPolicy DEFAULT_POLICY =
getInstance(FairSharePolicy.class);
public static final byte DEPTH_LEAF = (byte) 1;
public static final byte DEPTH_INTERMEDIATE = (byte) 2;
public static final byte DEPTH_ROOT = (byte) 4;
public static final byte DEPTH_PARENT = (byte) 6; // Root and Intermediate
public static final byte DEPTH_ANY = (byte) 7;
/**
   * Returns a {@link SchedulingPolicy} instance corresponding to the passed clazz.
   *
   * @param clazz a class that extends {@link SchedulingPolicy}
   * @return the shared {@link SchedulingPolicy} instance for that class
*/
public static SchedulingPolicy getInstance(Class<? extends SchedulingPolicy> clazz) {
SchedulingPolicy policy = ReflectionUtils.newInstance(clazz, null);
SchedulingPolicy policyRet = instances.putIfAbsent(clazz, policy);
if(policyRet != null) {
return policyRet;
}
return policy;
}
/**
   * Returns a {@link SchedulingPolicy} instance corresponding to the
   * {@link SchedulingPolicy} passed as a string. The policy can be "fair" for
   * FairSharePolicy, "fifo" for FifoPolicy, or "drf" for
   * DominantResourceFairnessPolicy. For a custom
   * {@link SchedulingPolicy} on the RM classpath, the policy should be the
   * canonical class name of the {@link SchedulingPolicy}.
   *
   * @param policy canonical class name or "drf" or "fair" or "fifo"
   * @throws AllocationConfigurationException if the policy cannot be resolved
*/
@SuppressWarnings("unchecked")
public static SchedulingPolicy parse(String policy)
throws AllocationConfigurationException {
@SuppressWarnings("rawtypes")
Class clazz;
String text = StringUtils.toLowerCase(policy);
if (text.equalsIgnoreCase(FairSharePolicy.NAME)) {
clazz = FairSharePolicy.class;
} else if (text.equalsIgnoreCase(FifoPolicy.NAME)) {
clazz = FifoPolicy.class;
} else if (text.equalsIgnoreCase(DominantResourceFairnessPolicy.NAME)) {
clazz = DominantResourceFairnessPolicy.class;
} else {
try {
clazz = Class.forName(policy);
} catch (ClassNotFoundException cnfe) {
throw new AllocationConfigurationException(policy
+ " SchedulingPolicy class not found!");
}
}
if (!SchedulingPolicy.class.isAssignableFrom(clazz)) {
throw new AllocationConfigurationException(policy
+ " does not extend SchedulingPolicy");
}
return getInstance(clazz);
}
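  /*
   * Illustrative sketch (added for exposition; not part of the original
   * source): a lenient variant of parse() that falls back to DEFAULT_POLICY
   * when the configured text is unusable. Shown only to demonstrate how
   * parse() and getInstance() are meant to be called; the fallback behaviour
   * is an assumption, not what the scheduler actually does.
   */
  static SchedulingPolicy parseOrDefault(String policyText) {
    try {
      // Accepts "fair", "fifo", "drf", or a fully qualified class name.
      return parse(policyText);
    } catch (AllocationConfigurationException e) {
      return DEFAULT_POLICY;
    }
  }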
public void initialize(Resource clusterCapacity) {}
/**
* The {@link ResourceCalculator} returned by this method should be used
* for any calculations involving resources.
*
* @return ResourceCalculator instance to use
*/
public abstract ResourceCalculator getResourceCalculator();
/**
   * @return the name of the {@link SchedulingPolicy}
*/
public abstract String getName();
/**
   * Specifies the depths in the hierarchy that this {@link SchedulingPolicy}
   * applies to.
*
* @return depth equal to one of fields {@link SchedulingPolicy}#DEPTH_*
*/
public abstract byte getApplicableDepth();
/**
* Checks if the specified {@link SchedulingPolicy} can be used for a queue at
* the specified depth in the hierarchy
*
* @param policy {@link SchedulingPolicy} we are checking the
* depth-applicability for
* @param depth queue's depth in the hierarchy
* @return true if policy is applicable to passed depth, false otherwise
*/
public static boolean isApplicableTo(SchedulingPolicy policy, byte depth) {
    return (policy.getApplicableDepth() & depth) == depth;
}
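  /*
   * Illustrative example (added for exposition; not part of the original
   * source): the DEPTH_* fields form a bit mask, so a policy whose applicable
   * depth is DEPTH_PARENT (6) applies at the root ((6 & 4) == 4) and at
   * intermediate queues ((6 & 2) == 2) but not at leaves ((6 & 1) != 1),
   * while DEPTH_ANY (7) applies at every level.
   */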
/**
* The comparator returned by this method is to be used for sorting the
* {@link Schedulable}s in that queue.
*
* @return the comparator to sort by
*/
public abstract Comparator<Schedulable> getComparator();
/**
* Computes and updates the shares of {@link Schedulable}s as per
* the {@link SchedulingPolicy}, to be used later for scheduling decisions.
* The shares computed are instantaneous and only consider queues with
* running applications.
*
* @param schedulables {@link Schedulable}s whose shares are to be updated
* @param totalResources Total {@link Resource}s in the cluster
*/
public abstract void computeShares(
Collection<? extends Schedulable> schedulables, Resource totalResources);
/**
* Computes and updates the steady shares of {@link FSQueue}s as per the
* {@link SchedulingPolicy}. The steady share does not differentiate
* between queues with and without running applications under them. The
   * steady share is not used for scheduling; it is only displayed on the web UI
   * for better visibility.
*
* @param queues {@link FSQueue}s whose shares are to be updated
* @param totalResources Total {@link Resource}s in the cluster
*/
public abstract void computeSteadyShares(
Collection<? extends FSQueue> queues, Resource totalResources);
/**
* Check if the resource usage is over the fair share under this policy
*
* @param usage {@link Resource} the resource usage
* @param fairShare {@link Resource} the fair share
* @return true if check passes (is over) or false otherwise
*/
public abstract boolean checkIfUsageOverFairShare(
Resource usage, Resource fairShare);
/**
   * Check if a leaf queue's AM resource usage is over its limit under this policy.
*
* @param usage {@link Resource} the resource used by application masters
* @param maxAMResource {@link Resource} the maximum allowed resource for
* application masters
* @return true if AM resource usage is over the limit
*/
public abstract boolean checkIfAMResourceUsageOverLimit(
Resource usage, Resource maxAMResource);
/**
   * Get the headroom by calculating the min of <code>maxAvailable</code> and
   * (<code>queueFairShare</code> - <code>queueUsage</code>) for the resources
   * that are applicable to this policy. For example, if only memory is
   * considered, other resources such as CPU are left at the
   * <code>maxAvailable</code> value.
*
* @param queueFairShare fairshare in the queue
* @param queueUsage resources used in the queue
* @param maxAvailable available resource in cluster for this queue
* @return calculated headroom
*/
public abstract Resource getHeadroom(Resource queueFairShare,
Resource queueUsage, Resource maxAvailable);
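  /*
   * Worked example (added for exposition; not part of the original source),
   * with hypothetical numbers for a memory-only policy: queueFairShare =
   * 8192 MB, queueUsage = 6144 MB, maxAvailable = 1024 MB gives a headroom of
   * min(8192 - 6144, 1024) = 1024 MB, with CPU left at the maxAvailable value.
   */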
}
| 8,147 | 38.553398 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import org.apache.hadoop.metrics2.lib.MutableRate;
/**
* Class to capture the performance metrics of FairScheduler.
* This should be a singleton.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
@Metrics(context="fairscheduler-op-durations")
public class FSOpDurations implements MetricsSource {
@Metric("Duration for a continuous scheduling run")
MutableRate continuousSchedulingRun;
@Metric("Duration to handle a node update")
MutableRate nodeUpdateCall;
  @Metric("Duration for an update thread run")
MutableRate updateThreadRun;
@Metric("Duration for an update call")
MutableRate updateCall;
@Metric("Duration for a preempt call")
MutableRate preemptCall;
private static final MetricsInfo RECORD_INFO =
info("FSOpDurations", "Durations of FairScheduler calls or thread-runs");
private final MetricsRegistry registry;
private boolean isExtended = false;
private static final FSOpDurations INSTANCE = new FSOpDurations();
public static FSOpDurations getInstance(boolean isExtended) {
INSTANCE.setExtended(isExtended);
return INSTANCE;
}
private FSOpDurations() {
registry = new MetricsRegistry(RECORD_INFO);
registry.tag(RECORD_INFO, "FSOpDurations");
MetricsSystem ms = DefaultMetricsSystem.instance();
if (ms != null) {
ms.register(RECORD_INFO.name(), RECORD_INFO.description(), this);
}
}
private synchronized void setExtended(boolean isExtended) {
if (isExtended == INSTANCE.isExtended)
return;
continuousSchedulingRun.setExtended(isExtended);
nodeUpdateCall.setExtended(isExtended);
updateThreadRun.setExtended(isExtended);
updateCall.setExtended(isExtended);
preemptCall.setExtended(isExtended);
INSTANCE.isExtended = isExtended;
}
@Override
public synchronized void getMetrics(MetricsCollector collector, boolean all) {
registry.snapshot(collector.addRecord(registry.info()), all);
}
public void addContinuousSchedulingRunDuration(long value) {
continuousSchedulingRun.add(value);
}
public void addNodeUpdateDuration(long value) {
nodeUpdateCall.add(value);
}
public void addUpdateThreadRunDuration(long value) {
updateThreadRun.add(value);
}
public void addUpdateCallDuration(long value) {
updateCall.add(value);
}
public void addPreemptCallDuration(long value) {
preemptCall.add(value);
}
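  /*
   * Illustrative sketch (added for exposition; not part of the original
   * source): how a caller on the scheduler side might record one of these
   * durations. The helper below is hypothetical; the real callers simply
   * measure elapsed wall-clock time and pass it to the add*Duration methods.
   */
  void timeNodeUpdate(Runnable nodeUpdate) {
    long start = System.currentTimeMillis();
    nodeUpdate.run();
    addNodeUpdateDuration(System.currentTimeMillis() - start);
  }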
@VisibleForTesting
public boolean hasUpdateThreadRunChanged() {
return updateThreadRun.changed();
}
}
| 4,041 | 31.079365 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
public enum FSQueueType {
/*
* Represents a leaf queue
*/
LEAF,
/*
* Represents a parent queue
*/
PARENT
}
| 1,011 | 30.625 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ReservationQueueConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class ReservationQueueConfiguration {
private long reservationWindow;
private long enforcementWindow;
private String reservationAdmissionPolicy;
private String reservationAgent;
private String planner;
private boolean showReservationAsQueues;
private boolean moveOnExpiry;
private float avgOverTimeMultiplier;
private float maxOverTimeMultiplier;
public ReservationQueueConfiguration() {
this.reservationWindow = ReservationSchedulerConfiguration
.DEFAULT_RESERVATION_WINDOW;
this.enforcementWindow = ReservationSchedulerConfiguration
.DEFAULT_RESERVATION_ENFORCEMENT_WINDOW;
this.reservationAdmissionPolicy = ReservationSchedulerConfiguration
.DEFAULT_RESERVATION_ADMISSION_POLICY;
this.reservationAgent = ReservationSchedulerConfiguration
.DEFAULT_RESERVATION_AGENT_NAME;
this.planner = ReservationSchedulerConfiguration
.DEFAULT_RESERVATION_PLANNER_NAME;
this.showReservationAsQueues = ReservationSchedulerConfiguration
.DEFAULT_SHOW_RESERVATIONS_AS_QUEUES;
this.moveOnExpiry = ReservationSchedulerConfiguration
.DEFAULT_RESERVATION_MOVE_ON_EXPIRY;
this.avgOverTimeMultiplier = ReservationSchedulerConfiguration
.DEFAULT_CAPACITY_OVER_TIME_MULTIPLIER;
this.maxOverTimeMultiplier = ReservationSchedulerConfiguration
.DEFAULT_CAPACITY_OVER_TIME_MULTIPLIER;
}
public long getReservationWindowMsec() {
return reservationWindow;
}
public long getEnforcementWindowMsec() {
return enforcementWindow;
}
public boolean shouldShowReservationAsQueues() {
return showReservationAsQueues;
}
public boolean shouldMoveOnExpiry() {
return moveOnExpiry;
}
public String getReservationAdmissionPolicy() {
return reservationAdmissionPolicy;
}
public String getReservationAgent() {
return reservationAgent;
}
public String getPlanner() {
return planner;
}
public float getAvgOverTimeMultiplier() {
return avgOverTimeMultiplier;
}
public float getMaxOverTimeMultiplier() {
return maxOverTimeMultiplier;
}
public void setPlanner(String planner) {
this.planner = planner;
}
public void setReservationAdmissionPolicy(String reservationAdmissionPolicy) {
this.reservationAdmissionPolicy = reservationAdmissionPolicy;
}
public void setReservationAgent(String reservationAgent) {
this.reservationAgent = reservationAgent;
}
@VisibleForTesting
public void setReservationWindow(long reservationWindow) {
this.reservationWindow = reservationWindow;
}
@VisibleForTesting
public void setAverageCapacity(int averageCapacity) {
this.avgOverTimeMultiplier = averageCapacity;
}
}
| 3,924 | 32.547009 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
/**
* Handles tracking and enforcement for user and queue maxRunningApps
* constraints
*/
public class MaxRunningAppsEnforcer {
private static final Log LOG = LogFactory.getLog(FairScheduler.class);
private final FairScheduler scheduler;
// Tracks the number of running applications by user.
private final Map<String, Integer> usersNumRunnableApps;
@VisibleForTesting
final ListMultimap<String, FSAppAttempt> usersNonRunnableApps;
public MaxRunningAppsEnforcer(FairScheduler scheduler) {
this.scheduler = scheduler;
this.usersNumRunnableApps = new HashMap<String, Integer>();
this.usersNonRunnableApps = ArrayListMultimap.create();
}
/**
* Checks whether making the application runnable would exceed any
* maxRunningApps limits.
*/
public boolean canAppBeRunnable(FSQueue queue, String user) {
AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
Integer userNumRunnable = usersNumRunnableApps.get(user);
if (userNumRunnable == null) {
userNumRunnable = 0;
}
if (userNumRunnable >= allocConf.getUserMaxApps(user)) {
return false;
}
// Check queue and all parent queues
while (queue != null) {
int queueMaxApps = allocConf.getQueueMaxApps(queue.getName());
if (queue.getNumRunnableApps() >= queueMaxApps) {
return false;
}
queue = queue.getParent();
}
return true;
}
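  /*
   * Illustrative example (added for exposition; not part of the original
   * source), with hypothetical limits: if user "alice" has a user max of 5
   * and queue root.queueA a maxRunningApps of 10, a new app for alice in
   * root.queueA is runnable only while alice has fewer than 5 runnable apps
   * and root.queueA, root, and every queue in between are each below their
   * own maxRunningApps; the loop above stops at the first full ancestor.
   */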
/**
* Tracks the given new runnable app for purposes of maintaining max running
* app limits.
*/
public void trackRunnableApp(FSAppAttempt app) {
String user = app.getUser();
FSLeafQueue queue = app.getQueue();
// Increment running counts for all parent queues
FSParentQueue parent = queue.getParent();
while (parent != null) {
parent.incrementRunnableApps();
parent = parent.getParent();
}
Integer userNumRunnable = usersNumRunnableApps.get(user);
usersNumRunnableApps.put(user, (userNumRunnable == null ? 0
: userNumRunnable) + 1);
}
/**
* Tracks the given new non runnable app so that it can be made runnable when
* it would not violate max running app limits.
*/
public void trackNonRunnableApp(FSAppAttempt app) {
String user = app.getUser();
usersNonRunnableApps.put(user, app);
}
/**
   * This is called after reloading the allocation configuration when the
   * scheduler is reinitialized.
   *
   * Checks to see whether any non-runnable applications become runnable
   * now that the max running apps of a given queue has been changed.
*
* Runs in O(n) where n is the number of apps that are non-runnable and in
* the queues that went from having no slack to having slack.
*/
public void updateRunnabilityOnReload() {
FSParentQueue rootQueue = scheduler.getQueueManager().getRootQueue();
List<List<FSAppAttempt>> appsNowMaybeRunnable =
new ArrayList<List<FSAppAttempt>>();
gatherPossiblyRunnableAppLists(rootQueue, appsNowMaybeRunnable);
updateAppsRunnability(appsNowMaybeRunnable, Integer.MAX_VALUE);
}
/**
   * Checks to see whether any other applications are runnable now that the
   * given application has been removed from the given queue, and makes them
   * runnable if so.
*
* Runs in O(n log(n)) where n is the number of queues that are under the
* highest queue that went from having no slack to having slack.
*/
public void updateRunnabilityOnAppRemoval(FSAppAttempt app, FSLeafQueue queue) {
AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
// childqueueX might have no pending apps itself, but if a queue higher up
// in the hierarchy parentqueueY has a maxRunningApps set, an app completion
// in childqueueX could allow an app in some other distant child of
// parentqueueY to become runnable.
// An app removal will only possibly allow another app to become runnable if
// the queue was already at its max before the removal.
    // Thus we find the ancestor queue highest in the tree that was at its
    // maxRunningApps before the removal.
FSQueue highestQueueWithAppsNowRunnable = (queue.getNumRunnableApps() ==
allocConf.getQueueMaxApps(queue.getName()) - 1) ? queue : null;
FSParentQueue parent = queue.getParent();
while (parent != null) {
if (parent.getNumRunnableApps() == allocConf.getQueueMaxApps(parent
.getName()) - 1) {
highestQueueWithAppsNowRunnable = parent;
}
parent = parent.getParent();
}
List<List<FSAppAttempt>> appsNowMaybeRunnable =
new ArrayList<List<FSAppAttempt>>();
// Compile lists of apps which may now be runnable
// We gather lists instead of building a set of all non-runnable apps so
// that this whole operation can be O(number of queues) instead of
// O(number of apps)
if (highestQueueWithAppsNowRunnable != null) {
gatherPossiblyRunnableAppLists(highestQueueWithAppsNowRunnable,
appsNowMaybeRunnable);
}
String user = app.getUser();
Integer userNumRunning = usersNumRunnableApps.get(user);
if (userNumRunning == null) {
userNumRunning = 0;
}
if (userNumRunning == allocConf.getUserMaxApps(user) - 1) {
List<FSAppAttempt> userWaitingApps = usersNonRunnableApps.get(user);
if (userWaitingApps != null) {
appsNowMaybeRunnable.add(userWaitingApps);
}
}
updateAppsRunnability(appsNowMaybeRunnable,
appsNowMaybeRunnable.size());
}
/**
   * Checks whether the given applications are now runnable by iterating
   * through each of them and checking whether the queue and the user have slack.
   *
   * If we know how many apps can become runnable, there is no need to iterate
   * through all of them; maxRunnableApps is used to break out of the iteration.
*/
private void updateAppsRunnability(List<List<FSAppAttempt>>
appsNowMaybeRunnable, int maxRunnableApps) {
// Scan through and check whether this means that any apps are now runnable
Iterator<FSAppAttempt> iter = new MultiListStartTimeIterator(
appsNowMaybeRunnable);
FSAppAttempt prev = null;
List<FSAppAttempt> noLongerPendingApps = new ArrayList<FSAppAttempt>();
while (iter.hasNext()) {
FSAppAttempt next = iter.next();
if (next == prev) {
continue;
}
if (canAppBeRunnable(next.getQueue(), next.getUser())) {
trackRunnableApp(next);
FSAppAttempt appSched = next;
next.getQueue().addApp(appSched, true);
noLongerPendingApps.add(appSched);
if (noLongerPendingApps.size() >= maxRunnableApps) {
break;
}
}
prev = next;
}
// We remove the apps from their pending lists afterwards so that we don't
// pull them out from under the iterator. If they are not in these lists
// in the first place, there is a bug.
for (FSAppAttempt appSched : noLongerPendingApps) {
if (!appSched.getQueue().removeNonRunnableApp(appSched)) {
LOG.error("Can't make app runnable that does not already exist in queue"
+ " as non-runnable: " + appSched + ". This should never happen.");
}
if (!usersNonRunnableApps.remove(appSched.getUser(), appSched)) {
LOG.error("Waiting app " + appSched + " expected to be in "
+ "usersNonRunnableApps, but was not. This should never happen.");
}
}
}
/**
* Updates the relevant tracking variables after a runnable app with the given
* queue and user has been removed.
*/
public void untrackRunnableApp(FSAppAttempt app) {
// Update usersRunnableApps
String user = app.getUser();
int newUserNumRunning = usersNumRunnableApps.get(user) - 1;
if (newUserNumRunning == 0) {
usersNumRunnableApps.remove(user);
} else {
usersNumRunnableApps.put(user, newUserNumRunning);
}
// Update runnable app bookkeeping for queues
FSLeafQueue queue = app.getQueue();
FSParentQueue parent = queue.getParent();
while (parent != null) {
parent.decrementRunnableApps();
parent = parent.getParent();
}
}
/**
* Stops tracking the given non-runnable app
*/
public void untrackNonRunnableApp(FSAppAttempt app) {
usersNonRunnableApps.remove(app.getUser(), app);
}
/**
* Traverses the queue hierarchy under the given queue to gather all lists
* of non-runnable applications.
*/
private void gatherPossiblyRunnableAppLists(FSQueue queue,
List<List<FSAppAttempt>> appLists) {
if (queue.getNumRunnableApps() < scheduler.getAllocationConfiguration()
.getQueueMaxApps(queue.getName())) {
if (queue instanceof FSLeafQueue) {
appLists.add(
((FSLeafQueue)queue).getCopyOfNonRunnableAppSchedulables());
} else {
for (FSQueue child : queue.getChildQueues()) {
gatherPossiblyRunnableAppLists(child, appLists);
}
}
}
}
/**
* Takes a list of lists, each of which is ordered by start time, and returns
* their elements in order of start time.
*
* We maintain positions in each of the lists. Each next() call advances
* the position in one of the lists. We maintain a heap that orders lists
* by the start time of the app in the current position in that list.
* This allows us to pick which list to advance in O(log(num lists)) instead
* of O(num lists) time.
*/
static class MultiListStartTimeIterator implements
Iterator<FSAppAttempt> {
private List<FSAppAttempt>[] appLists;
private int[] curPositionsInAppLists;
private PriorityQueue<IndexAndTime> appListsByCurStartTime;
@SuppressWarnings("unchecked")
public MultiListStartTimeIterator(List<List<FSAppAttempt>> appListList) {
appLists = appListList.toArray(new List[appListList.size()]);
curPositionsInAppLists = new int[appLists.length];
appListsByCurStartTime = new PriorityQueue<IndexAndTime>();
for (int i = 0; i < appLists.length; i++) {
long time = appLists[i].isEmpty() ? Long.MAX_VALUE : appLists[i].get(0)
.getStartTime();
appListsByCurStartTime.add(new IndexAndTime(i, time));
}
}
@Override
public boolean hasNext() {
return !appListsByCurStartTime.isEmpty()
&& appListsByCurStartTime.peek().time != Long.MAX_VALUE;
}
@Override
public FSAppAttempt next() {
IndexAndTime indexAndTime = appListsByCurStartTime.remove();
int nextListIndex = indexAndTime.index;
FSAppAttempt next = appLists[nextListIndex]
.get(curPositionsInAppLists[nextListIndex]);
curPositionsInAppLists[nextListIndex]++;
if (curPositionsInAppLists[nextListIndex] < appLists[nextListIndex].size()) {
indexAndTime.time = appLists[nextListIndex]
.get(curPositionsInAppLists[nextListIndex]).getStartTime();
} else {
indexAndTime.time = Long.MAX_VALUE;
}
appListsByCurStartTime.add(indexAndTime);
return next;
}
@Override
public void remove() {
throw new UnsupportedOperationException("Remove not supported");
}
private static class IndexAndTime implements Comparable<IndexAndTime> {
public int index;
public long time;
public IndexAndTime(int index, long time) {
this.index = index;
this.time = time;
}
@Override
public int compareTo(IndexAndTime o) {
return time < o.time ? -1 : (time > o.time ? 1 : 0);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof IndexAndTime)) {
return false;
}
IndexAndTime other = (IndexAndTime)o;
return other.time == time;
}
@Override
public int hashCode() {
return (int)time;
}
}
}
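  /*
   * Illustrative example (added for exposition; not part of the original
   * source): given two hypothetical lists with start times [1, 7] and [3, 4],
   * the iterator above yields the apps in global order 1, 3, 4, 7, paying only
   * O(log k) per next() call for k lists because the heap tracks the current
   * head of each list.
   */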
}
| 13,124 | 34.472973 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueStatistics;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.util.resource.Resources;
@Private
@Unstable
public abstract class FSQueue implements Queue, Schedulable {
private static final Log LOG = LogFactory.getLog(
FSQueue.class.getName());
private Resource fairShare = Resources.createResource(0, 0);
private Resource steadyFairShare = Resources.createResource(0, 0);
private final String name;
protected final FairScheduler scheduler;
private final FSQueueMetrics metrics;
protected final FSParentQueue parent;
protected final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
protected SchedulingPolicy policy = SchedulingPolicy.DEFAULT_POLICY;
private long fairSharePreemptionTimeout = Long.MAX_VALUE;
private long minSharePreemptionTimeout = Long.MAX_VALUE;
private float fairSharePreemptionThreshold = 0.5f;
public FSQueue(String name, FairScheduler scheduler, FSParentQueue parent) {
this.name = name;
this.scheduler = scheduler;
this.metrics = FSQueueMetrics.forQueue(getName(), parent, true, scheduler.getConf());
metrics.setMinShare(getMinShare());
metrics.setMaxShare(getMaxShare());
this.parent = parent;
}
public String getName() {
return name;
}
@Override
public String getQueueName() {
return name;
}
public SchedulingPolicy getPolicy() {
return policy;
}
public FSParentQueue getParent() {
return parent;
}
protected void throwPolicyDoesnotApplyException(SchedulingPolicy policy)
throws AllocationConfigurationException {
throw new AllocationConfigurationException("SchedulingPolicy " + policy
+ " does not apply to queue " + getName());
}
public abstract void setPolicy(SchedulingPolicy policy)
throws AllocationConfigurationException;
@Override
public ResourceWeights getWeights() {
return scheduler.getAllocationConfiguration().getQueueWeight(getName());
}
@Override
public Resource getMinShare() {
return scheduler.getAllocationConfiguration().getMinResources(getName());
}
@Override
public Resource getMaxShare() {
return scheduler.getAllocationConfiguration().getMaxResources(getName());
}
@Override
public long getStartTime() {
return 0;
}
@Override
public Priority getPriority() {
Priority p = recordFactory.newRecordInstance(Priority.class);
p.setPriority(1);
return p;
}
@Override
public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
queueInfo.setQueueName(getQueueName());
if (scheduler.getClusterResource().getMemory() == 0) {
queueInfo.setCapacity(0.0f);
} else {
queueInfo.setCapacity((float) getFairShare().getMemory() /
scheduler.getClusterResource().getMemory());
}
if (getFairShare().getMemory() == 0) {
queueInfo.setCurrentCapacity(0.0f);
} else {
queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
getFairShare().getMemory());
}
ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>();
if (includeChildQueues) {
Collection<FSQueue> childQueues = getChildQueues();
for (FSQueue child : childQueues) {
childQueueInfos.add(child.getQueueInfo(recursive, recursive));
}
}
queueInfo.setChildQueues(childQueueInfos);
queueInfo.setQueueState(QueueState.RUNNING);
queueInfo.setQueueStatistics(getQueueStatistics());
return queueInfo;
}
public QueueStatistics getQueueStatistics() {
QueueStatistics stats =
recordFactory.newRecordInstance(QueueStatistics.class);
stats.setNumAppsSubmitted(getMetrics().getAppsSubmitted());
stats.setNumAppsRunning(getMetrics().getAppsRunning());
stats.setNumAppsPending(getMetrics().getAppsPending());
stats.setNumAppsCompleted(getMetrics().getAppsCompleted());
stats.setNumAppsKilled(getMetrics().getAppsKilled());
stats.setNumAppsFailed(getMetrics().getAppsFailed());
stats.setNumActiveUsers(getMetrics().getActiveUsers());
stats.setAvailableMemoryMB(getMetrics().getAvailableMB());
stats.setAllocatedMemoryMB(getMetrics().getAllocatedMB());
stats.setPendingMemoryMB(getMetrics().getPendingMB());
stats.setReservedMemoryMB(getMetrics().getReservedMB());
stats.setAvailableVCores(getMetrics().getAvailableVirtualCores());
stats.setAllocatedVCores(getMetrics().getAllocatedVirtualCores());
stats.setPendingVCores(getMetrics().getPendingVirtualCores());
stats.setReservedVCores(getMetrics().getReservedVirtualCores());
return stats;
}
@Override
public FSQueueMetrics getMetrics() {
return metrics;
}
/** Get the fair share assigned to this Schedulable. */
public Resource getFairShare() {
return fairShare;
}
@Override
public void setFairShare(Resource fairShare) {
this.fairShare = fairShare;
metrics.setFairShare(fairShare);
if (LOG.isDebugEnabled()) {
LOG.debug("The updated fairShare for " + getName() + " is " + fairShare);
}
}
/** Get the steady fair share assigned to this Schedulable. */
public Resource getSteadyFairShare() {
return steadyFairShare;
}
public void setSteadyFairShare(Resource steadyFairShare) {
this.steadyFairShare = steadyFairShare;
metrics.setSteadyFairShare(steadyFairShare);
}
public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
return scheduler.getAllocationConfiguration().hasAccess(name, acl, user);
}
public long getFairSharePreemptionTimeout() {
return fairSharePreemptionTimeout;
}
public void setFairSharePreemptionTimeout(long fairSharePreemptionTimeout) {
this.fairSharePreemptionTimeout = fairSharePreemptionTimeout;
}
public long getMinSharePreemptionTimeout() {
return minSharePreemptionTimeout;
}
public void setMinSharePreemptionTimeout(long minSharePreemptionTimeout) {
this.minSharePreemptionTimeout = minSharePreemptionTimeout;
}
public float getFairSharePreemptionThreshold() {
return fairSharePreemptionThreshold;
}
public void setFairSharePreemptionThreshold(float fairSharePreemptionThreshold) {
this.fairSharePreemptionThreshold = fairSharePreemptionThreshold;
}
/**
* Recomputes the shares for all child queues and applications based on this
* queue's current share
*/
public abstract void recomputeShares();
/**
* Update the min/fair share preemption timeouts and threshold for this queue.
*/
public void updatePreemptionVariables() {
// For min share timeout
minSharePreemptionTimeout = scheduler.getAllocationConfiguration()
.getMinSharePreemptionTimeout(getName());
if (minSharePreemptionTimeout == -1 && parent != null) {
minSharePreemptionTimeout = parent.getMinSharePreemptionTimeout();
}
// For fair share timeout
fairSharePreemptionTimeout = scheduler.getAllocationConfiguration()
.getFairSharePreemptionTimeout(getName());
if (fairSharePreemptionTimeout == -1 && parent != null) {
fairSharePreemptionTimeout = parent.getFairSharePreemptionTimeout();
}
// For fair share preemption threshold
fairSharePreemptionThreshold = scheduler.getAllocationConfiguration()
.getFairSharePreemptionThreshold(getName());
if (fairSharePreemptionThreshold < 0 && parent != null) {
fairSharePreemptionThreshold = parent.getFairSharePreemptionThreshold();
}
}
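  /*
   * Illustrative example (added for exposition; not part of the original
   * source), with hypothetical values: if root.queueA leaves its
   * minSharePreemptionTimeout unset (-1) while root configures 300000 ms,
   * queueA inherits 300000 ms; the same parent fallback applies to the fair
   * share preemption timeout and to a negative fair share preemption threshold.
   */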
/**
* Gets the children of this queue, if any.
*/
public abstract List<FSQueue> getChildQueues();
/**
* Adds all applications in the queue and its subqueues to the given collection.
* @param apps the collection to add the applications to
*/
public abstract void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps);
/**
* Return the number of apps for which containers can be allocated.
* Includes apps in subqueues.
*/
public abstract int getNumRunnableApps();
/**
* Helper method to check if the queue should attempt assigning resources
*
* @return true if check passes (can assign) or false otherwise
*/
protected boolean assignContainerPreCheck(FSSchedulerNode node) {
if (!Resources.fitsIn(getResourceUsage(),
scheduler.getAllocationConfiguration().getMaxResources(getName()))
|| node.getReservedContainer() != null) {
return false;
}
return true;
}
/**
* Returns true if queue has at least one app running.
*/
public boolean isActive() {
return getNumRunnableApps() > 0;
}
/** Convenient toString implementation for debugging. */
@Override
public String toString() {
return String.format("[%s, demand=%s, running=%s, share=%s, w=%s]",
getName(), getDemand(), getResourceUsage(), fairShare, getWeights());
}
@Override
public Set<String> getAccessibleNodeLabels() {
// TODO, add implementation for FS
return null;
}
@Override
public String getDefaultNodeLabelExpression() {
// TODO, add implementation for FS
return null;
}
@Override
public void incPendingResource(String nodeLabel, Resource resourceToInc) {
}
@Override
public void decPendingResource(String nodeLabel, Resource resourceToDec) {
}
@Override
public Priority getDefaultApplicationPriority() {
// TODO add implementation for FSParentQueue
return null;
}
public boolean fitsInMaxShare(Resource additionalResource) {
Resource usagePlusAddition =
Resources.add(getResourceUsage(), additionalResource);
if (!Resources.fitsIn(usagePlusAddition, getMaxShare())) {
return false;
}
FSQueue parentQueue = getParent();
if (parentQueue != null) {
return parentQueue.fitsInMaxShare(additionalResource);
}
return true;
}
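  /*
   * Illustrative example (added for exposition; not part of the original
   * source): fitsInMaxShare() walks up the hierarchy, so a container that
   * fits under a hypothetical root.queueA maxResources can still be rejected
   * if adding it would push root (or any other ancestor) over its own
   * maxResources.
   */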
}
| 11,780 | 32.185915 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
@Private
@Evolving
public class FairSchedulerConfiguration extends Configuration {
public static final Log LOG = LogFactory.getLog(
FairSchedulerConfiguration.class.getName());
  /** Increment in which allocations are granted by the RM scheduler.
   * These properties are looked up in yarn-site.xml. */
public static final String RM_SCHEDULER_INCREMENT_ALLOCATION_MB =
YarnConfiguration.YARN_PREFIX + "scheduler.increment-allocation-mb";
public static final int DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB = 1024;
public static final String RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES =
YarnConfiguration.YARN_PREFIX + "scheduler.increment-allocation-vcores";
public static final int DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES = 1;
private static final String CONF_PREFIX = "yarn.scheduler.fair.";
public static final String ALLOCATION_FILE = CONF_PREFIX + "allocation.file";
protected static final String DEFAULT_ALLOCATION_FILE = "fair-scheduler.xml";
/** Whether to enable the Fair Scheduler event log */
public static final String EVENT_LOG_ENABLED = CONF_PREFIX + "event-log-enabled";
public static final boolean DEFAULT_EVENT_LOG_ENABLED = false;
protected static final String EVENT_LOG_DIR = "eventlog.dir";
/** Whether pools can be created that were not specified in the FS configuration file
*/
protected static final String ALLOW_UNDECLARED_POOLS = CONF_PREFIX + "allow-undeclared-pools";
protected static final boolean DEFAULT_ALLOW_UNDECLARED_POOLS = true;
/** Whether to use the user name as the queue name (instead of "default") if
* the request does not specify a queue. */
protected static final String USER_AS_DEFAULT_QUEUE = CONF_PREFIX + "user-as-default-queue";
protected static final boolean DEFAULT_USER_AS_DEFAULT_QUEUE = true;
protected static final float DEFAULT_LOCALITY_THRESHOLD = -1.0f;
/** Cluster threshold for node locality. */
protected static final String LOCALITY_THRESHOLD_NODE = CONF_PREFIX + "locality.threshold.node";
protected static final float DEFAULT_LOCALITY_THRESHOLD_NODE =
DEFAULT_LOCALITY_THRESHOLD;
/** Cluster threshold for rack locality. */
protected static final String LOCALITY_THRESHOLD_RACK = CONF_PREFIX + "locality.threshold.rack";
protected static final float DEFAULT_LOCALITY_THRESHOLD_RACK =
DEFAULT_LOCALITY_THRESHOLD;
/** Delay for node locality. */
protected static final String LOCALITY_DELAY_NODE_MS = CONF_PREFIX + "locality-delay-node-ms";
protected static final long DEFAULT_LOCALITY_DELAY_NODE_MS = -1L;
/** Delay for rack locality. */
protected static final String LOCALITY_DELAY_RACK_MS = CONF_PREFIX + "locality-delay-rack-ms";
protected static final long DEFAULT_LOCALITY_DELAY_RACK_MS = -1L;
/** Enable continuous scheduling or not. */
protected static final String CONTINUOUS_SCHEDULING_ENABLED = CONF_PREFIX + "continuous-scheduling-enabled";
protected static final boolean DEFAULT_CONTINUOUS_SCHEDULING_ENABLED = false;
  /** Sleep time of each pass in continuous scheduling (5ms by default). */
protected static final String CONTINUOUS_SCHEDULING_SLEEP_MS = CONF_PREFIX + "continuous-scheduling-sleep-ms";
protected static final int DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS = 5;
/** Whether preemption is enabled. */
protected static final String PREEMPTION = CONF_PREFIX + "preemption";
protected static final boolean DEFAULT_PREEMPTION = false;
protected static final String PREEMPTION_THRESHOLD =
CONF_PREFIX + "preemption.cluster-utilization-threshold";
protected static final float DEFAULT_PREEMPTION_THRESHOLD = 0.8f;
protected static final String PREEMPTION_INTERVAL = CONF_PREFIX + "preemptionInterval";
protected static final int DEFAULT_PREEMPTION_INTERVAL = 5000;
protected static final String WAIT_TIME_BEFORE_KILL = CONF_PREFIX + "waitTimeBeforeKill";
protected static final int DEFAULT_WAIT_TIME_BEFORE_KILL = 15000;
/** Whether to assign multiple containers in one check-in. */
public static final String ASSIGN_MULTIPLE = CONF_PREFIX + "assignmultiple";
protected static final boolean DEFAULT_ASSIGN_MULTIPLE = false;
/** Whether to give more weight to apps requiring many resources. */
protected static final String SIZE_BASED_WEIGHT = CONF_PREFIX + "sizebasedweight";
protected static final boolean DEFAULT_SIZE_BASED_WEIGHT = false;
/** Maximum number of containers to assign on each check-in. */
protected static final String MAX_ASSIGN = CONF_PREFIX + "max.assign";
protected static final int DEFAULT_MAX_ASSIGN = -1;
  /** The update interval for calculating resources in FairScheduler. */
public static final String UPDATE_INTERVAL_MS =
CONF_PREFIX + "update-interval-ms";
public static final int DEFAULT_UPDATE_INTERVAL_MS = 500;
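  /*
   * Illustrative example (added for exposition; not part of the original
   * source): these keys are set in yarn-site.xml, e.g.
   *
   *   <property>
   *     <name>yarn.scheduler.fair.preemption</name>
   *     <value>true</value>
   *   </property>
   *   <property>
   *     <name>yarn.scheduler.fair.allocation.file</name>
   *     <value>/etc/hadoop/fair-scheduler.xml</value>
   *   </property>
   *
   * The path above is hypothetical; ALLOCATION_FILE defaults to
   * "fair-scheduler.xml" on the classpath.
   */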
public FairSchedulerConfiguration() {
super();
}
public FairSchedulerConfiguration(Configuration conf) {
super(conf);
}
public Resource getMinimumAllocation() {
int mem = getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int cpu = getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
return Resources.createResource(mem, cpu);
}
public Resource getMaximumAllocation() {
int mem = getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
int cpu = getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
return Resources.createResource(mem, cpu);
}
public Resource getIncrementAllocation() {
int incrementMemory = getInt(
RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB);
int incrementCores = getInt(
RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES);
return Resources.createResource(incrementMemory, incrementCores);
}
public float getLocalityThresholdNode() {
return getFloat(LOCALITY_THRESHOLD_NODE, DEFAULT_LOCALITY_THRESHOLD_NODE);
}
public float getLocalityThresholdRack() {
return getFloat(LOCALITY_THRESHOLD_RACK, DEFAULT_LOCALITY_THRESHOLD_RACK);
}
public boolean isContinuousSchedulingEnabled() {
return getBoolean(CONTINUOUS_SCHEDULING_ENABLED, DEFAULT_CONTINUOUS_SCHEDULING_ENABLED);
}
public int getContinuousSchedulingSleepMs() {
return getInt(CONTINUOUS_SCHEDULING_SLEEP_MS, DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
}
public long getLocalityDelayNodeMs() {
return getLong(LOCALITY_DELAY_NODE_MS, DEFAULT_LOCALITY_DELAY_NODE_MS);
}
public long getLocalityDelayRackMs() {
return getLong(LOCALITY_DELAY_RACK_MS, DEFAULT_LOCALITY_DELAY_RACK_MS);
}
public boolean getPreemptionEnabled() {
return getBoolean(PREEMPTION, DEFAULT_PREEMPTION);
}
public float getPreemptionUtilizationThreshold() {
return getFloat(PREEMPTION_THRESHOLD, DEFAULT_PREEMPTION_THRESHOLD);
}
public boolean getAssignMultiple() {
return getBoolean(ASSIGN_MULTIPLE, DEFAULT_ASSIGN_MULTIPLE);
}
public int getMaxAssign() {
return getInt(MAX_ASSIGN, DEFAULT_MAX_ASSIGN);
}
public boolean getSizeBasedWeight() {
return getBoolean(SIZE_BASED_WEIGHT, DEFAULT_SIZE_BASED_WEIGHT);
}
public boolean isEventLogEnabled() {
return getBoolean(EVENT_LOG_ENABLED, DEFAULT_EVENT_LOG_ENABLED);
}
public String getEventlogDir() {
return get(EVENT_LOG_DIR, new File(System.getProperty("hadoop.log.dir",
"/tmp/")).getAbsolutePath() + File.separator + "fairscheduler");
}
public int getPreemptionInterval() {
return getInt(PREEMPTION_INTERVAL, DEFAULT_PREEMPTION_INTERVAL);
}
public int getWaitTimeBeforeKill() {
return getInt(WAIT_TIME_BEFORE_KILL, DEFAULT_WAIT_TIME_BEFORE_KILL);
}
public boolean getUsePortForNodeName() {
return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
}
/**
* Parses a resource config value of a form like "1024", "1024 mb",
* or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
*
* @throws AllocationConfigurationException
*/
public static Resource parseResourceConfigValue(String val)
throws AllocationConfigurationException {
try {
val = StringUtils.toLowerCase(val);
int memory = findResource(val, "mb");
int vcores = findResource(val, "vcores");
return BuilderUtils.newResource(memory, vcores);
} catch (AllocationConfigurationException ex) {
throw ex;
} catch (Exception ex) {
throw new AllocationConfigurationException(
"Error reading resource config", ex);
}
}
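  /*
   * Illustrative examples (added for exposition; not part of the original
   * source), with hypothetical values:
   *
   *   parseResourceConfigValue("1024 mb, 3 vcores")  -> <memory:1024, vCores:3>
   *   parseResourceConfigValue("3 vcores, 1024 mb")  -> <memory:1024, vCores:3>
   *
   * The order of the components does not matter because findResource() below
   * matches each unit independently.
   */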
public long getUpdateInterval() {
return getLong(UPDATE_INTERVAL_MS, DEFAULT_UPDATE_INTERVAL_MS);
}
private static int findResource(String val, String units)
throws AllocationConfigurationException {
Pattern pattern = Pattern.compile("(\\d+)\\s*" + units);
Matcher matcher = pattern.matcher(val);
if (!matcher.find()) {
throw new AllocationConfigurationException("Missing resource: " + units);
}
return Integer.parseInt(matcher.group(1));
}
}
| 10,985 | 39.538745 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
@Private
@Unstable
public class FSParentQueue extends FSQueue {
private static final Log LOG = LogFactory.getLog(
FSParentQueue.class.getName());
private final List<FSQueue> childQueues = new ArrayList<>();
private Resource demand = Resources.createResource(0);
private int runnableApps;
private ReadWriteLock rwLock = new ReentrantReadWriteLock();
private Lock readLock = rwLock.readLock();
private Lock writeLock = rwLock.writeLock();
public FSParentQueue(String name, FairScheduler scheduler,
FSParentQueue parent) {
super(name, scheduler, parent);
}
public void addChildQueue(FSQueue child) {
writeLock.lock();
try {
childQueues.add(child);
} finally {
writeLock.unlock();
}
}
public void removeChildQueue(FSQueue child) {
writeLock.lock();
try {
childQueues.remove(child);
} finally {
writeLock.unlock();
}
}
@Override
public void recomputeShares() {
readLock.lock();
try {
policy.computeShares(childQueues, getFairShare());
for (FSQueue childQueue : childQueues) {
childQueue.getMetrics().setFairShare(childQueue.getFairShare());
childQueue.recomputeShares();
}
} finally {
readLock.unlock();
}
}
public void recomputeSteadyShares() {
readLock.lock();
try {
policy.computeSteadyShares(childQueues, getSteadyFairShare());
for (FSQueue childQueue : childQueues) {
childQueue.getMetrics()
.setSteadyFairShare(childQueue.getSteadyFairShare());
if (childQueue instanceof FSParentQueue) {
((FSParentQueue) childQueue).recomputeSteadyShares();
}
}
} finally {
readLock.unlock();
}
}
@Override
public void updatePreemptionVariables() {
super.updatePreemptionVariables();
// For child queues
readLock.lock();
try {
for (FSQueue childQueue : childQueues) {
childQueue.updatePreemptionVariables();
}
} finally {
readLock.unlock();
}
}
@Override
public Resource getDemand() {
readLock.lock();
try {
return Resource.newInstance(demand.getMemory(), demand.getVirtualCores());
} finally {
readLock.unlock();
}
}
@Override
public Resource getResourceUsage() {
Resource usage = Resources.createResource(0);
readLock.lock();
try {
for (FSQueue child : childQueues) {
Resources.addTo(usage, child.getResourceUsage());
}
} finally {
readLock.unlock();
}
return usage;
}
@Override
public void updateDemand() {
// Compute demand by iterating through apps in the queue
// Limit demand to maxResources
Resource maxRes = scheduler.getAllocationConfiguration()
.getMaxResources(getName());
writeLock.lock();
try {
demand = Resources.createResource(0);
for (FSQueue childQueue : childQueues) {
childQueue.updateDemand();
Resource toAdd = childQueue.getDemand();
if (LOG.isDebugEnabled()) {
LOG.debug("Counting resource from " + childQueue.getName() + " " +
toAdd + "; Total resource consumption for " + getName() +
" now " + demand);
}
demand = Resources.add(demand, toAdd);
demand = Resources.componentwiseMin(demand, maxRes);
if (Resources.equals(demand, maxRes)) {
break;
}
}
} finally {
writeLock.unlock();
}
if (LOG.isDebugEnabled()) {
LOG.debug("The updated demand for " + getName() + " is " + demand +
"; the max is " + maxRes);
}
}
private QueueUserACLInfo getUserAclInfo(UserGroupInformation user) {
List<QueueACL> operations = new ArrayList<>();
for (QueueACL operation : QueueACL.values()) {
if (hasAccess(operation, user)) {
operations.add(operation);
}
}
return QueueUserACLInfo.newInstance(getQueueName(), operations);
}
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
List<QueueUserACLInfo> userAcls = new ArrayList<QueueUserACLInfo>();
// Add queue acls
userAcls.add(getUserAclInfo(user));
// Add children queue acls
readLock.lock();
try {
for (FSQueue child : childQueues) {
userAcls.addAll(child.getQueueUserAclInfo(user));
}
} finally {
readLock.unlock();
}
return userAcls;
}
@Override
public Resource assignContainer(FSSchedulerNode node) {
Resource assigned = Resources.none();
// If this queue is over its limit, reject
if (!assignContainerPreCheck(node)) {
return assigned;
}
// Hold the write lock when sorting childQueues
writeLock.lock();
try {
Collections.sort(childQueues, policy.getComparator());
} finally {
writeLock.unlock();
}
/*
* We are releasing the lock between the sort and iteration of the
* "sorted" list. There could be changes to the list here:
* 1. Add a child queue to the end of the list, this doesn't affect
* container assignment.
* 2. Remove a child queue, this is probably good to take care of so we
* don't assign to a queue that is going to be removed shortly.
*/
readLock.lock();
try {
for (FSQueue child : childQueues) {
assigned = child.assignContainer(node);
if (!Resources.equals(assigned, Resources.none())) {
break;
}
}
} finally {
readLock.unlock();
}
return assigned;
}
@Override
public RMContainer preemptContainer() {
RMContainer toBePreempted = null;
// Find the childQueue which is most over fair share
FSQueue candidateQueue = null;
Comparator<Schedulable> comparator = policy.getComparator();
readLock.lock();
try {
for (FSQueue queue : childQueues) {
if (candidateQueue == null ||
comparator.compare(queue, candidateQueue) > 0) {
candidateQueue = queue;
}
}
} finally {
readLock.unlock();
}
// Let the selected queue choose which of its container to preempt
if (candidateQueue != null) {
toBePreempted = candidateQueue.preemptContainer();
}
return toBePreempted;
}
@Override
public List<FSQueue> getChildQueues() {
readLock.lock();
try {
return Collections.unmodifiableList(childQueues);
} finally {
readLock.unlock();
}
}
@Override
public void setPolicy(SchedulingPolicy policy)
throws AllocationConfigurationException {
boolean allowed =
SchedulingPolicy.isApplicableTo(policy, (parent == null)
? SchedulingPolicy.DEPTH_ROOT
: SchedulingPolicy.DEPTH_INTERMEDIATE);
if (!allowed) {
throwPolicyDoesnotApplyException(policy);
}
super.policy = policy;
}
public void incrementRunnableApps() {
writeLock.lock();
try {
runnableApps++;
} finally {
writeLock.unlock();
}
}
public void decrementRunnableApps() {
writeLock.lock();
try {
runnableApps--;
} finally {
writeLock.unlock();
}
}
@Override
public int getNumRunnableApps() {
readLock.lock();
try {
return runnableApps;
} finally {
readLock.unlock();
}
}
@Override
public void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps) {
readLock.lock();
try {
for (FSQueue childQueue : childQueues) {
childQueue.collectSchedulerApplications(apps);
}
} finally {
readLock.unlock();
}
}
@Override
public ActiveUsersManager getActiveUsersManager() {
// Should never be called since all applications are submitted to LeafQueues
return null;
}
@Override
public void recoverContainer(Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer) {
// TODO Auto-generated method stub
}
}
| 9,982 | 27.121127 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.util.ReflectionUtils;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
@Private
@Unstable
public class QueuePlacementPolicy {
private static final Map<String, Class<? extends QueuePlacementRule>> ruleClasses;
static {
Map<String, Class<? extends QueuePlacementRule>> map =
new HashMap<String, Class<? extends QueuePlacementRule>>();
map.put("user", QueuePlacementRule.User.class);
map.put("primaryGroup", QueuePlacementRule.PrimaryGroup.class);
map.put("secondaryGroupExistingQueue",
QueuePlacementRule.SecondaryGroupExistingQueue.class);
map.put("specified", QueuePlacementRule.Specified.class);
map.put("nestedUserQueue",
QueuePlacementRule.NestedUserQueue.class);
map.put("default", QueuePlacementRule.Default.class);
map.put("reject", QueuePlacementRule.Reject.class);
ruleClasses = Collections.unmodifiableMap(map);
}
private final List<QueuePlacementRule> rules;
private final Map<FSQueueType, Set<String>> configuredQueues;
private final Groups groups;
public QueuePlacementPolicy(List<QueuePlacementRule> rules,
Map<FSQueueType, Set<String>> configuredQueues, Configuration conf)
throws AllocationConfigurationException {
for (int i = 0; i < rules.size()-1; i++) {
if (rules.get(i).isTerminal()) {
throw new AllocationConfigurationException("Rules after rule "
+ i + " in queue placement policy can never be reached");
}
}
if (!rules.get(rules.size()-1).isTerminal()) {
throw new AllocationConfigurationException(
"Could get past last queue placement rule without assigning");
}
this.rules = rules;
this.configuredQueues = configuredQueues;
groups = new Groups(conf);
}
/**
* Builds a QueuePlacementPolicy from an xml element.
*/
public static QueuePlacementPolicy fromXml(Element el,
Map<FSQueueType, Set<String>> configuredQueues, Configuration conf)
throws AllocationConfigurationException {
List<QueuePlacementRule> rules = new ArrayList<QueuePlacementRule>();
NodeList elements = el.getChildNodes();
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
QueuePlacementRule rule = createAndInitializeRule(node);
rules.add(rule);
}
}
return new QueuePlacementPolicy(rules, configuredQueues, conf);
}
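  // Illustrative sketch, not part of the original source: the kind of element
  // fromXml() is handed, as it might appear in a fair-scheduler allocation
  // file. Rule names correspond to the keys registered in ruleClasses above;
  // the attributes each rule accepts are defined in QueuePlacementRule and are
  // only assumed here.
  //
  //   <queuePlacementPolicy>
  //     <rule name="specified" />
  //     <rule name="primaryGroup" />
  //     <rule name="default" />
  //   </queuePlacementPolicy>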
/**
   * Create and initialize a rule given an xml node.
   * @param node the xml node that describes the rule
   * @return the initialized QueuePlacementRule
* @throws AllocationConfigurationException
*/
public static QueuePlacementRule createAndInitializeRule(Node node)
throws AllocationConfigurationException {
Element element = (Element) node;
String ruleName = element.getAttribute("name");
if ("".equals(ruleName)) {
throw new AllocationConfigurationException("No name provided for a "
+ "rule element");
}
Class<? extends QueuePlacementRule> clazz = ruleClasses.get(ruleName);
if (clazz == null) {
throw new AllocationConfigurationException("No rule class found for "
+ ruleName);
}
QueuePlacementRule rule = ReflectionUtils.newInstance(clazz, null);
rule.initializeFromXml(element);
return rule;
}
/**
* Build a simple queue placement policy from the allow-undeclared-pools and
* user-as-default-queue configuration options.
*/
public static QueuePlacementPolicy fromConfiguration(Configuration conf,
Map<FSQueueType, Set<String>> configuredQueues) {
boolean create = conf.getBoolean(
FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS,
FairSchedulerConfiguration.DEFAULT_ALLOW_UNDECLARED_POOLS);
boolean userAsDefaultQueue = conf.getBoolean(
FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,
FairSchedulerConfiguration.DEFAULT_USER_AS_DEFAULT_QUEUE);
List<QueuePlacementRule> rules = new ArrayList<QueuePlacementRule>();
rules.add(new QueuePlacementRule.Specified().initialize(create, null));
if (userAsDefaultQueue) {
rules.add(new QueuePlacementRule.User().initialize(create, null));
}
if (!userAsDefaultQueue || !create) {
rules.add(new QueuePlacementRule.Default().initialize(true, null));
}
try {
return new QueuePlacementPolicy(rules, configuredQueues, conf);
} catch (AllocationConfigurationException ex) {
throw new RuntimeException("Should never hit exception when loading" +
"placement policy from conf", ex);
}
}
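  // Illustrative sketch, not part of the original source: the rule list that
  // fromConfiguration() produces for a few settings, following the code above.
  //
  //   allow-undeclared-pools=true,  user-as-default-queue=true
  //     -> [ Specified(create=true), User(create=true) ]
  //   allow-undeclared-pools=true,  user-as-default-queue=false
  //     -> [ Specified(create=true), Default(create=true) ]
  //   allow-undeclared-pools=false, user-as-default-queue=true
  //     -> [ Specified(create=false), User(create=false), Default(create=true) ]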
/**
* Applies this rule to an app with the given requested queue and user/group
* information.
*
* @param requestedQueue
* The queue specified in the ApplicationSubmissionContext
* @param user
* The user submitting the app
* @return
* The name of the queue to assign the app to. Or null if the app should
* be rejected.
* @throws IOException
* If an exception is encountered while getting the user's groups
*/
public String assignAppToQueue(String requestedQueue, String user)
throws IOException {
for (QueuePlacementRule rule : rules) {
String queue = rule.assignAppToQueue(requestedQueue, user, groups,
configuredQueues);
if (queue == null || !queue.isEmpty()) {
return queue;
}
}
throw new IllegalStateException("Should have applied a rule before " +
"reaching here");
}
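  // Illustrative usage sketch, not part of the original source: a caller such
  // as FairScheduler.assignToQueue() resolves the final queue name like this
  // (the requested queue and user below are made-up values).
  //
  //   QueuePlacementPolicy policy = allocConf.getPlacementPolicy();
  //   String queue = policy.assignAppToQueue("root.adhoc", "alice");
  //   if (queue == null) {
  //     // the terminal rule rejected the app
  //   } else {
  //     // submit the app to 'queue'
  //   }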
public List<QueuePlacementRule> getRules() {
return rules;
}
}
| 6,838 | 36.78453 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.ContainersAndNMTokensAllocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerRescheduledEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* A scheduler that schedules resources between a set of queues. The scheduler
* keeps track of the resources used by each queue, and attempts to maintain
* fairness by scheduling tasks at queues whose allocations are farthest below
* an ideal fair distribution.
*
* The fair scheduler supports hierarchical queues. All queues descend from a
* queue named "root". Available resources are distributed among the children
* of the root queue in the typical fair scheduling fashion. Then, the children
* distribute the resources assigned to them to their children in the same
* fashion. Applications may only be scheduled on leaf queues. Queues can be
* specified as children of other queues by placing them as sub-elements of
* their parents in the fair scheduler configuration file.
*
* A queue's name starts with the names of its parents, with periods as
* separators. So a queue named "queue1" under the root named, would be
* referred to as "root.queue1", and a queue named "queue2" under a queue
* named "parent1" would be referred to as "root.parent1.queue2".
*/
@LimitedPrivate("yarn")
@Unstable
@SuppressWarnings("unchecked")
public class FairScheduler extends
AbstractYarnScheduler<FSAppAttempt, FSSchedulerNode> {
private FairSchedulerConfiguration conf;
private Resource incrAllocation;
private QueueManager queueMgr;
private volatile Clock clock;
private boolean usePortForNodeName;
private static final Log LOG = LogFactory.getLog(FairScheduler.class);
private static final ResourceCalculator RESOURCE_CALCULATOR =
new DefaultResourceCalculator();
private static final ResourceCalculator DOMINANT_RESOURCE_CALCULATOR =
new DominantResourceCalculator();
// Value that container assignment methods return when a container is
// reserved
public static final Resource CONTAINER_RESERVED = Resources.createResource(-1);
// How often fair shares are re-calculated (ms)
protected long updateInterval;
private final int UPDATE_DEBUG_FREQUENCY = 5;
private int updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY;
@VisibleForTesting
Thread updateThread;
private final Object updateThreadMonitor = new Object();
@VisibleForTesting
Thread schedulingThread;
// timeout to join when we stop this service
protected final long THREAD_JOIN_TIMEOUT_MS = 1000;
// Aggregate metrics
FSQueueMetrics rootMetrics;
FSOpDurations fsOpDurations;
// Time when we last updated preemption vars
protected long lastPreemptionUpdateTime;
// Time we last ran preemptTasksIfNecessary
private long lastPreemptCheckTime;
// Preemption related variables
protected boolean preemptionEnabled;
protected float preemptionUtilizationThreshold;
// How often tasks are preempted
protected long preemptionInterval;
// ms to wait before force killing stuff (must be longer than a couple
// of heartbeats to give task-kill commands a chance to act).
protected long waitTimeBeforeKill;
// Containers whose AMs have been warned that they will be preempted soon.
private List<RMContainer> warnedContainers = new ArrayList<RMContainer>();
protected boolean sizeBasedWeight; // Give larger weights to larger jobs
protected WeightAdjuster weightAdjuster; // Can be null for no weight adjuster
protected boolean continuousSchedulingEnabled; // Continuous Scheduling enabled or not
protected int continuousSchedulingSleepMs; // Sleep time for each pass in continuous scheduling
private Comparator<NodeId> nodeAvailableResourceComparator =
new NodeAvailableResourceComparator(); // Node available resource comparator
protected double nodeLocalityThreshold; // Cluster threshold for node locality
protected double rackLocalityThreshold; // Cluster threshold for rack locality
protected long nodeLocalityDelayMs; // Delay for node locality
protected long rackLocalityDelayMs; // Delay for rack locality
private FairSchedulerEventLog eventLog; // Machine-readable event log
protected boolean assignMultiple; // Allocate multiple containers per
// heartbeat
protected int maxAssign; // Max containers to assign per heartbeat
@VisibleForTesting
final MaxRunningAppsEnforcer maxRunningEnforcer;
private AllocationFileLoaderService allocsLoader;
@VisibleForTesting
AllocationConfiguration allocConf;
public FairScheduler() {
super(FairScheduler.class.getName());
clock = new SystemClock();
allocsLoader = new AllocationFileLoaderService();
queueMgr = new QueueManager(this);
maxRunningEnforcer = new MaxRunningAppsEnforcer(this);
}
private void validateConf(Configuration conf) {
// validate scheduler memory allocation setting
int minMem = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int maxMem = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
if (minMem < 0 || minMem > maxMem) {
throw new YarnRuntimeException("Invalid resource scheduler memory"
+ " allocation configuration"
+ ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB
+ "=" + minMem
+ ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB
+ "=" + maxMem + ", min should equal greater than 0"
+ ", max should be no smaller than min.");
}
// validate scheduler vcores allocation setting
int minVcores = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
int maxVcores = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
if (minVcores < 0 || minVcores > maxVcores) {
throw new YarnRuntimeException("Invalid resource scheduler vcores"
+ " allocation configuration"
+ ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES
+ "=" + minVcores
+ ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES
+ "=" + maxVcores + ", min should equal greater than 0"
+ ", max should be no smaller than min.");
}
}
public FairSchedulerConfiguration getConf() {
return conf;
}
public QueueManager getQueueManager() {
return queueMgr;
}
// Allows UpdateThread to start processing without waiting till updateInterval
void triggerUpdate() {
synchronized (updateThreadMonitor) {
updateThreadMonitor.notify();
}
}
/**
* Thread which calls {@link FairScheduler#update()} every
* <code>updateInterval</code> milliseconds.
*/
private class UpdateThread extends Thread {
@Override
public void run() {
while (!Thread.currentThread().isInterrupted()) {
try {
synchronized (updateThreadMonitor) {
updateThreadMonitor.wait(updateInterval);
}
long start = getClock().getTime();
update();
preemptTasksIfNecessary();
long duration = getClock().getTime() - start;
fsOpDurations.addUpdateThreadRunDuration(duration);
} catch (InterruptedException ie) {
LOG.warn("Update thread interrupted. Exiting.");
return;
} catch (Exception e) {
LOG.error("Exception in fair scheduler UpdateThread", e);
}
}
}
}
/**
* Thread which attempts scheduling resources continuously,
* asynchronous to the node heartbeats.
*/
private class ContinuousSchedulingThread extends Thread {
@Override
public void run() {
while (!Thread.currentThread().isInterrupted()) {
try {
continuousSchedulingAttempt();
Thread.sleep(getContinuousSchedulingSleepMs());
} catch (InterruptedException e) {
LOG.warn("Continuous scheduling thread interrupted. Exiting.", e);
return;
}
}
}
}
/**
* Recompute the internal variables used by the scheduler - per-job weights,
* fair shares, deficits, minimum slot allocations, and amount of used and
* required resources per job.
*/
protected synchronized void update() {
long start = getClock().getTime();
updateStarvationStats(); // Determine if any queues merit preemption
FSQueue rootQueue = queueMgr.getRootQueue();
// Recursively update demands for all queues
rootQueue.updateDemand();
rootQueue.setFairShare(clusterResource);
// Recursively compute fair shares for all queues
// and update metrics
rootQueue.recomputeShares();
updateRootQueueMetrics();
if (LOG.isDebugEnabled()) {
if (--updatesToSkipForDebug < 0) {
updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY;
LOG.debug("Cluster Capacity: " + clusterResource +
" Allocations: " + rootMetrics.getAllocatedResources() +
" Availability: " + Resource.newInstance(
rootMetrics.getAvailableMB(),
rootMetrics.getAvailableVirtualCores()) +
" Demand: " + rootQueue.getDemand());
}
}
long duration = getClock().getTime() - start;
fsOpDurations.addUpdateCallDuration(duration);
}
/**
   * Update the preemption fields for all QueueSchedulables, i.e. the times since
* each queue last was at its guaranteed share and over its fair share
* threshold for each type of task.
*/
private void updateStarvationStats() {
lastPreemptionUpdateTime = clock.getTime();
for (FSLeafQueue sched : queueMgr.getLeafQueues()) {
sched.updateStarvationStats();
}
}
/**
* Check for queues that need tasks preempted, either because they have been
* below their guaranteed share for minSharePreemptionTimeout or they have
* been below their fair share threshold for the fairSharePreemptionTimeout. If
* such queues exist, compute how many tasks of each type need to be preempted
* and then select the right ones using preemptTasks.
*/
protected synchronized void preemptTasksIfNecessary() {
if (!shouldAttemptPreemption()) {
return;
}
long curTime = getClock().getTime();
if (curTime - lastPreemptCheckTime < preemptionInterval) {
return;
}
lastPreemptCheckTime = curTime;
Resource resToPreempt = Resources.clone(Resources.none());
for (FSLeafQueue sched : queueMgr.getLeafQueues()) {
Resources.addTo(resToPreempt, resourceDeficit(sched, curTime));
}
if (isResourceGreaterThanNone(resToPreempt)) {
preemptResources(resToPreempt);
}
}
/**
* Preempt a quantity of resources. Each round, we start from the root queue,
* level-by-level, until choosing a candidate application.
* The policy for prioritizing preemption for each queue depends on its
* SchedulingPolicy: (1) fairshare/DRF, choose the ChildSchedulable that is
* most over its fair share; (2) FIFO, choose the childSchedulable that is
* latest launched.
* Inside each application, we further prioritize preemption by choosing
* containers with lowest priority to preempt.
* We make sure that no queue is placed below its fair share in the process.
*/
protected void preemptResources(Resource toPreempt) {
long start = getClock().getTime();
if (Resources.equals(toPreempt, Resources.none())) {
return;
}
// Scan down the list of containers we've already warned and kill them
// if we need to. Remove any containers from the list that we don't need
// or that are no longer running.
Iterator<RMContainer> warnedIter = warnedContainers.iterator();
while (warnedIter.hasNext()) {
RMContainer container = warnedIter.next();
if ((container.getState() == RMContainerState.RUNNING ||
container.getState() == RMContainerState.ALLOCATED) &&
isResourceGreaterThanNone(toPreempt)) {
warnOrKillContainer(container);
Resources.subtractFrom(toPreempt, container.getContainer().getResource());
} else {
warnedIter.remove();
}
}
try {
// Reset preemptedResource for each app
for (FSLeafQueue queue : getQueueManager().getLeafQueues()) {
queue.resetPreemptedResources();
}
while (isResourceGreaterThanNone(toPreempt)) {
RMContainer container =
getQueueManager().getRootQueue().preemptContainer();
if (container == null) {
break;
} else {
warnOrKillContainer(container);
warnedContainers.add(container);
Resources.subtractFrom(
toPreempt, container.getContainer().getResource());
}
}
} finally {
// Clear preemptedResources for each app
for (FSLeafQueue queue : getQueueManager().getLeafQueues()) {
queue.clearPreemptedResources();
}
}
long duration = getClock().getTime() - start;
fsOpDurations.addPreemptCallDuration(duration);
}
private boolean isResourceGreaterThanNone(Resource toPreempt) {
return (toPreempt.getMemory() > 0) || (toPreempt.getVirtualCores() > 0);
}
protected void warnOrKillContainer(RMContainer container) {
ApplicationAttemptId appAttemptId = container.getApplicationAttemptId();
FSAppAttempt app = getSchedulerApp(appAttemptId);
FSLeafQueue queue = app.getQueue();
LOG.info("Preempting container (prio=" + container.getContainer().getPriority() +
"res=" + container.getContainer().getResource() +
") from queue " + queue.getName());
Long time = app.getContainerPreemptionTime(container);
if (time != null) {
// if we asked for preemption more than maxWaitTimeBeforeKill ms ago,
// proceed with kill
if (time + waitTimeBeforeKill < getClock().getTime()) {
ContainerStatus status =
SchedulerUtils.createPreemptedContainerStatus(
container.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER);
// TODO: Not sure if this ever actually adds this to the list of cleanup
// containers on the RMNode (see SchedulerNode.releaseContainer()).
completedContainer(container, status, RMContainerEventType.KILL);
LOG.info("Killing container" + container +
" (after waiting for premption for " +
(getClock().getTime() - time) + "ms)");
}
} else {
// track the request in the FSAppAttempt itself
app.addPreemption(container, getClock().getTime());
}
}
/**
* Return the resource amount that this queue is allowed to preempt, if any.
* If the queue has been below its min share for at least its preemption
* timeout, it should preempt the difference between its current share and
* this min share. If it has been below its fair share preemption threshold
* for at least the fairSharePreemptionTimeout, it should preempt enough tasks
* to get up to its full fair share. If both conditions hold, we preempt the
* max of the two amounts (this shouldn't happen unless someone sets the
* timeouts to be identical for some reason).
*/
protected Resource resourceDeficit(FSLeafQueue sched, long curTime) {
long minShareTimeout = sched.getMinSharePreemptionTimeout();
long fairShareTimeout = sched.getFairSharePreemptionTimeout();
Resource resDueToMinShare = Resources.none();
Resource resDueToFairShare = Resources.none();
ResourceCalculator calc = sched.getPolicy().getResourceCalculator();
if (curTime - sched.getLastTimeAtMinShare() > minShareTimeout) {
Resource target = Resources.componentwiseMin(
sched.getMinShare(), sched.getDemand());
resDueToMinShare = Resources.max(calc, clusterResource,
Resources.none(), Resources.subtract(target, sched.getResourceUsage()));
}
if (curTime - sched.getLastTimeAtFairShareThreshold() > fairShareTimeout) {
Resource target = Resources.componentwiseMin(
sched.getFairShare(), sched.getDemand());
resDueToFairShare = Resources.max(calc, clusterResource,
Resources.none(), Resources.subtract(target, sched.getResourceUsage()));
}
Resource deficit = Resources.max(calc, clusterResource,
resDueToMinShare, resDueToFairShare);
if (Resources.greaterThan(calc, clusterResource,
deficit, Resources.none())) {
String message = "Should preempt " + deficit + " res for queue "
+ sched.getName() + ": resDueToMinShare = " + resDueToMinShare
+ ", resDueToFairShare = " + resDueToFairShare;
LOG.info(message);
}
return deficit;
}
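  // Illustrative worked example, not part of the original source (all numbers
  // are made up): a leaf queue with minShare=<4096 MB, 4 vcores>,
  // fairShare=<8192 MB, 8 vcores>, demand=<10240 MB, 10 vcores> and current
  // usage=<2048 MB, 2 vcores> that has been starved past both timeouts gets
  //   resDueToMinShare  = min(minShare, demand)  - usage = <2048 MB, 2 vcores>
  //   resDueToFairShare = min(fairShare, demand) - usage = <6144 MB, 6 vcores>
  // and resourceDeficit() reports the larger of the two, <6144 MB, 6 vcores>.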
public synchronized RMContainerTokenSecretManager
getContainerTokenSecretManager() {
return rmContext.getContainerTokenSecretManager();
}
// synchronized for sizeBasedWeight
public synchronized ResourceWeights getAppWeight(FSAppAttempt app) {
double weight = 1.0;
if (sizeBasedWeight) {
// Set weight based on current memory demand
weight = Math.log1p(app.getDemand().getMemory()) / Math.log(2);
}
weight *= app.getPriority().getPriority();
if (weightAdjuster != null) {
// Run weight through the user-supplied weightAdjuster
weight = weightAdjuster.adjustWeight(app, weight);
}
ResourceWeights resourceWeights = app.getResourceWeights();
resourceWeights.setWeight((float)weight);
return resourceWeights;
}
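  // Illustrative sketch, not part of the original source (numbers made up):
  // with sizeBasedWeight enabled, an app demanding 4096 MB of memory gets
  //   weight = log1p(4096) / log(2), which is roughly 12.0,
  // before being multiplied by the app's priority and, if configured, passed
  // through the user-supplied WeightAdjuster.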
public Resource getIncrementResourceCapability() {
return incrAllocation;
}
private FSSchedulerNode getFSSchedulerNode(NodeId nodeId) {
return nodes.get(nodeId);
}
public double getNodeLocalityThreshold() {
return nodeLocalityThreshold;
}
public double getRackLocalityThreshold() {
return rackLocalityThreshold;
}
public long getNodeLocalityDelayMs() {
return nodeLocalityDelayMs;
}
public long getRackLocalityDelayMs() {
return rackLocalityDelayMs;
}
public boolean isContinuousSchedulingEnabled() {
return continuousSchedulingEnabled;
}
public synchronized int getContinuousSchedulingSleepMs() {
return continuousSchedulingSleepMs;
}
public Clock getClock() {
return clock;
}
@VisibleForTesting
void setClock(Clock clock) {
this.clock = clock;
}
public FairSchedulerEventLog getEventLog() {
return eventLog;
}
/**
* Add a new application to the scheduler, with a given id, queue name, and
* user. This will accept a new app even if the user or queue is above
* configured limits, but the app will not be marked as runnable.
*/
protected synchronized void addApplication(ApplicationId applicationId,
String queueName, String user, boolean isAppRecovering) {
if (queueName == null || queueName.isEmpty()) {
String message = "Reject application " + applicationId +
" submitted by user " + user + " with an empty queue name.";
LOG.info(message);
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return;
}
if (queueName.startsWith(".") || queueName.endsWith(".")) {
String message = "Reject application " + applicationId
+ " submitted by user " + user + " with an illegal queue name "
+ queueName + ". "
+ "The queue name cannot start/end with period.";
LOG.info(message);
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return;
}
RMApp rmApp = rmContext.getRMApps().get(applicationId);
FSLeafQueue queue = assignToQueue(rmApp, queueName, user);
if (queue == null) {
return;
}
// Enforce ACLs
UserGroupInformation userUgi = UserGroupInformation.createRemoteUser(user);
if (!queue.hasAccess(QueueACL.SUBMIT_APPLICATIONS, userUgi)
&& !queue.hasAccess(QueueACL.ADMINISTER_QUEUE, userUgi)) {
String msg = "User " + userUgi.getUserName() +
" cannot submit applications to queue " + queue.getName();
LOG.info(msg);
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, msg));
return;
}
SchedulerApplication<FSAppAttempt> application =
new SchedulerApplication<FSAppAttempt>(queue, user);
applications.put(applicationId, application);
queue.getMetrics().submitApp(user);
LOG.info("Accepted application " + applicationId + " from user: " + user
+ ", in queue: " + queueName + ", currently num of applications: "
+ applications.size());
if (isAppRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
}
} else {
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
}
}
/**
* Add a new application attempt to the scheduler.
*/
protected synchronized void addApplicationAttempt(
ApplicationAttemptId applicationAttemptId,
boolean transferStateFromPreviousAttempt,
boolean isAttemptRecovering) {
SchedulerApplication<FSAppAttempt> application =
applications.get(applicationAttemptId.getApplicationId());
String user = application.getUser();
FSLeafQueue queue = (FSLeafQueue) application.getQueue();
FSAppAttempt attempt =
new FSAppAttempt(this, applicationAttemptId, user,
queue, new ActiveUsersManager(getRootQueueMetrics()),
rmContext);
if (transferStateFromPreviousAttempt) {
attempt.transferStateFromPreviousAttempt(application
.getCurrentAppAttempt());
}
application.setCurrentAppAttempt(attempt);
boolean runnable = maxRunningEnforcer.canAppBeRunnable(queue, user);
queue.addApp(attempt, runnable);
if (runnable) {
maxRunningEnforcer.trackRunnableApp(attempt);
} else {
maxRunningEnforcer.trackNonRunnableApp(attempt);
}
queue.getMetrics().submitAppAttempt(user);
LOG.info("Added Application Attempt " + applicationAttemptId
+ " to scheduler from user: " + user);
if (isAttemptRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationAttemptId
+ " is recovering. Skipping notifying ATTEMPT_ADDED");
}
} else {
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(applicationAttemptId,
RMAppAttemptEventType.ATTEMPT_ADDED));
}
}
/**
* Helper method that attempts to assign the app to a queue. The method is
   * responsible for calling the appropriate event handler if the app is rejected.
*/
@VisibleForTesting
FSLeafQueue assignToQueue(RMApp rmApp, String queueName, String user) {
FSLeafQueue queue = null;
String appRejectMsg = null;
try {
QueuePlacementPolicy placementPolicy = allocConf.getPlacementPolicy();
queueName = placementPolicy.assignAppToQueue(queueName, user);
if (queueName == null) {
appRejectMsg = "Application rejected by queue placement policy";
} else {
queue = queueMgr.getLeafQueue(queueName, true);
if (queue == null) {
appRejectMsg = queueName + " is not a leaf queue";
}
}
} catch (InvalidQueueNameException qne) {
appRejectMsg = qne.getMessage();
} catch (IOException ioe) {
appRejectMsg = "Error assigning app to queue " + queueName;
}
if (appRejectMsg != null && rmApp != null) {
LOG.error(appRejectMsg);
rmContext.getDispatcher().getEventHandler().handle(
new RMAppRejectedEvent(rmApp.getApplicationId(), appRejectMsg));
return null;
}
if (rmApp != null) {
rmApp.setQueue(queue.getName());
} else {
LOG.error("Couldn't find RM app to set queue name on");
}
return queue;
}
private synchronized void removeApplication(ApplicationId applicationId,
RMAppState finalState) {
SchedulerApplication<FSAppAttempt> application =
applications.get(applicationId);
if (application == null){
LOG.warn("Couldn't find application " + applicationId);
return;
}
application.stop(finalState);
applications.remove(applicationId);
}
private synchronized void removeApplicationAttempt(
ApplicationAttemptId applicationAttemptId,
RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) {
LOG.info("Application " + applicationAttemptId + " is done." +
" finalState=" + rmAppAttemptFinalState);
SchedulerApplication<FSAppAttempt> application =
applications.get(applicationAttemptId.getApplicationId());
FSAppAttempt attempt = getSchedulerApp(applicationAttemptId);
if (attempt == null || application == null) {
LOG.info("Unknown application " + applicationAttemptId + " has completed!");
return;
}
// Release all the running containers
for (RMContainer rmContainer : attempt.getLiveContainers()) {
if (keepContainers
&& rmContainer.getState().equals(RMContainerState.RUNNING)) {
// do not kill the running container in the case of work-preserving AM
// restart.
LOG.info("Skip killing " + rmContainer.getContainerId());
continue;
}
completedContainer(rmContainer,
SchedulerUtils.createAbnormalContainerStatus(
rmContainer.getContainerId(),
SchedulerUtils.COMPLETED_APPLICATION),
RMContainerEventType.KILL);
}
// Release all reserved containers
for (RMContainer rmContainer : attempt.getReservedContainers()) {
completedContainer(rmContainer,
SchedulerUtils.createAbnormalContainerStatus(
rmContainer.getContainerId(),
"Application Complete"),
RMContainerEventType.KILL);
}
// Clean up pending requests, metrics etc.
attempt.stop(rmAppAttemptFinalState);
// Inform the queue
FSLeafQueue queue = queueMgr.getLeafQueue(attempt.getQueue()
.getQueueName(), false);
boolean wasRunnable = queue.removeApp(attempt);
if (wasRunnable) {
maxRunningEnforcer.untrackRunnableApp(attempt);
maxRunningEnforcer.updateRunnabilityOnAppRemoval(attempt,
attempt.getQueue());
} else {
maxRunningEnforcer.untrackNonRunnableApp(attempt);
}
}
/**
* Clean up a completed container.
*/
@Override
protected synchronized void completedContainer(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event) {
if (rmContainer == null) {
LOG.info("Container " + containerStatus.getContainerId()
+ " completed with event " + event);
return;
}
Container container = rmContainer.getContainer();
// Get the application for the finished container
FSAppAttempt application =
getCurrentAttemptForContainer(container.getId());
ApplicationId appId =
container.getId().getApplicationAttemptId().getApplicationId();
if (application == null) {
LOG.info("Container " + container + " of" +
" finished application " + appId +
" completed with event " + event);
return;
}
// Get the node on which the container was allocated
FSSchedulerNode node = getFSSchedulerNode(container.getNodeId());
if (rmContainer.getState() == RMContainerState.RESERVED) {
application.unreserve(rmContainer.getReservedPriority(), node);
} else {
application.containerCompleted(rmContainer, containerStatus, event);
node.releaseContainer(container);
updateRootQueueMetrics();
}
LOG.info("Application attempt " + application.getApplicationAttemptId()
+ " released container " + container.getId() + " on node: " + node
+ " with event: " + event);
}
private synchronized void addNode(List<NMContainerStatus> containerReports,
RMNode node) {
FSSchedulerNode schedulerNode = new FSSchedulerNode(node, usePortForNodeName);
nodes.put(node.getNodeID(), schedulerNode);
Resources.addTo(clusterResource, node.getTotalCapability());
updateMaximumAllocation(schedulerNode, true);
triggerUpdate();
queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
queueMgr.getRootQueue().recomputeSteadyShares();
LOG.info("Added node " + node.getNodeAddress() +
" cluster capacity: " + clusterResource);
recoverContainersOnNode(containerReports, node);
updateRootQueueMetrics();
}
private synchronized void removeNode(RMNode rmNode) {
FSSchedulerNode node = getFSSchedulerNode(rmNode.getNodeID());
// This can occur when an UNHEALTHY node reconnects
if (node == null) {
return;
}
Resources.subtractFrom(clusterResource, rmNode.getTotalCapability());
updateRootQueueMetrics();
triggerUpdate();
// Remove running containers
List<RMContainer> runningContainers = node.getRunningContainers();
for (RMContainer container : runningContainers) {
completedContainer(container,
SchedulerUtils.createAbnormalContainerStatus(
container.getContainerId(),
SchedulerUtils.LOST_CONTAINER),
RMContainerEventType.KILL);
}
// Remove reservations, if any
RMContainer reservedContainer = node.getReservedContainer();
if (reservedContainer != null) {
completedContainer(reservedContainer,
SchedulerUtils.createAbnormalContainerStatus(
reservedContainer.getContainerId(),
SchedulerUtils.LOST_CONTAINER),
RMContainerEventType.KILL);
}
nodes.remove(rmNode.getNodeID());
queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
queueMgr.getRootQueue().recomputeSteadyShares();
updateMaximumAllocation(node, false);
LOG.info("Removed node " + rmNode.getNodeAddress() +
" cluster capacity: " + clusterResource);
}
@Override
public Allocation allocate(ApplicationAttemptId appAttemptId,
List<ResourceRequest> ask, List<ContainerId> release,
List<String> blacklistAdditions, List<String> blacklistRemovals) {
// Make sure this application exists
FSAppAttempt application = getSchedulerApp(appAttemptId);
if (application == null) {
LOG.info("Calling allocate on removed " +
"or non existant application " + appAttemptId);
return EMPTY_ALLOCATION;
}
// Sanity check
SchedulerUtils.normalizeRequests(ask, DOMINANT_RESOURCE_CALCULATOR,
clusterResource, minimumAllocation, getMaximumResourceCapability(),
incrAllocation);
// Record container allocation start time
application.recordContainerRequestTime(getClock().getTime());
// Release containers
releaseContainers(release, application);
synchronized (application) {
if (!ask.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: pre-update" +
" applicationAttemptId=" + appAttemptId +
" application=" + application.getApplicationId());
}
application.showRequests();
// Update application requests
application.updateResourceRequests(ask);
application.showRequests();
}
if (LOG.isDebugEnabled()) {
LOG.debug("allocate: post-update" +
" applicationAttemptId=" + appAttemptId +
" #ask=" + ask.size() +
" reservation= " + application.getCurrentReservation());
LOG.debug("Preempting " + application.getPreemptionContainers().size()
+ " container(s)");
}
Set<ContainerId> preemptionContainerIds = new HashSet<ContainerId>();
for (RMContainer container : application.getPreemptionContainers()) {
preemptionContainerIds.add(container.getContainerId());
}
application.updateBlacklist(blacklistAdditions, blacklistRemovals);
ContainersAndNMTokensAllocation allocation =
application.pullNewlyAllocatedContainersAndNMTokens();
// Record container allocation time
if (!(allocation.getContainerList().isEmpty())) {
application.recordContainerAllocationTime(getClock().getTime());
}
Resource headroom = application.getHeadroom();
application.setApplicationHeadroomForMetrics(headroom);
return new Allocation(allocation.getContainerList(), headroom,
preemptionContainerIds, null, null, allocation.getNMTokenList());
}
}
/**
* Process a heartbeat update from a node.
*/
private synchronized void nodeUpdate(RMNode nm) {
long start = getClock().getTime();
if (LOG.isDebugEnabled()) {
LOG.debug("nodeUpdate: " + nm + " cluster capacity: " + clusterResource);
}
eventLog.log("HEARTBEAT", nm.getHostName());
FSSchedulerNode node = getFSSchedulerNode(nm.getNodeID());
List<UpdatedContainerInfo> containerInfoList = nm.pullContainerUpdates();
List<ContainerStatus> newlyLaunchedContainers = new ArrayList<ContainerStatus>();
List<ContainerStatus> completedContainers = new ArrayList<ContainerStatus>();
for(UpdatedContainerInfo containerInfo : containerInfoList) {
newlyLaunchedContainers.addAll(containerInfo.getNewlyLaunchedContainers());
completedContainers.addAll(containerInfo.getCompletedContainers());
}
// Processing the newly launched containers
for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
containerLaunchedOnNode(launchedContainer.getContainerId(), node);
}
// Process completed containers
for (ContainerStatus completedContainer : completedContainers) {
ContainerId containerId = completedContainer.getContainerId();
LOG.debug("Container FINISHED: " + containerId);
completedContainer(getRMContainer(containerId),
completedContainer, RMContainerEventType.FINISHED);
}
if (continuousSchedulingEnabled) {
if (!completedContainers.isEmpty()) {
attemptScheduling(node);
}
} else {
attemptScheduling(node);
}
long duration = getClock().getTime() - start;
fsOpDurations.addNodeUpdateDuration(duration);
}
void continuousSchedulingAttempt() throws InterruptedException {
long start = getClock().getTime();
List<NodeId> nodeIdList = new ArrayList<NodeId>(nodes.keySet());
// Sort the nodes by space available on them, so that we offer
// containers on emptier nodes first, facilitating an even spread. This
// requires holding the scheduler lock, so that the space available on a
// node doesn't change during the sort.
synchronized (this) {
Collections.sort(nodeIdList, nodeAvailableResourceComparator);
}
// iterate all nodes
for (NodeId nodeId : nodeIdList) {
FSSchedulerNode node = getFSSchedulerNode(nodeId);
try {
if (node != null && Resources.fitsIn(minimumAllocation,
node.getAvailableResource())) {
attemptScheduling(node);
}
} catch (Throwable ex) {
LOG.error("Error while attempting scheduling for node " + node +
": " + ex.toString(), ex);
}
}
long duration = getClock().getTime() - start;
fsOpDurations.addContinuousSchedulingRunDuration(duration);
}
  /** Sort nodes by available resource, most available first. */
private class NodeAvailableResourceComparator implements Comparator<NodeId> {
@Override
public int compare(NodeId n1, NodeId n2) {
if (!nodes.containsKey(n1)) {
return 1;
}
if (!nodes.containsKey(n2)) {
return -1;
}
return RESOURCE_CALCULATOR.compare(clusterResource,
nodes.get(n2).getAvailableResource(),
nodes.get(n1).getAvailableResource());
}
}
@VisibleForTesting
synchronized void attemptScheduling(FSSchedulerNode node) {
if (rmContext.isWorkPreservingRecoveryEnabled()
&& !rmContext.isSchedulerReadyForAllocatingContainers()) {
return;
}
final NodeId nodeID = node.getNodeID();
if (!nodes.containsKey(nodeID)) {
// The node might have just been removed while this thread was waiting
// on the synchronized lock before it entered this synchronized method
LOG.info("Skipping scheduling as the node " + nodeID +
" has been removed");
return;
}
// Assign new containers...
// 1. Check for reserved applications
// 2. Schedule if there are no reservations
boolean validReservation = false;
FSAppAttempt reservedAppSchedulable = node.getReservedAppSchedulable();
if (reservedAppSchedulable != null) {
validReservation = reservedAppSchedulable.assignReservedContainer(node);
}
if (!validReservation) {
// No reservation, schedule at queue which is farthest below fair share
int assignedContainers = 0;
while (node.getReservedContainer() == null) {
boolean assignedContainer = false;
if (!queueMgr.getRootQueue().assignContainer(node).equals(
Resources.none())) {
assignedContainers++;
assignedContainer = true;
}
if (!assignedContainer) { break; }
if (!assignMultiple) { break; }
if ((assignedContainers >= maxAssign) && (maxAssign > 0)) { break; }
}
}
updateRootQueueMetrics();
}
public FSAppAttempt getSchedulerApp(ApplicationAttemptId appAttemptId) {
return super.getApplicationAttempt(appAttemptId);
}
@Override
public ResourceCalculator getResourceCalculator() {
return RESOURCE_CALCULATOR;
}
/**
* Subqueue metrics might be a little out of date because fair shares are
   * recalculated at the update interval, but the root queue metrics need to
* be updated synchronously with allocations and completions so that cluster
* metrics will be consistent.
*/
private void updateRootQueueMetrics() {
rootMetrics.setAvailableResourcesToQueue(
Resources.subtract(
clusterResource, rootMetrics.getAllocatedResources()));
}
/**
* Check if preemption is enabled and the utilization threshold for
* preemption is met.
*
* @return true if preemption should be attempted, false otherwise.
*/
private boolean shouldAttemptPreemption() {
if (preemptionEnabled) {
return (preemptionUtilizationThreshold < Math.max(
(float) rootMetrics.getAllocatedMB() / clusterResource.getMemory(),
(float) rootMetrics.getAllocatedVirtualCores() /
clusterResource.getVirtualCores()));
}
return false;
}
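  // Illustrative sketch, not part of the original source (numbers made up):
  // with preemptionUtilizationThreshold = 0.8f on a cluster of
  // <10240 MB, 10 vcores>, shouldAttemptPreemption() returns true once the
  // allocated share of either memory or vcores exceeds 80%, because the check
  // uses the more utilized of the two resources.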
@Override
public QueueMetrics getRootQueueMetrics() {
return rootMetrics;
}
@Override
public void handle(SchedulerEvent event) {
switch (event.getType()) {
case NODE_ADDED:
if (!(event instanceof NodeAddedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event;
addNode(nodeAddedEvent.getContainerReports(),
nodeAddedEvent.getAddedRMNode());
break;
case NODE_REMOVED:
if (!(event instanceof NodeRemovedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event;
removeNode(nodeRemovedEvent.getRemovedRMNode());
break;
case NODE_UPDATE:
if (!(event instanceof NodeUpdateSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event;
nodeUpdate(nodeUpdatedEvent.getRMNode());
break;
case APP_ADDED:
if (!(event instanceof AppAddedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event;
String queueName =
resolveReservationQueueName(appAddedEvent.getQueue(),
appAddedEvent.getApplicationId(),
appAddedEvent.getReservationID());
if (queueName != null) {
addApplication(appAddedEvent.getApplicationId(),
queueName, appAddedEvent.getUser(),
appAddedEvent.getIsAppRecovering());
}
break;
case APP_REMOVED:
if (!(event instanceof AppRemovedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event;
removeApplication(appRemovedEvent.getApplicationID(),
appRemovedEvent.getFinalState());
break;
case NODE_RESOURCE_UPDATE:
if (!(event instanceof NodeResourceUpdateSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent =
(NodeResourceUpdateSchedulerEvent)event;
updateNodeResource(nodeResourceUpdatedEvent.getRMNode(),
nodeResourceUpdatedEvent.getResourceOption());
break;
case APP_ATTEMPT_ADDED:
if (!(event instanceof AppAttemptAddedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
AppAttemptAddedSchedulerEvent appAttemptAddedEvent =
(AppAttemptAddedSchedulerEvent) event;
addApplicationAttempt(appAttemptAddedEvent.getApplicationAttemptId(),
appAttemptAddedEvent.getTransferStateFromPreviousAttempt(),
appAttemptAddedEvent.getIsAttemptRecovering());
break;
case APP_ATTEMPT_REMOVED:
if (!(event instanceof AppAttemptRemovedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
AppAttemptRemovedSchedulerEvent appAttemptRemovedEvent =
(AppAttemptRemovedSchedulerEvent) event;
removeApplicationAttempt(
appAttemptRemovedEvent.getApplicationAttemptID(),
appAttemptRemovedEvent.getFinalAttemptState(),
appAttemptRemovedEvent.getKeepContainersAcrossAppAttempts());
break;
case CONTAINER_EXPIRED:
if (!(event instanceof ContainerExpiredSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
ContainerExpiredSchedulerEvent containerExpiredEvent =
(ContainerExpiredSchedulerEvent)event;
ContainerId containerId = containerExpiredEvent.getContainerId();
completedContainer(getRMContainer(containerId),
SchedulerUtils.createAbnormalContainerStatus(
containerId,
SchedulerUtils.EXPIRED_CONTAINER),
RMContainerEventType.EXPIRE);
break;
case CONTAINER_RESCHEDULED:
if (!(event instanceof ContainerRescheduledEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
ContainerRescheduledEvent containerRescheduledEvent =
(ContainerRescheduledEvent) event;
RMContainer container = containerRescheduledEvent.getContainer();
recoverResourceRequestForContainer(container);
break;
default:
LOG.error("Unknown event arrived at FairScheduler: " + event.toString());
}
}
private synchronized String resolveReservationQueueName(String queueName,
ApplicationId applicationId, ReservationId reservationID) {
FSQueue queue = queueMgr.getQueue(queueName);
if ((queue == null) || !allocConf.isReservable(queue.getQueueName())) {
return queueName;
}
// Use fully specified name from now on (including root. prefix)
queueName = queue.getQueueName();
if (reservationID != null) {
String resQName = queueName + "." + reservationID.toString();
queue = queueMgr.getQueue(resQName);
if (queue == null) {
String message =
"Application "
+ applicationId
+ " submitted to a reservation which is not yet currently active: "
+ resQName;
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return null;
}
if (!queue.getParent().getQueueName().equals(queueName)) {
String message =
"Application: " + applicationId + " submitted to a reservation "
+ resQName + " which does not belong to the specified queue: "
+ queueName;
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return null;
}
// use the reservation queue to run the app
queueName = resQName;
} else {
// use the default child queue of the plan for unreserved apps
queueName = getDefaultQueueForPlanQueue(queueName);
}
return queueName;
}
private String getDefaultQueueForPlanQueue(String queueName) {
String planName = queueName.substring(queueName.lastIndexOf(".") + 1);
queueName = queueName + "." + planName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
return queueName;
}
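// Illustrative sketch (hypothetical queue name): for a reservable plan queue
// "root.marketing", the plan name is "marketing", so unreserved apps are routed
// to "root.marketing.marketing" + ReservationConstants.DEFAULT_QUEUE_SUFFIX,
// i.e. the default child queue of the plan.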
@Override
public void recover(RMState state) throws Exception {
// NOT IMPLEMENTED
}
public synchronized void setRMContext(RMContext rmContext) {
this.rmContext = rmContext;
}
private void initScheduler(Configuration conf) throws IOException {
synchronized (this) {
this.conf = new FairSchedulerConfiguration(conf);
validateConf(this.conf);
minimumAllocation = this.conf.getMinimumAllocation();
initMaximumResourceCapability(this.conf.getMaximumAllocation());
incrAllocation = this.conf.getIncrementAllocation();
continuousSchedulingEnabled = this.conf.isContinuousSchedulingEnabled();
continuousSchedulingSleepMs =
this.conf.getContinuousSchedulingSleepMs();
nodeLocalityThreshold = this.conf.getLocalityThresholdNode();
rackLocalityThreshold = this.conf.getLocalityThresholdRack();
nodeLocalityDelayMs = this.conf.getLocalityDelayNodeMs();
rackLocalityDelayMs = this.conf.getLocalityDelayRackMs();
preemptionEnabled = this.conf.getPreemptionEnabled();
preemptionUtilizationThreshold =
this.conf.getPreemptionUtilizationThreshold();
assignMultiple = this.conf.getAssignMultiple();
maxAssign = this.conf.getMaxAssign();
sizeBasedWeight = this.conf.getSizeBasedWeight();
preemptionInterval = this.conf.getPreemptionInterval();
waitTimeBeforeKill = this.conf.getWaitTimeBeforeKill();
usePortForNodeName = this.conf.getUsePortForNodeName();
updateInterval = this.conf.getUpdateInterval();
if (updateInterval < 0) {
updateInterval = FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS;
LOG.warn(FairSchedulerConfiguration.UPDATE_INTERVAL_MS
+ " is invalid, so using default value "
+ FairSchedulerConfiguration.DEFAULT_UPDATE_INTERVAL_MS
+ " ms instead");
}
rootMetrics = FSQueueMetrics.forQueue("root", null, true, conf);
fsOpDurations = FSOpDurations.getInstance(true);
// This stores per-application scheduling information
this.applications = new ConcurrentHashMap<
ApplicationId, SchedulerApplication<FSAppAttempt>>();
this.eventLog = new FairSchedulerEventLog();
eventLog.init(this.conf);
allocConf = new AllocationConfiguration(conf);
try {
queueMgr.initialize(conf);
} catch (Exception e) {
throw new IOException("Failed to start FairScheduler", e);
}
updateThread = new UpdateThread();
updateThread.setName("FairSchedulerUpdateThread");
updateThread.setDaemon(true);
if (continuousSchedulingEnabled) {
// start continuous scheduling thread
schedulingThread = new ContinuousSchedulingThread();
schedulingThread.setName("FairSchedulerContinuousScheduling");
schedulingThread.setDaemon(true);
}
}
allocsLoader.init(conf);
allocsLoader.setReloadListener(new AllocationReloadListener());
// If we fail to load allocations file on initialize, we want to fail
// immediately. After a successful load, exceptions on future reloads
// will just result in leaving things as they are.
try {
allocsLoader.reloadAllocations();
} catch (Exception e) {
throw new IOException("Failed to initialize FairScheduler", e);
}
}
private synchronized void startSchedulerThreads() {
Preconditions.checkNotNull(updateThread, "updateThread is null");
Preconditions.checkNotNull(allocsLoader, "allocsLoader is null");
updateThread.start();
if (continuousSchedulingEnabled) {
Preconditions.checkNotNull(schedulingThread, "schedulingThread is null");
schedulingThread.start();
}
allocsLoader.start();
}
@Override
public void serviceInit(Configuration conf) throws Exception {
initScheduler(conf);
super.serviceInit(conf);
}
@Override
public void serviceStart() throws Exception {
startSchedulerThreads();
super.serviceStart();
}
@Override
public void serviceStop() throws Exception {
synchronized (this) {
if (updateThread != null) {
updateThread.interrupt();
updateThread.join(THREAD_JOIN_TIMEOUT_MS);
}
if (continuousSchedulingEnabled) {
if (schedulingThread != null) {
schedulingThread.interrupt();
schedulingThread.join(THREAD_JOIN_TIMEOUT_MS);
}
}
if (allocsLoader != null) {
allocsLoader.stop();
}
}
super.serviceStop();
}
@Override
public void reinitialize(Configuration conf, RMContext rmContext)
throws IOException {
try {
allocsLoader.reloadAllocations();
} catch (Exception e) {
LOG.error("Failed to reload allocations file", e);
}
}
@Override
public QueueInfo getQueueInfo(String queueName, boolean includeChildQueues,
boolean recursive) throws IOException {
if (!queueMgr.exists(queueName)) {
throw new IOException("queue " + queueName + " does not exist");
}
return queueMgr.getQueue(queueName).getQueueInfo(includeChildQueues,
recursive);
}
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo() {
UserGroupInformation user;
try {
user = UserGroupInformation.getCurrentUser();
} catch (IOException ioe) {
return new ArrayList<QueueUserACLInfo>();
}
return queueMgr.getRootQueue().getQueueUserAclInfo(user);
}
@Override
public int getNumClusterNodes() {
return nodes.size();
}
@Override
public synchronized boolean checkAccess(UserGroupInformation callerUGI,
QueueACL acl, String queueName) {
FSQueue queue = getQueueManager().getQueue(queueName);
if (queue == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("ACL not found for queue access-type " + acl
+ " for queue " + queueName);
}
return false;
}
return queue.hasAccess(acl, callerUGI);
}
public AllocationConfiguration getAllocationConfiguration() {
return allocConf;
}
private class AllocationReloadListener implements
AllocationFileLoaderService.Listener {
@Override
public void onReload(AllocationConfiguration queueInfo) {
// Commit the reload; also create any queue defined in the alloc file
// if it does not already exist, so it can be displayed on the web UI.
synchronized (FairScheduler.this) {
allocConf = queueInfo;
allocConf.getDefaultSchedulingPolicy().initialize(clusterResource);
queueMgr.updateAllocationConfiguration(allocConf);
maxRunningEnforcer.updateRunnabilityOnReload();
}
}
}
@Override
public List<ApplicationAttemptId> getAppsInQueue(String queueName) {
FSQueue queue = queueMgr.getQueue(queueName);
if (queue == null) {
return null;
}
List<ApplicationAttemptId> apps = new ArrayList<ApplicationAttemptId>();
queue.collectSchedulerApplications(apps);
return apps;
}
@Override
public synchronized String moveApplication(ApplicationId appId,
String queueName) throws YarnException {
SchedulerApplication<FSAppAttempt> app = applications.get(appId);
if (app == null) {
throw new YarnException("App to be moved " + appId + " not found.");
}
FSAppAttempt attempt = (FSAppAttempt) app.getCurrentAppAttempt();
// To serialize with FairScheduler#allocate, synchronize on app attempt
synchronized (attempt) {
FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
String destQueueName = handleMoveToPlanQueue(queueName);
FSLeafQueue targetQueue = queueMgr.getLeafQueue(destQueueName, false);
if (targetQueue == null) {
throw new YarnException("Target queue " + queueName
+ " not found or is not a leaf queue.");
}
if (targetQueue == oldQueue) {
return oldQueue.getQueueName();
}
if (oldQueue.isRunnableApp(attempt)) {
verifyMoveDoesNotViolateConstraints(attempt, oldQueue, targetQueue);
}
executeMove(app, attempt, oldQueue, targetQueue);
return targetQueue.getQueueName();
}
}
private void verifyMoveDoesNotViolateConstraints(FSAppAttempt app,
FSLeafQueue oldQueue, FSLeafQueue targetQueue) throws YarnException {
String queueName = targetQueue.getQueueName();
ApplicationAttemptId appAttId = app.getApplicationAttemptId();
// When checking maxResources and maxRunningApps, we only need to consider
// queues below the lowest common ancestor of the two queues, because the
// total of running apps in the queues above it will not change.
FSQueue lowestCommonAncestor = findLowestCommonAncestorQueue(oldQueue,
targetQueue);
Resource consumption = app.getCurrentConsumption();
// Check whether the move would go over maxRunningApps or maxShare
FSQueue cur = targetQueue;
while (cur != lowestCommonAncestor) {
// maxRunningApps
if (cur.getNumRunnableApps() == allocConf.getQueueMaxApps(cur.getQueueName())) {
throw new YarnException("Moving app attempt " + appAttId + " to queue "
+ queueName + " would violate queue maxRunningApps constraints on"
+ " queue " + cur.getQueueName());
}
// maxShare
if (!Resources.fitsIn(Resources.add(cur.getResourceUsage(), consumption),
cur.getMaxShare())) {
throw new YarnException("Moving app attempt " + appAttId + " to queue "
+ queueName + " would violate queue maxShare constraints on"
+ " queue " + cur.getQueueName());
}
cur = cur.getParent();
}
}
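// Illustrative sketch (hypothetical queue names): when moving an attempt from
// "root.a.b" to "root.a.c", the lowest common ancestor is "root.a", so only
// "root.a.c" is checked against maxRunningApps and maxShare; usage in
// "root.a" and "root" is unchanged by the move.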
/**
* Helper for moveApplication, which has appropriate synchronization, so all
* operations will be atomic.
*/
private void executeMove(SchedulerApplication<FSAppAttempt> app,
FSAppAttempt attempt, FSLeafQueue oldQueue, FSLeafQueue newQueue) {
boolean wasRunnable = oldQueue.removeApp(attempt);
// if app was not runnable before, it may be runnable now
boolean nowRunnable = maxRunningEnforcer.canAppBeRunnable(newQueue,
attempt.getUser());
if (wasRunnable && !nowRunnable) {
throw new IllegalStateException("Should have already verified that app "
+ attempt.getApplicationId() + " would be runnable in new queue");
}
if (wasRunnable) {
maxRunningEnforcer.untrackRunnableApp(attempt);
} else if (nowRunnable) {
// App has changed from non-runnable to runnable
maxRunningEnforcer.untrackNonRunnableApp(attempt);
}
attempt.move(newQueue); // This updates all the metrics
app.setQueue(newQueue);
newQueue.addApp(attempt, nowRunnable);
if (nowRunnable) {
maxRunningEnforcer.trackRunnableApp(attempt);
}
if (wasRunnable) {
maxRunningEnforcer.updateRunnabilityOnAppRemoval(attempt, oldQueue);
}
}
@VisibleForTesting
FSQueue findLowestCommonAncestorQueue(FSQueue queue1, FSQueue queue2) {
// Because queue names include ancestors, separated by periods, we can find
// the lowest common ancestor by going from the start of the names until
// there's a character that doesn't match.
String name1 = queue1.getName();
String name2 = queue2.getName();
// We keep track of the last period we encounter to avoid returning root.apple
// when the queues are root.applepie and root.appletart
int lastPeriodIndex = -1;
for (int i = 0; i < Math.max(name1.length(), name2.length()); i++) {
if (name1.length() <= i || name2.length() <= i ||
name1.charAt(i) != name2.charAt(i)) {
return queueMgr.getQueue(name1.substring(0, lastPeriodIndex));
} else if (name1.charAt(i) == '.') {
lastPeriodIndex = i;
}
}
return queue1; // names are identical
}
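// Illustrative sketch (hypothetical queue names): for "root.a.b" and
// "root.a.c" the names diverge at index 7, and the last '.' seen before that
// is at index 6, so the method returns queueMgr.getQueue("root.a"). Tracking
// the last period is what prevents "root.applepie" and "root.appletart" from
// resolving to the non-existent queue "root.apple".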
/**
* Process resource update on a node and update Queue.
*/
@Override
public synchronized void updateNodeResource(RMNode nm,
ResourceOption resourceOption) {
super.updateNodeResource(nm, resourceOption);
updateRootQueueMetrics();
queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
queueMgr.getRootQueue().recomputeSteadyShares();
}
/** {@inheritDoc} */
@Override
public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes() {
return EnumSet
.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU);
}
@Override
public Set<String> getPlanQueues() throws YarnException {
Set<String> planQueues = new HashSet<String>();
for (FSQueue fsQueue : queueMgr.getQueues()) {
String queueName = fsQueue.getName();
if (allocConf.isReservable(queueName)) {
planQueues.add(queueName);
}
}
return planQueues;
}
@Override
public void setEntitlement(String queueName,
QueueEntitlement entitlement) throws YarnException {
FSLeafQueue reservationQueue = queueMgr.getLeafQueue(queueName, false);
if (reservationQueue == null) {
throw new YarnException("Target queue " + queueName
+ " not found or is not a leaf queue.");
}
reservationQueue.setWeights(entitlement.getCapacity());
// TODO Does MaxCapacity need to be set for FairScheduler?
}
/**
* Only supports removing empty leaf queues
* @param queueName name of queue to remove
* @throws YarnException if the queue to remove is either not a leaf queue or
* is not empty
*/
@Override
public void removeQueue(String queueName) throws YarnException {
FSLeafQueue reservationQueue = queueMgr.getLeafQueue(queueName, false);
if (reservationQueue != null) {
if (!queueMgr.removeLeafQueue(queueName)) {
throw new YarnException("Could not remove queue " + queueName + " as " +
"its either not a leaf queue or its not empty");
}
}
}
private String handleMoveToPlanQueue(String targetQueueName) {
FSQueue dest = queueMgr.getQueue(targetQueueName);
if (dest != null && allocConf.isReservable(dest.getQueueName())) {
// use the default child reservation queue of the plan
targetQueueName = getDefaultQueueForPlanQueue(targetQueueName);
}
return targetQueueName;
}
}
| 65,359 | 37.537736 | 123 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/WeightAdjuster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configurable;
/**
* A pluggable object for altering the weights of apps in the fair scheduler,
* which is used for example by {@link NewAppWeightBooster} to give higher
* weight to new jobs so that short jobs finish faster.
*
* May implement {@link Configurable} to access configuration parameters.
*/
@Private
@Unstable
public interface WeightAdjuster {
public double adjustWeight(FSAppAttempt app, double curWeight);
}
| 1,475 | 38.891892 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@Metrics(context="yarn")
public class FSQueueMetrics extends QueueMetrics {
@Metric("Fair share of memory in MB") MutableGaugeInt fairShareMB;
@Metric("Fair share of CPU in vcores") MutableGaugeInt fairShareVCores;
@Metric("Steady fair share of memory in MB") MutableGaugeInt steadyFairShareMB;
@Metric("Steady fair share of CPU in vcores") MutableGaugeInt steadyFairShareVCores;
@Metric("Minimum share of memory in MB") MutableGaugeInt minShareMB;
@Metric("Minimum share of CPU in vcores") MutableGaugeInt minShareVCores;
@Metric("Maximum share of memory in MB") MutableGaugeInt maxShareMB;
@Metric("Maximum share of CPU in vcores") MutableGaugeInt maxShareVCores;
FSQueueMetrics(MetricsSystem ms, String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
super(ms, queueName, parent, enableUserMetrics, conf);
}
public void setFairShare(Resource resource) {
fairShareMB.set(resource.getMemory());
fairShareVCores.set(resource.getVirtualCores());
}
public int getFairShareMB() {
return fairShareMB.value();
}
public int getFairShareVirtualCores() {
return fairShareVCores.value();
}
public void setSteadyFairShare(Resource resource) {
steadyFairShareMB.set(resource.getMemory());
steadyFairShareVCores.set(resource.getVirtualCores());
}
public int getSteadyFairShareMB() {
return steadyFairShareMB.value();
}
public int getSteadyFairShareVCores() {
return steadyFairShareVCores.value();
}
public void setMinShare(Resource resource) {
minShareMB.set(resource.getMemory());
minShareVCores.set(resource.getVirtualCores());
}
public int getMinShareMB() {
return minShareMB.value();
}
public int getMinShareVirtualCores() {
return minShareVCores.value();
}
public void setMaxShare(Resource resource) {
maxShareMB.set(resource.getMemory());
maxShareVCores.set(resource.getVirtualCores());
}
public int getMaxShareMB() {
return maxShareMB.value();
}
public int getMaxShareVirtualCores() {
return maxShareVCores.value();
}
public synchronized
static FSQueueMetrics forQueue(String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
MetricsSystem ms = DefaultMetricsSystem.instance();
QueueMetrics metrics = queueMetrics.get(queueName);
if (metrics == null) {
metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
.tag(QUEUE_INFO, queueName);
// Register with the MetricsSystems
if (ms != null) {
metrics = ms.register(
sourceName(queueName).toString(),
"Metrics for queue: " + queueName, metrics);
}
queueMetrics.put(queueName, metrics);
}
return (FSQueueMetrics)metrics;
}
}
| 4,233 | 33.704918 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
import java.io.Serializable;
import java.util.Collection;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
@Private
@Unstable
public class FifoPolicy extends SchedulingPolicy {
@VisibleForTesting
public static final String NAME = "FIFO";
private static final FifoComparator COMPARATOR = new FifoComparator();
private static final DefaultResourceCalculator CALCULATOR =
new DefaultResourceCalculator();
@Override
public String getName() {
return NAME;
}
/**
* Compare Schedulables in order of priority and then submission time, as in
* the default FIFO scheduler in Hadoop.
*/
static class FifoComparator implements Comparator<Schedulable>, Serializable {
private static final long serialVersionUID = -5905036205491177060L;
@Override
public int compare(Schedulable s1, Schedulable s2) {
int res = s1.getPriority().compareTo(s2.getPriority());
if (res == 0) {
res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
}
if (res == 0) {
// In the rare case where jobs were submitted at the exact same time,
// compare them by name (which will be the JobID) to get a deterministic
// ordering, so we don't alternately launch tasks from different jobs.
res = s1.getName().compareTo(s2.getName());
}
return res;
}
}
@Override
public Comparator<Schedulable> getComparator() {
return COMPARATOR;
}
@Override
public ResourceCalculator getResourceCalculator() {
return CALCULATOR;
}
@Override
public void computeShares(Collection<? extends Schedulable> schedulables,
Resource totalResources) {
if (schedulables.isEmpty()) {
return;
}
Schedulable earliest = null;
for (Schedulable schedulable : schedulables) {
if (earliest == null ||
schedulable.getStartTime() < earliest.getStartTime()) {
earliest = schedulable;
}
}
earliest.setFairShare(Resources.clone(totalResources));
}
@Override
public void computeSteadyShares(Collection<? extends FSQueue> queues,
Resource totalResources) {
// Nothing to do here, as a leaf queue does not have to calculate steady
// fair shares for its applications.
}
@Override
public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
throw new UnsupportedOperationException(
"FifoPolicy doesn't support checkIfUsageOverFairshare operation, " +
"as FifoPolicy only works for FSLeafQueue.");
}
@Override
public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) {
return usage.getMemory() > maxAMResource.getMemory();
}
@Override
public Resource getHeadroom(Resource queueFairShare,
Resource queueUsage, Resource maxAvailable) {
int queueAvailableMemory = Math.max(
queueFairShare.getMemory() - queueUsage.getMemory(), 0);
Resource headroom = Resources.createResource(
Math.min(maxAvailable.getMemory(), queueAvailableMemory),
maxAvailable.getVirtualCores());
return headroom;
}
@Override
public byte getApplicableDepth() {
return SchedulingPolicy.DEPTH_LEAF;
}
}
| 4,763 | 33.521739 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
import java.util.Collection;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import static org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType.*;
/**
* Makes scheduling decisions by trying to equalize dominant resource usage.
* A schedulable's dominant resource usage is the largest ratio of resource
* usage to capacity among the resource types it is using.
*/
@Private
@Unstable
public class DominantResourceFairnessPolicy extends SchedulingPolicy {
public static final String NAME = "DRF";
private static final DominantResourceFairnessComparator COMPARATOR =
new DominantResourceFairnessComparator();
private static final DominantResourceCalculator CALCULATOR =
new DominantResourceCalculator();
@Override
public String getName() {
return NAME;
}
@Override
public byte getApplicableDepth() {
return SchedulingPolicy.DEPTH_ANY;
}
@Override
public Comparator<Schedulable> getComparator() {
return COMPARATOR;
}
@Override
public ResourceCalculator getResourceCalculator() {
return CALCULATOR;
}
@Override
public void computeShares(Collection<? extends Schedulable> schedulables,
Resource totalResources) {
for (ResourceType type : ResourceType.values()) {
ComputeFairShares.computeShares(schedulables, totalResources, type);
}
}
@Override
public void computeSteadyShares(Collection<? extends FSQueue> queues,
Resource totalResources) {
for (ResourceType type : ResourceType.values()) {
ComputeFairShares.computeSteadyShares(queues, totalResources, type);
}
}
@Override
public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
return !Resources.fitsIn(usage, fairShare);
}
@Override
public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) {
return !Resources.fitsIn(usage, maxAMResource);
}
@Override
public Resource getHeadroom(Resource queueFairShare, Resource queueUsage,
Resource maxAvailable) {
int queueAvailableMemory =
Math.max(queueFairShare.getMemory() - queueUsage.getMemory(), 0);
int queueAvailableCPU =
Math.max(queueFairShare.getVirtualCores() - queueUsage
.getVirtualCores(), 0);
Resource headroom = Resources.createResource(
Math.min(maxAvailable.getMemory(), queueAvailableMemory),
Math.min(maxAvailable.getVirtualCores(),
queueAvailableCPU));
return headroom;
}
@Override
public void initialize(Resource clusterCapacity) {
COMPARATOR.setClusterCapacity(clusterCapacity);
}
public static class DominantResourceFairnessComparator implements Comparator<Schedulable> {
private static final int NUM_RESOURCES = ResourceType.values().length;
private Resource clusterCapacity;
public void setClusterCapacity(Resource clusterCapacity) {
this.clusterCapacity = clusterCapacity;
}
@Override
public int compare(Schedulable s1, Schedulable s2) {
ResourceWeights sharesOfCluster1 = new ResourceWeights();
ResourceWeights sharesOfCluster2 = new ResourceWeights();
ResourceWeights sharesOfMinShare1 = new ResourceWeights();
ResourceWeights sharesOfMinShare2 = new ResourceWeights();
ResourceType[] resourceOrder1 = new ResourceType[NUM_RESOURCES];
ResourceType[] resourceOrder2 = new ResourceType[NUM_RESOURCES];
// Calculate each schedulable's share of the cluster for every resource type.
calculateShares(s1.getResourceUsage(),
clusterCapacity, sharesOfCluster1, resourceOrder1, s1.getWeights());
calculateShares(s1.getResourceUsage(),
s1.getMinShare(), sharesOfMinShare1, null, ResourceWeights.NEUTRAL);
calculateShares(s2.getResourceUsage(),
clusterCapacity, sharesOfCluster2, resourceOrder2, s2.getWeights());
calculateShares(s2.getResourceUsage(),
s2.getMinShare(), sharesOfMinShare2, null, ResourceWeights.NEUTRAL);
// A queue is needy for its min share if its dominant resource
// (with respect to the cluster capacity) is below its configured min share
// for that resource
boolean s1Needy = sharesOfMinShare1.getWeight(resourceOrder1[0]) < 1.0f;
boolean s2Needy = sharesOfMinShare2.getWeight(resourceOrder2[0]) < 1.0f;
int res = 0;
if (!s2Needy && !s1Needy) {
res = compareShares(sharesOfCluster1, sharesOfCluster2,
resourceOrder1, resourceOrder2);
} else if (s1Needy && !s2Needy) {
res = -1;
} else if (s2Needy && !s1Needy) {
res = 1;
} else { // both are needy below min share
res = compareShares(sharesOfMinShare1, sharesOfMinShare2,
resourceOrder1, resourceOrder2);
}
if (res == 0) {
// Apps are tied in fairness ratio. Break the tie by submit time.
res = (int)(s1.getStartTime() - s2.getStartTime());
}
return res;
}
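// Illustrative sketch (hypothetical numbers): with a cluster of
// <100 GB, 100 vcores> and neutral weights, an app using <40 GB, 10 vcores>
// has shares [0.4, 0.1] and a dominant resource of MEMORY, while an app using
// <20 GB, 50 vcores> has shares [0.2, 0.5] and a dominant resource of CPU.
// Assuming neither is below its min share, the comparator favors the first
// app (dominant share 0.4 < 0.5), thereby equalizing dominant resource usage.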
/**
* Calculates and orders a resource's share of a pool in terms of two vectors.
* The shares vector contains, for each resource, the fraction of the pool that
* it takes up. The resourceOrder vector contains an ordering of resources
* by largest share. So if resource=<10 MB, 5 CPU>, and pool=<100 MB, 10 CPU>,
* shares will be [.1, .5] and resourceOrder will be [CPU, MEMORY].
*/
void calculateShares(Resource resource, Resource pool,
ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) {
shares.setWeight(MEMORY, (float)resource.getMemory() /
(pool.getMemory() * weights.getWeight(MEMORY)));
shares.setWeight(CPU, (float)resource.getVirtualCores() /
(pool.getVirtualCores() * weights.getWeight(CPU)));
// sort order vector by resource share
if (resourceOrder != null) {
if (shares.getWeight(MEMORY) > shares.getWeight(CPU)) {
resourceOrder[0] = MEMORY;
resourceOrder[1] = CPU;
} else {
resourceOrder[0] = CPU;
resourceOrder[1] = MEMORY;
}
}
}
private int compareShares(ResourceWeights shares1, ResourceWeights shares2,
ResourceType[] resourceOrder1, ResourceType[] resourceOrder2) {
for (int i = 0; i < resourceOrder1.length; i++) {
int ret = (int)Math.signum(shares1.getWeight(resourceOrder1[i])
- shares2.getWeight(resourceOrder2[i]));
if (ret != 0) {
return ret;
}
}
return 0;
}
}
}
| 8,216 | 37.759434 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
/**
* Contains logic for computing the fair shares. A {@link Schedulable}'s fair
* share is the {@link Resource} it is entitled to, independent of the current
* demands and allocations on the cluster. A {@link Schedulable} whose resource
* consumption lies at or below its fair share will never have its containers
* preempted.
*/
public class ComputeFairShares {
private static final int COMPUTE_FAIR_SHARES_ITERATIONS = 25;
/**
* Compute the fair share of the given schedulables. Fair share is an
* allocation of shares considering only active schedulables, i.e.,
* schedulables that have running apps.
*
* @param schedulables
* @param totalResources
* @param type
*/
public static void computeShares(
Collection<? extends Schedulable> schedulables, Resource totalResources,
ResourceType type) {
computeSharesInternal(schedulables, totalResources, type, false);
}
/**
* Compute the steady fair share of the given queues. The steady fair
* share is an allocation of shares considering all queues, i.e.,
* active and inactive.
*
* @param queues
* @param totalResources
* @param type
*/
public static void computeSteadyShares(
Collection<? extends FSQueue> queues, Resource totalResources,
ResourceType type) {
computeSharesInternal(queues, totalResources, type, true);
}
/**
* Given a set of Schedulables and a number of slots, compute their weighted
* fair shares. The min and max shares of the Schedulables are assumed to
* be set beforehand. We compute the fairest possible allocation of shares to
* the Schedulables that respects their min and max shares.
* <p>
* To understand what this method does, we must first define what weighted
* fair sharing means in the presence of min and max shares. If there
* were no minimum or maximum shares, then weighted fair sharing would be
* achieved if the ratio of slotsAssigned / weight was equal for each
* Schedulable and all slots were assigned. Minimum and maximum shares add a
* further twist - Some Schedulables may have a min share higher than their
* assigned share or a max share lower than their assigned share.
* <p>
* To deal with these possibilities, we define an assignment of slots as being
* fair if there exists a ratio R such that:
* <ul>
* <li>Schedulables S where S.minShare {@literal >} R * S.weight are given
* share S.minShare,</li>
* <li>Schedulables S where S.maxShare {@literal <} R * S.weight are given
* S.maxShare,</li>
* <li>all other Schedulables S are assigned share R * S.weight,</li>
* <li>and the sum of all the shares is totalSlots.</li>
* </ul>
* <p>
* We call R the weight-to-slots ratio because it converts a Schedulable's
* weight to the number of slots it is assigned.
* <p>
* We compute a fair allocation by finding a suitable weight-to-slot ratio R.
* To do this, we use binary search. Given a ratio R, we compute the number of
* slots that would be used in total with this ratio (the sum of the shares
* computed using the conditions above). If this number of slots is less than
* totalSlots, then R is too small and more slots could be assigned. If the
* number of slots is more than totalSlots, then R is too large.
* <p>
* We begin the binary search with a lower bound on R of 0 (which means that
* all Schedulables are only given their minShare) and an upper bound computed
* to be large enough that too many slots are given (by doubling R until we
* use more than totalResources resources). The helper method
* resourceUsedWithWeightToResourceRatio computes the total resources used with a
* given value of R.
* <p>
* The running time of this algorithm is linear in the number of Schedulables,
* because resourceUsedWithWeightToResourceRatio is linear-time and the number of
* iterations of binary search is a constant (dependent on desired precision).
*/
private static void computeSharesInternal(
Collection<? extends Schedulable> allSchedulables,
Resource totalResources, ResourceType type, boolean isSteadyShare) {
Collection<Schedulable> schedulables = new ArrayList<Schedulable>();
int takenResources = handleFixedFairShares(
allSchedulables, schedulables, isSteadyShare, type);
if (schedulables.isEmpty()) {
return;
}
// Find an upper bound on R that we can use in our binary search. We start
// at R = 1 and double it until we have either used all the resources or we
// have met all Schedulables' max shares.
int totalMaxShare = 0;
for (Schedulable sched : schedulables) {
int maxShare = getResourceValue(sched.getMaxShare(), type);
totalMaxShare = (int) Math.min((long)maxShare + (long)totalMaxShare,
Integer.MAX_VALUE);
if (totalMaxShare == Integer.MAX_VALUE) {
break;
}
}
int totalResource = Math.max((getResourceValue(totalResources, type) -
takenResources), 0);
totalResource = Math.min(totalMaxShare, totalResource);
double rMax = 1.0;
while (resourceUsedWithWeightToResourceRatio(rMax, schedulables, type)
< totalResource) {
rMax *= 2.0;
}
// Perform the binary search for up to COMPUTE_FAIR_SHARES_ITERATIONS steps
double left = 0;
double right = rMax;
for (int i = 0; i < COMPUTE_FAIR_SHARES_ITERATIONS; i++) {
double mid = (left + right) / 2.0;
int plannedResourceUsed = resourceUsedWithWeightToResourceRatio(
mid, schedulables, type);
if (plannedResourceUsed == totalResource) {
right = mid;
break;
} else if (plannedResourceUsed < totalResource) {
left = mid;
} else {
right = mid;
}
}
// Set the fair shares based on the value of R we've converged to
for (Schedulable sched : schedulables) {
if (isSteadyShare) {
setResourceValue(computeShare(sched, right, type),
((FSQueue) sched).getSteadyFairShare(), type);
} else {
setResourceValue(
computeShare(sched, right, type), sched.getFairShare(), type);
}
}
}
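// Worked example (hypothetical numbers, memory only): two active schedulables
// with weights 1.0 and 2.0, no min/max constraints, and 30 units of memory to
// divide. resourceUsedWithWeightToResourceRatio(R) = 1*R + 2*R = 3R, so the
// binary search converges on R ~= 10 and the fair shares become roughly 10
// and 20 units respectively. Min shares lift a schedulable's share above
// R * weight and max shares cap it below, which is why a search is needed
// instead of a closed-form division.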
/**
* Compute the resources that would be used given a weight-to-resource ratio
* w2rRatio, for use in the fair share computation performed by
* computeSharesInternal.
*/
private static int resourceUsedWithWeightToResourceRatio(double w2rRatio,
Collection<? extends Schedulable> schedulables, ResourceType type) {
int resourcesTaken = 0;
for (Schedulable sched : schedulables) {
int share = computeShare(sched, w2rRatio, type);
resourcesTaken += share;
}
return resourcesTaken;
}
/**
* Compute the resources assigned to a Schedulable given a particular
* weight-to-resource ratio w2rRatio.
*/
private static int computeShare(Schedulable sched, double w2rRatio,
ResourceType type) {
double share = sched.getWeights().getWeight(type) * w2rRatio;
share = Math.max(share, getResourceValue(sched.getMinShare(), type));
share = Math.min(share, getResourceValue(sched.getMaxShare(), type));
return (int) share;
}
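// Illustrative sketch (hypothetical numbers): with w2rRatio = 10, a
// schedulable with weight 1.5, minShare 20 and maxShare 100 gets
// min(max(1.5 * 10, 20), 100) = 20, i.e. the raw weighted share of 15 is
// lifted to the min share before being returned.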
/**
* Helper method to handle Schedulables with fixed fair shares.
* Returns the resources taken by fixed fair share schedulables,
* and adds the remaining ones to the passed nonFixedSchedulables.
*/
private static int handleFixedFairShares(
Collection<? extends Schedulable> schedulables,
Collection<Schedulable> nonFixedSchedulables,
boolean isSteadyShare, ResourceType type) {
int totalResource = 0;
for (Schedulable sched : schedulables) {
int fixedShare = getFairShareIfFixed(sched, isSteadyShare, type);
if (fixedShare < 0) {
nonFixedSchedulables.add(sched);
} else {
setResourceValue(fixedShare,
isSteadyShare
? ((FSQueue)sched).getSteadyFairShare()
: sched.getFairShare(),
type);
totalResource = (int) Math.min((long)totalResource + (long)fixedShare,
Integer.MAX_VALUE);
}
}
return totalResource;
}
/**
* Get the fairshare for the {@link Schedulable} if it is fixed, -1 otherwise.
*
* The fairshare is fixed if either the maxShare is 0, weight is 0,
* or the Schedulable is not active for instantaneous fairshare.
*/
private static int getFairShareIfFixed(Schedulable sched,
boolean isSteadyShare, ResourceType type) {
// Check if maxShare is 0
if (getResourceValue(sched.getMaxShare(), type) <= 0) {
return 0;
}
// For instantaneous fairshares, check if queue is active
if (!isSteadyShare &&
(sched instanceof FSQueue) && !((FSQueue)sched).isActive()) {
return 0;
}
// Check if weight is 0
if (sched.getWeights().getWeight(type) <= 0) {
int minShare = getResourceValue(sched.getMinShare(), type);
return (minShare <= 0) ? 0 : minShare;
}
return -1;
}
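// Illustrative sketch: a queue with maxShare 0, a zero-weight queue (fixed at
// its min share, or 0 if it has none), or a queue that is inactive when
// computing the instantaneous fair share, is "fixed" and is settled up front
// by handleFixedFairShares(); only the remaining schedulables take part in
// the binary search above.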
private static int getResourceValue(Resource resource, ResourceType type) {
switch (type) {
case MEMORY:
return resource.getMemory();
case CPU:
return resource.getVirtualCores();
default:
throw new IllegalArgumentException("Invalid resource");
}
}
private static void setResourceValue(int val, Resource resource, ResourceType type) {
switch (type) {
case MEMORY:
resource.setMemory(val);
break;
case CPU:
resource.setVirtualCores(val);
break;
default:
throw new IllegalArgumentException("Invalid resource");
}
}
}
| 10,768 | 37.598566 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
import java.io.Serializable;
import java.util.Collection;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
/**
* Makes scheduling decisions by trying to equalize shares of memory.
*/
@Private
@Unstable
public class FairSharePolicy extends SchedulingPolicy {
@VisibleForTesting
public static final String NAME = "fair";
private static final DefaultResourceCalculator RESOURCE_CALCULATOR =
new DefaultResourceCalculator();
private static final FairShareComparator COMPARATOR =
new FairShareComparator();
@Override
public String getName() {
return NAME;
}
/**
* Compare Schedulables via weighted fair sharing. In addition, Schedulables
* below their min share get priority over those whose min share is met.
*
* Schedulables below their min share are compared by how far below it they
* are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
* and job B has 50 out of a min share of 100, then job B is scheduled next,
* because B is at 50% of its min share and A is at 80% of its min share.
*
* Schedulables above their min share are compared by (runningTasks / weight).
* If all weights are equal, slots are given to the job with the fewest tasks;
* otherwise, jobs with more weight get proportionally more slots.
*/
private static class FairShareComparator implements Comparator<Schedulable>,
Serializable {
private static final long serialVersionUID = 5564969375856699313L;
private static final Resource ONE = Resources.createResource(1);
@Override
public int compare(Schedulable s1, Schedulable s2) {
double minShareRatio1, minShareRatio2;
double useToWeightRatio1, useToWeightRatio2;
Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
s1.getMinShare(), s1.getDemand());
Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
s2.getMinShare(), s2.getDemand());
boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
s1.getResourceUsage(), minShare1);
boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
s2.getResourceUsage(), minShare2);
minShareRatio1 = (double) s1.getResourceUsage().getMemory()
/ Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemory();
minShareRatio2 = (double) s2.getResourceUsage().getMemory()
/ Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemory();
useToWeightRatio1 = s1.getResourceUsage().getMemory() /
s1.getWeights().getWeight(ResourceType.MEMORY);
useToWeightRatio2 = s2.getResourceUsage().getMemory() /
s2.getWeights().getWeight(ResourceType.MEMORY);
int res = 0;
if (s1Needy && !s2Needy)
res = -1;
else if (s2Needy && !s1Needy)
res = 1;
else if (s1Needy && s2Needy)
res = (int) Math.signum(minShareRatio1 - minShareRatio2);
else
// Neither schedulable is needy
res = (int) Math.signum(useToWeightRatio1 - useToWeightRatio2);
if (res == 0) {
// Apps are tied in fairness ratio. Break the tie by submit time and job
// name to get a deterministic ordering, which is useful for unit tests.
res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
if (res == 0)
res = s1.getName().compareTo(s2.getName());
}
return res;
}
}
@Override
public Comparator<Schedulable> getComparator() {
return COMPARATOR;
}
@Override
public ResourceCalculator getResourceCalculator() {
return RESOURCE_CALCULATOR;
}
@Override
public Resource getHeadroom(Resource queueFairShare,
Resource queueUsage, Resource maxAvailable) {
int queueAvailableMemory = Math.max(
queueFairShare.getMemory() - queueUsage.getMemory(), 0);
Resource headroom = Resources.createResource(
Math.min(maxAvailable.getMemory(), queueAvailableMemory),
maxAvailable.getVirtualCores());
return headroom;
}
@Override
public void computeShares(Collection<? extends Schedulable> schedulables,
Resource totalResources) {
ComputeFairShares.computeShares(schedulables, totalResources, ResourceType.MEMORY);
}
@Override
public void computeSteadyShares(Collection<? extends FSQueue> queues,
Resource totalResources) {
ComputeFairShares.computeSteadyShares(queues, totalResources,
ResourceType.MEMORY);
}
@Override
public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
return Resources.greaterThan(RESOURCE_CALCULATOR, null, usage, fairShare);
}
@Override
public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) {
return usage.getMemory() > maxAMResource.getMemory();
}
@Override
public byte getApplicableDepth() {
return SchedulingPolicy.DEPTH_ANY;
}
}
| 6,546 | 39.165644 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@Metrics(context = "yarn")
public class CSQueueMetrics extends QueueMetrics {
@Metric("AM memory limit in MB")
MutableGaugeInt AMResourceLimitMB;
@Metric("AM CPU limit in virtual cores")
MutableGaugeInt AMResourceLimitVCores;
@Metric("Used AM memory limit in MB")
MutableGaugeInt usedAMResourceMB;
@Metric("Used AM CPU limit in virtual cores")
MutableGaugeInt usedAMResourceVCores;
CSQueueMetrics(MetricsSystem ms, String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
super(ms, queueName, parent, enableUserMetrics, conf);
}
public int getAMResourceLimitMB() {
return AMResourceLimitMB.value();
}
public int getAMResourceLimitVCores() {
return AMResourceLimitVCores.value();
}
public int getUsedAMResourceMB() {
return usedAMResourceMB.value();
}
public int getUsedAMResourceVCores() {
return usedAMResourceVCores.value();
}
public void setAMResouceLimit(Resource res) {
AMResourceLimitMB.set(res.getMemory());
AMResourceLimitVCores.set(res.getVirtualCores());
}
public void setAMResouceLimitForUser(String user, Resource res) {
CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
if (userMetrics != null) {
userMetrics.setAMResouceLimit(res);
}
}
public void incAMUsed(String user, Resource res) {
usedAMResourceMB.incr(res.getMemory());
usedAMResourceVCores.incr(res.getVirtualCores());
CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
if (userMetrics != null) {
userMetrics.incAMUsed(user, res);
}
}
public void decAMUsed(String user, Resource res) {
usedAMResourceMB.decr(res.getMemory());
usedAMResourceVCores.decr(res.getVirtualCores());
CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
if (userMetrics != null) {
userMetrics.decAMUsed(user, res);
}
}
public synchronized static CSQueueMetrics forQueue(String queueName,
Queue parent, boolean enableUserMetrics, Configuration conf) {
MetricsSystem ms = DefaultMetricsSystem.instance();
QueueMetrics metrics = queueMetrics.get(queueName);
if (metrics == null) {
metrics =
new CSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
.tag(QUEUE_INFO, queueName);
// Register with the MetricsSystems
if (ms != null) {
metrics =
ms.register(sourceName(queueName).toString(), "Metrics for queue: "
+ queueName, metrics);
}
queueMetrics.put(queueName, metrics);
}
return (CSQueueMetrics) metrics;
}
@Override
public synchronized QueueMetrics getUserMetrics(String userName) {
if (users == null) {
return null;
}
CSQueueMetrics metrics = (CSQueueMetrics) users.get(userName);
if (metrics == null) {
metrics = new CSQueueMetrics(metricsSystem, queueName, null, false, conf);
users.put(userName, metrics);
metricsSystem.register(
sourceName(queueName).append(",user=").append(userName).toString(),
"Metrics for user '" + userName + "' in queue '" + queueName + "'",
((CSQueueMetrics) metrics.tag(QUEUE_INFO, queueName)).tag(USER_INFO,
userName));
}
return metrics;
}
}
| 4,692 | 34.022388 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.Comparator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
/**
* Read-only interface to {@link CapacityScheduler} context.
*/
public interface CapacitySchedulerContext {
CapacitySchedulerConfiguration getConfiguration();
Resource getMinimumResourceCapability();
Resource getMaximumResourceCapability();
Resource getMaximumResourceCapability(String queueName);
RMContainerTokenSecretManager getContainerTokenSecretManager();
int getNumClusterNodes();
RMContext getRMContext();
Resource getClusterResource();
/**
* Get the yarn configuration.
*/
Configuration getConf();
Comparator<FiCaSchedulerApp> getApplicationComparator();
ResourceCalculator getResourceCalculator();
Comparator<CSQueue> getNonPartitionedQueueComparator();
PartitionedQueueComparator getPartitionedQueueComparator();
FiCaSchedulerNode getNode(NodeId nodeId);
}
| 2,285 | 33.119403 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueCapacities.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
public class QueueCapacities {
private static final String NL = CommonNodeLabelsManager.NO_LABEL;
private static final float LABEL_DOESNT_EXIST_CAP = 0f;
private Map<String, Capacities> capacitiesMap;
private ReadLock readLock;
private WriteLock writeLock;
private final boolean isRoot;
public QueueCapacities(boolean isRoot) {
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
readLock = lock.readLock();
writeLock = lock.writeLock();
capacitiesMap = new HashMap<String, Capacities>();
this.isRoot = isRoot;
}
  // Use an enum here to make the implementation cleaner
private enum CapacityType {
USED_CAP(0), ABS_USED_CAP(1), MAX_CAP(2), ABS_MAX_CAP(3), CAP(4), ABS_CAP(5);
private int idx;
private CapacityType(int idx) {
this.idx = idx;
}
}
private static class Capacities {
private float[] capacitiesArr;
public Capacities() {
capacitiesArr = new float[CapacityType.values().length];
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{used=" + capacitiesArr[0] + "%, ");
sb.append("abs_used=" + capacitiesArr[1] + "%, ");
sb.append("max_cap=" + capacitiesArr[2] + "%, ");
sb.append("abs_max_cap=" + capacitiesArr[3] + "%, ");
sb.append("cap=" + capacitiesArr[4] + "%, ");
sb.append("abs_cap=" + capacitiesArr[5] + "%}");
return sb.toString();
}
}
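  /*
   * Look up a capacity value for the given label under the read lock; returns
   * LABEL_DOESNT_EXIST_CAP (0) when the label has never been set.
   */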
private float _get(String label, CapacityType type) {
try {
readLock.lock();
Capacities cap = capacitiesMap.get(label);
if (null == cap) {
return LABEL_DOESNT_EXIST_CAP;
}
return cap.capacitiesArr[type.idx];
} finally {
readLock.unlock();
}
}
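  /*
   * Store a capacity value for the given label under the write lock, creating
   * the per-label Capacities entry on first use.
   */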
private void _set(String label, CapacityType type, float value) {
try {
writeLock.lock();
Capacities cap = capacitiesMap.get(label);
if (null == cap) {
cap = new Capacities();
capacitiesMap.put(label, cap);
}
cap.capacitiesArr[type.idx] = value;
} finally {
writeLock.unlock();
}
}
/* Used Capacity Getter and Setter */
public float getUsedCapacity() {
return _get(NL, CapacityType.USED_CAP);
}
public float getUsedCapacity(String label) {
return _get(label, CapacityType.USED_CAP);
}
public void setUsedCapacity(float value) {
_set(NL, CapacityType.USED_CAP, value);
}
public void setUsedCapacity(String label, float value) {
_set(label, CapacityType.USED_CAP, value);
}
/* Absolute Used Capacity Getter and Setter */
public float getAbsoluteUsedCapacity() {
return _get(NL, CapacityType.ABS_USED_CAP);
}
public float getAbsoluteUsedCapacity(String label) {
return _get(label, CapacityType.ABS_USED_CAP);
}
public void setAbsoluteUsedCapacity(float value) {
_set(NL, CapacityType.ABS_USED_CAP, value);
}
public void setAbsoluteUsedCapacity(String label, float value) {
_set(label, CapacityType.ABS_USED_CAP, value);
}
/* Capacity Getter and Setter */
public float getCapacity() {
return _get(NL, CapacityType.CAP);
}
public float getCapacity(String label) {
if (StringUtils.equals(label, RMNodeLabelsManager.NO_LABEL) && isRoot) {
return 1f;
}
return _get(label, CapacityType.CAP);
}
public void setCapacity(float value) {
_set(NL, CapacityType.CAP, value);
}
public void setCapacity(String label, float value) {
_set(label, CapacityType.CAP, value);
}
/* Absolute Capacity Getter and Setter */
public float getAbsoluteCapacity() {
return _get(NL, CapacityType.ABS_CAP);
}
public float getAbsoluteCapacity(String label) {
if (StringUtils.equals(label, RMNodeLabelsManager.NO_LABEL) && isRoot) {
return 1f;
}
return _get(label, CapacityType.ABS_CAP);
}
public void setAbsoluteCapacity(float value) {
_set(NL, CapacityType.ABS_CAP, value);
}
public void setAbsoluteCapacity(String label, float value) {
_set(label, CapacityType.ABS_CAP, value);
}
/* Maximum Capacity Getter and Setter */
public float getMaximumCapacity() {
return _get(NL, CapacityType.MAX_CAP);
}
public float getMaximumCapacity(String label) {
return _get(label, CapacityType.MAX_CAP);
}
public void setMaximumCapacity(float value) {
_set(NL, CapacityType.MAX_CAP, value);
}
public void setMaximumCapacity(String label, float value) {
_set(label, CapacityType.MAX_CAP, value);
}
/* Absolute Maximum Capacity Getter and Setter */
public float getAbsoluteMaximumCapacity() {
return _get(NL, CapacityType.ABS_MAX_CAP);
}
public float getAbsoluteMaximumCapacity(String label) {
return _get(label, CapacityType.ABS_MAX_CAP);
}
public void setAbsoluteMaximumCapacity(float value) {
_set(NL, CapacityType.ABS_MAX_CAP, value);
}
public void setAbsoluteMaximumCapacity(String label, float value) {
_set(label, CapacityType.ABS_MAX_CAP, value);
}
/**
   * Clear configurable fields, i.e. (absolute) capacity and (absolute)
   * maximum-capacity. This is used during queue reinitialization: when we
   * reinitialize a queue, we first clear all configurable fields and then load
   * the new values.
*/
public void clearConfigurableFields() {
try {
writeLock.lock();
for (String label : capacitiesMap.keySet()) {
_set(label, CapacityType.CAP, 0);
_set(label, CapacityType.MAX_CAP, 0);
_set(label, CapacityType.ABS_CAP, 0);
_set(label, CapacityType.ABS_MAX_CAP, 0);
}
} finally {
writeLock.unlock();
}
}
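  /* Return a snapshot copy of the labels that currently have capacities set. */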
public Set<String> getExistingNodeLabels() {
try {
readLock.lock();
return new HashSet<String>(capacitiesMap.keySet());
} finally {
readLock.unlock();
}
}
@Override
public String toString() {
try {
readLock.lock();
return this.capacitiesMap.toString();
} finally {
readLock.unlock();
}
}
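  /*
   * Return the live key set of configured node partitions (not a copy), so
   * callers should not modify it.
   */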
public Set<String> getNodePartitionsSet() {
try {
readLock.lock();
return capacitiesMap.keySet();
} finally {
readLock.unlock();
}
}
}
| 7,522 | 27.388679 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.api.records.Resource;
public class CapacityHeadroomProvider {
LeafQueue.User user;
LeafQueue queue;
FiCaSchedulerApp application;
LeafQueue.QueueResourceLimitsInfo queueResourceLimitsInfo;
public CapacityHeadroomProvider(LeafQueue.User user, LeafQueue queue,
FiCaSchedulerApp application,
LeafQueue.QueueResourceLimitsInfo queueResourceLimitsInfo) {
this.user = user;
this.queue = queue;
this.application = application;
this.queueResourceLimitsInfo = queueResourceLimitsInfo;
}
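  /*
   * Compute the user's headroom from a consistent snapshot of the queue's
   * current limit and the cluster resource, taken under the
   * queueResourceLimitsInfo lock.
   */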
public Resource getHeadroom() {
Resource queueCurrentLimit;
Resource clusterResource;
synchronized (queueResourceLimitsInfo) {
queueCurrentLimit = queueResourceLimitsInfo.getQueueCurrentLimit();
clusterResource = queueResourceLimitsInfo.getClusterResource();
}
Resource headroom = queue.getHeadroom(user, queueCurrentLimit,
clusterResource, application);
// Corner case to deal with applications being slightly over-limit
if (headroom.getMemory() < 0) {
headroom.setMemory(0);
}
return headroom;
}
}
| 2,113 | 34.233333 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.collect.ImmutableSet;
public class CapacitySchedulerConfiguration extends ReservationSchedulerConfiguration {
private static final Log LOG =
LogFactory.getLog(CapacitySchedulerConfiguration.class);
private static final String CS_CONFIGURATION_FILE = "capacity-scheduler.xml";
@Private
public static final String PREFIX = "yarn.scheduler.capacity.";
@Private
public static final String DOT = ".";
@Private
public static final String MAXIMUM_APPLICATIONS_SUFFIX =
"maximum-applications";
@Private
public static final String MAXIMUM_SYSTEM_APPLICATIONS =
PREFIX + MAXIMUM_APPLICATIONS_SUFFIX;
@Private
public static final String MAXIMUM_AM_RESOURCE_SUFFIX =
"maximum-am-resource-percent";
@Private
public static final String MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT =
PREFIX + MAXIMUM_AM_RESOURCE_SUFFIX;
@Private
public static final String QUEUES = "queues";
@Private
public static final String CAPACITY = "capacity";
@Private
public static final String MAXIMUM_CAPACITY = "maximum-capacity";
@Private
public static final String USER_LIMIT = "minimum-user-limit-percent";
@Private
public static final String USER_LIMIT_FACTOR = "user-limit-factor";
@Private
public static final String STATE = "state";
@Private
public static final String ACCESSIBLE_NODE_LABELS = "accessible-node-labels";
@Private
public static final String DEFAULT_NODE_LABEL_EXPRESSION =
"default-node-label-expression";
public static final String RESERVE_CONT_LOOK_ALL_NODES = PREFIX
+ "reservations-continue-look-all-nodes";
@Private
public static final boolean DEFAULT_RESERVE_CONT_LOOK_ALL_NODES = true;
@Private
public static final String MAXIMUM_ALLOCATION_MB = "maximum-allocation-mb";
@Private
public static final String MAXIMUM_ALLOCATION_VCORES =
"maximum-allocation-vcores";
public static final String ORDERING_POLICY = "ordering-policy";
public static final String FIFO_ORDERING_POLICY = "fifo";
public static final String FAIR_ORDERING_POLICY = "fair";
public static final String DEFAULT_ORDERING_POLICY = FIFO_ORDERING_POLICY;
@Private
public static final int DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS = 10000;
@Private
public static final float
DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT = 0.1f;
@Private
public static final float UNDEFINED = -1;
@Private
public static final float MINIMUM_CAPACITY_VALUE = 0;
@Private
public static final float MAXIMUM_CAPACITY_VALUE = 100;
@Private
public static final float DEFAULT_MAXIMUM_CAPACITY_VALUE = -1.0f;
@Private
public static final int DEFAULT_USER_LIMIT = 100;
@Private
public static final float DEFAULT_USER_LIMIT_FACTOR = 1.0f;
@Private
public static final String ALL_ACL = "*";
@Private
public static final String NONE_ACL = " ";
@Private public static final String ENABLE_USER_METRICS =
PREFIX +"user-metrics.enable";
@Private public static final boolean DEFAULT_ENABLE_USER_METRICS = false;
/** ResourceComparator for scheduling. */
@Private public static final String RESOURCE_CALCULATOR_CLASS =
PREFIX + "resource-calculator";
@Private public static final Class<? extends ResourceCalculator>
DEFAULT_RESOURCE_CALCULATOR_CLASS = DefaultResourceCalculator.class;
@Private
public static final String ROOT = "root";
@Private
public static final String NODE_LOCALITY_DELAY =
PREFIX + "node-locality-delay";
@Private
public static final int DEFAULT_NODE_LOCALITY_DELAY = 40;
@Private
public static final String SCHEDULE_ASYNCHRONOUSLY_PREFIX =
PREFIX + "schedule-asynchronously";
@Private
public static final String SCHEDULE_ASYNCHRONOUSLY_ENABLE =
SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".enable";
@Private
public static final boolean DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE = false;
@Private
public static final String QUEUE_MAPPING = PREFIX + "queue-mappings";
@Private
public static final String ENABLE_QUEUE_MAPPING_OVERRIDE = QUEUE_MAPPING + "-override.enable";
@Private
public static final boolean DEFAULT_ENABLE_QUEUE_MAPPING_OVERRIDE = false;
@Private
public static final String QUEUE_PREEMPTION_DISABLED = "disable_preemption";
@Private
public static final String DEFAULT_APPLICATION_PRIORITY = "default-application-priority";
@Private
public static final Integer DEFAULT_CONFIGURATION_APPLICATION_PRIORITY = 0;
@Private
public static class QueueMapping {
public enum MappingType {
USER("u"),
GROUP("g");
private final String type;
private MappingType(String type) {
this.type = type;
}
public String toString() {
return type;
}
};
MappingType type;
String source;
String queue;
public QueueMapping(MappingType type, String source, String queue) {
this.type = type;
this.source = source;
this.queue = queue;
}
}
@Private
public static final String AVERAGE_CAPACITY = "average-capacity";
@Private
public static final String IS_RESERVABLE = "reservable";
@Private
public static final String RESERVATION_WINDOW = "reservation-window";
@Private
public static final String INSTANTANEOUS_MAX_CAPACITY =
"instantaneous-max-capacity";
@Private
public static final String RESERVATION_ADMISSION_POLICY =
"reservation-policy";
@Private
public static final String RESERVATION_AGENT_NAME = "reservation-agent";
@Private
public static final String RESERVATION_SHOW_RESERVATION_AS_QUEUE =
"show-reservations-as-queues";
@Private
public static final String RESERVATION_PLANNER_NAME = "reservation-planner";
@Private
public static final String RESERVATION_MOVE_ON_EXPIRY =
"reservation-move-on-expiry";
@Private
public static final String RESERVATION_ENFORCEMENT_WINDOW =
"reservation-enforcement-window";
public CapacitySchedulerConfiguration() {
this(new Configuration());
}
public CapacitySchedulerConfiguration(Configuration configuration) {
this(configuration, true);
}
public CapacitySchedulerConfiguration(Configuration configuration,
boolean useLocalConfigurationProvider) {
super(configuration);
if (useLocalConfigurationProvider) {
addResource(CS_CONFIGURATION_FILE);
}
}
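  /* Property key prefix for a queue: "yarn.scheduler.capacity.<queue-path>." */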
static String getQueuePrefix(String queue) {
String queueName = PREFIX + queue + DOT;
return queueName;
}
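  /*
   * Property key prefix for a (queue, label) pair:
   * "<queue-prefix>accessible-node-labels.<label>."; for the NO_LABEL
   * partition this falls back to the plain queue prefix.
   */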
private String getNodeLabelPrefix(String queue, String label) {
if (label.equals(CommonNodeLabelsManager.NO_LABEL)) {
return getQueuePrefix(queue);
}
return getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS + DOT + label + DOT;
}
public int getMaximumSystemApplications() {
int maxApplications =
getInt(MAXIMUM_SYSTEM_APPLICATIONS, DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS);
return maxApplications;
}
public float getMaximumApplicationMasterResourcePercent() {
return getFloat(MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT,
DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT);
}
/**
* Get the maximum applications per queue setting.
* @param queue name of the queue
* @return setting specified or -1 if not set
*/
public int getMaximumApplicationsPerQueue(String queue) {
int maxApplicationsPerQueue =
getInt(getQueuePrefix(queue) + MAXIMUM_APPLICATIONS_SUFFIX,
(int)UNDEFINED);
return maxApplicationsPerQueue;
}
/**
* Get the maximum am resource percent per queue setting.
* @param queue name of the queue
* @return per queue setting or defaults to the global am-resource-percent
* setting if per queue setting not present
*/
public float getMaximumApplicationMasterResourcePerQueuePercent(String queue) {
return getFloat(getQueuePrefix(queue) + MAXIMUM_AM_RESOURCE_SUFFIX,
getMaximumApplicationMasterResourcePercent());
}
public void setMaximumApplicationMasterResourcePerQueuePercent(String queue,
float percent) {
setFloat(getQueuePrefix(queue) + MAXIMUM_AM_RESOURCE_SUFFIX, percent);
}
public float getNonLabeledQueueCapacity(String queue) {
float capacity = queue.equals("root") ? 100.0f : getFloat(
getQueuePrefix(queue) + CAPACITY, UNDEFINED);
if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) {
throw new IllegalArgumentException("Illegal " +
"capacity of " + capacity + " for queue " + queue);
}
LOG.debug("CSConf - getCapacity: queuePrefix=" + getQueuePrefix(queue) +
", capacity=" + capacity);
return capacity;
}
public void setCapacity(String queue, float capacity) {
if (queue.equals("root")) {
throw new IllegalArgumentException(
"Cannot set capacity, root queue has a fixed capacity of 100.0f");
}
setFloat(getQueuePrefix(queue) + CAPACITY, capacity);
LOG.debug("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue) +
", capacity=" + capacity);
}
public float getNonLabeledQueueMaximumCapacity(String queue) {
float maxCapacity = getFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY,
MAXIMUM_CAPACITY_VALUE);
maxCapacity = (maxCapacity == DEFAULT_MAXIMUM_CAPACITY_VALUE) ?
MAXIMUM_CAPACITY_VALUE : maxCapacity;
return maxCapacity;
}
public void setMaximumCapacity(String queue, float maxCapacity) {
if (maxCapacity > MAXIMUM_CAPACITY_VALUE) {
throw new IllegalArgumentException("Illegal " +
"maximum-capacity of " + maxCapacity + " for queue " + queue);
}
setFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
LOG.debug("CSConf - setMaxCapacity: queuePrefix=" + getQueuePrefix(queue) +
", maxCapacity=" + maxCapacity);
}
public void setCapacityByLabel(String queue, String label, float capacity) {
setFloat(getNodeLabelPrefix(queue, label) + CAPACITY, capacity);
}
public void setMaximumCapacityByLabel(String queue, String label,
float capacity) {
setFloat(getNodeLabelPrefix(queue, label) + MAXIMUM_CAPACITY, capacity);
}
public int getUserLimit(String queue) {
int userLimit = getInt(getQueuePrefix(queue) + USER_LIMIT,
DEFAULT_USER_LIMIT);
return userLimit;
}
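  /*
   * Build the configured ordering policy for a queue: "fifo" and "fair" map to
   * the built-in policies, any other value is treated as a class name, and the
   * resulting policy is configured with all properties found under
   * "<queue-prefix>ordering-policy.".
   */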
@SuppressWarnings("unchecked")
public <S extends SchedulableEntity> OrderingPolicy<S> getOrderingPolicy(
String queue) {
String policyType = get(getQueuePrefix(queue) + ORDERING_POLICY,
DEFAULT_ORDERING_POLICY);
OrderingPolicy<S> orderingPolicy;
if (policyType.trim().equals(FIFO_ORDERING_POLICY)) {
policyType = FifoOrderingPolicy.class.getName();
}
if (policyType.trim().equals(FAIR_ORDERING_POLICY)) {
policyType = FairOrderingPolicy.class.getName();
}
try {
orderingPolicy = (OrderingPolicy<S>)
Class.forName(policyType).newInstance();
} catch (Exception e) {
String message = "Unable to construct ordering policy for: " + policyType + ", " + e.getMessage();
throw new RuntimeException(message, e);
}
Map<String, String> config = new HashMap<String, String>();
String confPrefix = getQueuePrefix(queue) + ORDERING_POLICY + ".";
for (Map.Entry<String, String> kv : this) {
if (kv.getKey().startsWith(confPrefix)) {
config.put(kv.getKey().substring(confPrefix.length()), kv.getValue());
}
}
orderingPolicy.configure(config);
return orderingPolicy;
}
public void setUserLimit(String queue, int userLimit) {
setInt(getQueuePrefix(queue) + USER_LIMIT, userLimit);
LOG.debug("here setUserLimit: queuePrefix=" + getQueuePrefix(queue) +
", userLimit=" + getUserLimit(queue));
}
public float getUserLimitFactor(String queue) {
float userLimitFactor =
getFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR,
DEFAULT_USER_LIMIT_FACTOR);
return userLimitFactor;
}
public void setUserLimitFactor(String queue, float userLimitFactor) {
setFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, userLimitFactor);
}
public QueueState getState(String queue) {
String state = get(getQueuePrefix(queue) + STATE);
return (state != null) ?
QueueState.valueOf(StringUtils.toUpperCase(state)) : QueueState.RUNNING;
}
public void setAccessibleNodeLabels(String queue, Set<String> labels) {
if (labels == null) {
return;
}
String str = StringUtils.join(",", labels);
set(getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS, str);
}
public Set<String> getAccessibleNodeLabels(String queue) {
String accessibleLabelStr =
get(getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS);
// When accessible-label is null,
if (accessibleLabelStr == null) {
// Only return null when queue is not ROOT
if (!queue.equals(ROOT)) {
return null;
}
} else {
// print a warning when accessibleNodeLabel specified in config and queue
// is ROOT
if (queue.equals(ROOT)) {
LOG.warn("Accessible node labels for root queue will be ignored,"
+ " it will be automatically set to \"*\".");
}
}
// always return ANY for queue root
if (queue.equals(ROOT)) {
return ImmutableSet.of(RMNodeLabelsManager.ANY);
}
// In other cases, split the accessibleLabelStr by ","
Set<String> set = new HashSet<String>();
for (String str : accessibleLabelStr.split(",")) {
if (!str.trim().isEmpty()) {
set.add(str.trim());
}
}
    // if labels contain "*", keep only ANY
if (set.contains(RMNodeLabelsManager.ANY)) {
set.clear();
set.add(RMNodeLabelsManager.ANY);
}
return Collections.unmodifiableSet(set);
}
private float internalGetLabeledQueueCapacity(String queue, String label, String suffix,
float defaultValue) {
String capacityPropertyName = getNodeLabelPrefix(queue, label) + suffix;
float capacity = getFloat(capacityPropertyName, defaultValue);
if (capacity < MINIMUM_CAPACITY_VALUE
|| capacity > MAXIMUM_CAPACITY_VALUE) {
throw new IllegalArgumentException("Illegal capacity of " + capacity
+ " for node-label=" + label + " in queue=" + queue
+ ", valid capacity should in range of [0, 100].");
}
if (LOG.isDebugEnabled()) {
LOG.debug("CSConf - getCapacityOfLabel: prefix="
+ getNodeLabelPrefix(queue, label) + ", capacity=" + capacity);
}
return capacity;
}
public float getLabeledQueueCapacity(String queue, String label) {
return internalGetLabeledQueueCapacity(queue, label, CAPACITY, 0f);
}
public float getLabeledQueueMaximumCapacity(String queue, String label) {
return internalGetLabeledQueueCapacity(queue, label, MAXIMUM_CAPACITY, 100f);
}
public String getDefaultNodeLabelExpression(String queue) {
String defaultLabelExpression = get(getQueuePrefix(queue)
+ DEFAULT_NODE_LABEL_EXPRESSION);
if (defaultLabelExpression == null) {
return null;
}
return defaultLabelExpression.trim();
}
public void setDefaultNodeLabelExpression(String queue, String exp) {
set(getQueuePrefix(queue) + DEFAULT_NODE_LABEL_EXPRESSION, exp);
}
/*
   * Returns whether we should continue to look at all heartbeating nodes even
   * after the reservation limit was hit. A node heartbeating in could satisfy
   * the request and thus be a better pick than waiting for the reservation to
   * be fulfilled. This config is refreshable.
*/
public boolean getReservationContinueLook() {
return getBoolean(RESERVE_CONT_LOOK_ALL_NODES,
DEFAULT_RESERVE_CONT_LOOK_ALL_NODES);
}
private static String getAclKey(QueueACL acl) {
return "acl_" + StringUtils.toLowerCase(acl.toString());
}
public AccessControlList getAcl(String queue, QueueACL acl) {
String queuePrefix = getQueuePrefix(queue);
// The root queue defaults to all access if not defined
// Sub queues inherit access if not defined
String defaultAcl = queue.equals(ROOT) ? ALL_ACL : NONE_ACL;
String aclString = get(queuePrefix + getAclKey(acl), defaultAcl);
return new AccessControlList(aclString);
}
public void setAcl(String queue, QueueACL acl, String aclString) {
String queuePrefix = getQueuePrefix(queue);
set(queuePrefix + getAclKey(acl), aclString);
}
public Map<AccessType, AccessControlList> getAcls(String queue) {
Map<AccessType, AccessControlList> acls =
new HashMap<AccessType, AccessControlList>();
for (QueueACL acl : QueueACL.values()) {
acls.put(SchedulerUtils.toAccessType(acl), getAcl(queue, acl));
}
return acls;
}
public void setAcls(String queue, Map<QueueACL, AccessControlList> acls) {
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
setAcl(queue, e.getKey(), e.getValue().getAclString());
}
}
public String[] getQueues(String queue) {
LOG.debug("CSConf - getQueues called for: queuePrefix=" + getQueuePrefix(queue));
String[] queues = getStrings(getQueuePrefix(queue) + QUEUES);
List<String> trimmedQueueNames = new ArrayList<String>();
if (null != queues) {
for (String s : queues) {
trimmedQueueNames.add(s.trim());
}
queues = trimmedQueueNames.toArray(new String[0]);
}
LOG.debug("CSConf - getQueues: queuePrefix=" + getQueuePrefix(queue) +
", queues=" + ((queues == null) ? "" : StringUtils.arrayToString(queues)));
return queues;
}
public void setQueues(String queue, String[] subQueues) {
set(getQueuePrefix(queue) + QUEUES, StringUtils.arrayToString(subQueues));
LOG.debug("CSConf - setQueues: qPrefix=" + getQueuePrefix(queue) +
", queues=" + StringUtils.arrayToString(subQueues));
}
public Resource getMinimumAllocation() {
int minimumMemory = getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int minimumCores = getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
return Resources.createResource(minimumMemory, minimumCores);
}
public Resource getMaximumAllocation() {
int maximumMemory = getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
int maximumCores = getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
return Resources.createResource(maximumMemory, maximumCores);
}
/**
* Get the per queue setting for the maximum limit to allocate to
* each container request.
*
* @param queue
* name of the queue
* @return setting specified per queue else falls back to the cluster setting
*/
public Resource getMaximumAllocationPerQueue(String queue) {
String queuePrefix = getQueuePrefix(queue);
int maxAllocationMbPerQueue = getInt(queuePrefix + MAXIMUM_ALLOCATION_MB,
(int)UNDEFINED);
int maxAllocationVcoresPerQueue = getInt(
queuePrefix + MAXIMUM_ALLOCATION_VCORES, (int)UNDEFINED);
if (LOG.isDebugEnabled()) {
LOG.debug("max alloc mb per queue for " + queue + " is "
+ maxAllocationMbPerQueue);
LOG.debug("max alloc vcores per queue for " + queue + " is "
+ maxAllocationVcoresPerQueue);
}
Resource clusterMax = getMaximumAllocation();
if (maxAllocationMbPerQueue == (int)UNDEFINED) {
LOG.info("max alloc mb per queue for " + queue + " is undefined");
maxAllocationMbPerQueue = clusterMax.getMemory();
}
if (maxAllocationVcoresPerQueue == (int)UNDEFINED) {
LOG.info("max alloc vcore per queue for " + queue + " is undefined");
maxAllocationVcoresPerQueue = clusterMax.getVirtualCores();
}
Resource result = Resources.createResource(maxAllocationMbPerQueue,
maxAllocationVcoresPerQueue);
if (maxAllocationMbPerQueue > clusterMax.getMemory()
|| maxAllocationVcoresPerQueue > clusterMax.getVirtualCores()) {
throw new IllegalArgumentException(
"Queue maximum allocation cannot be larger than the cluster setting"
+ " for queue " + queue
+ " max allocation per queue: " + result
+ " cluster setting: " + clusterMax);
}
return result;
}
public boolean getEnableUserMetrics() {
return getBoolean(ENABLE_USER_METRICS, DEFAULT_ENABLE_USER_METRICS);
}
public int getNodeLocalityDelay() {
return getInt(NODE_LOCALITY_DELAY, DEFAULT_NODE_LOCALITY_DELAY);
}
public ResourceCalculator getResourceCalculator() {
return ReflectionUtils.newInstance(
getClass(
RESOURCE_CALCULATOR_CLASS,
DEFAULT_RESOURCE_CALCULATOR_CLASS,
ResourceCalculator.class),
this);
}
public boolean getUsePortForNodeName() {
return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
}
public void setResourceComparator(
Class<? extends ResourceCalculator> resourceCalculatorClass) {
setClass(
RESOURCE_CALCULATOR_CLASS,
resourceCalculatorClass,
ResourceCalculator.class);
}
public boolean getScheduleAynschronously() {
return getBoolean(SCHEDULE_ASYNCHRONOUSLY_ENABLE,
DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE);
}
public void setScheduleAynschronously(boolean async) {
setBoolean(SCHEDULE_ASYNCHRONOUSLY_ENABLE, async);
}
public boolean getOverrideWithQueueMappings() {
return getBoolean(ENABLE_QUEUE_MAPPING_OVERRIDE,
DEFAULT_ENABLE_QUEUE_MAPPING_OVERRIDE);
}
/**
   * Returns a collection of strings, trimming leading and trailing whitespace
   * on each value.
*
* @param str
* String to parse
* @param delim
* delimiter to separate the values
* @return Collection of parsed elements.
*/
private static Collection<String> getTrimmedStringCollection(String str,
String delim) {
List<String> values = new ArrayList<String>();
if (str == null)
return values;
StringTokenizer tokenizer = new StringTokenizer(str, delim);
while (tokenizer.hasMoreTokens()) {
String next = tokenizer.nextToken();
if (next == null || next.trim().isEmpty()) {
continue;
}
values.add(next.trim());
}
return values;
}
/**
* Get user/group mappings to queues.
*
   * @return user/group mappings, where each mapping has the form
   *         {@code u:<user>:<queue>} or {@code g:<group>:<queue>}; an
   *         IllegalArgumentException is thrown for illegal configs
*/
public List<QueueMapping> getQueueMappings() {
List<QueueMapping> mappings =
new ArrayList<CapacitySchedulerConfiguration.QueueMapping>();
Collection<String> mappingsString =
getTrimmedStringCollection(QUEUE_MAPPING);
for (String mappingValue : mappingsString) {
String[] mapping =
getTrimmedStringCollection(mappingValue, ":")
.toArray(new String[] {});
if (mapping.length != 3 || mapping[1].length() == 0
|| mapping[2].length() == 0) {
throw new IllegalArgumentException(
"Illegal queue mapping " + mappingValue);
}
QueueMapping m;
try {
QueueMapping.MappingType mappingType;
if (mapping[0].equals("u")) {
mappingType = QueueMapping.MappingType.USER;
} else if (mapping[0].equals("g")) {
mappingType = QueueMapping.MappingType.GROUP;
} else {
throw new IllegalArgumentException(
"unknown mapping prefix " + mapping[0]);
}
m = new QueueMapping(
mappingType,
mapping[1],
mapping[2]);
} catch (Throwable t) {
throw new IllegalArgumentException(
"Illegal queue mapping " + mappingValue);
}
if (m != null) {
mappings.add(m);
}
}
return mappings;
}
public boolean isReservable(String queue) {
boolean isReservable =
getBoolean(getQueuePrefix(queue) + IS_RESERVABLE, false);
return isReservable;
}
public void setReservable(String queue, boolean isReservable) {
setBoolean(getQueuePrefix(queue) + IS_RESERVABLE, isReservable);
LOG.debug("here setReservableQueue: queuePrefix=" + getQueuePrefix(queue)
+ ", isReservableQueue=" + isReservable(queue));
}
@Override
public long getReservationWindow(String queue) {
long reservationWindow =
getLong(getQueuePrefix(queue) + RESERVATION_WINDOW,
DEFAULT_RESERVATION_WINDOW);
return reservationWindow;
}
@Override
public float getAverageCapacity(String queue) {
float avgCapacity =
getFloat(getQueuePrefix(queue) + AVERAGE_CAPACITY,
MAXIMUM_CAPACITY_VALUE);
return avgCapacity;
}
@Override
public float getInstantaneousMaxCapacity(String queue) {
float instMaxCapacity =
getFloat(getQueuePrefix(queue) + INSTANTANEOUS_MAX_CAPACITY,
MAXIMUM_CAPACITY_VALUE);
return instMaxCapacity;
}
public void setInstantaneousMaxCapacity(String queue, float instMaxCapacity) {
setFloat(getQueuePrefix(queue) + INSTANTANEOUS_MAX_CAPACITY,
instMaxCapacity);
}
public void setReservationWindow(String queue, long reservationWindow) {
setLong(getQueuePrefix(queue) + RESERVATION_WINDOW, reservationWindow);
}
public void setAverageCapacity(String queue, float avgCapacity) {
setFloat(getQueuePrefix(queue) + AVERAGE_CAPACITY, avgCapacity);
}
@Override
public String getReservationAdmissionPolicy(String queue) {
String reservationPolicy =
get(getQueuePrefix(queue) + RESERVATION_ADMISSION_POLICY,
DEFAULT_RESERVATION_ADMISSION_POLICY);
return reservationPolicy;
}
public void setReservationAdmissionPolicy(String queue,
String reservationPolicy) {
set(getQueuePrefix(queue) + RESERVATION_ADMISSION_POLICY, reservationPolicy);
}
@Override
public String getReservationAgent(String queue) {
String reservationAgent =
get(getQueuePrefix(queue) + RESERVATION_AGENT_NAME,
DEFAULT_RESERVATION_AGENT_NAME);
return reservationAgent;
}
public void setReservationAgent(String queue, String reservationPolicy) {
set(getQueuePrefix(queue) + RESERVATION_AGENT_NAME, reservationPolicy);
}
@Override
public boolean getShowReservationAsQueues(String queuePath) {
boolean showReservationAsQueues =
getBoolean(getQueuePrefix(queuePath)
+ RESERVATION_SHOW_RESERVATION_AS_QUEUE,
DEFAULT_SHOW_RESERVATIONS_AS_QUEUES);
return showReservationAsQueues;
}
@Override
public String getReplanner(String queue) {
String replanner =
get(getQueuePrefix(queue) + RESERVATION_PLANNER_NAME,
DEFAULT_RESERVATION_PLANNER_NAME);
return replanner;
}
@Override
public boolean getMoveOnExpiry(String queue) {
boolean killOnExpiry =
getBoolean(getQueuePrefix(queue) + RESERVATION_MOVE_ON_EXPIRY,
DEFAULT_RESERVATION_MOVE_ON_EXPIRY);
return killOnExpiry;
}
@Override
public long getEnforcementWindow(String queue) {
long enforcementWindow =
getLong(getQueuePrefix(queue) + RESERVATION_ENFORCEMENT_WINDOW,
DEFAULT_RESERVATION_ENFORCEMENT_WINDOW);
return enforcementWindow;
}
/**
* Sets the <em>disable_preemption</em> property in order to indicate
* whether or not container preemption will be disabled for the specified
* queue.
*
* @param queue queue path
* @param preemptionDisabled true if preemption is disabled on queue
*/
public void setPreemptionDisabled(String queue, boolean preemptionDisabled) {
setBoolean(getQueuePrefix(queue) + QUEUE_PREEMPTION_DISABLED,
preemptionDisabled);
}
/**
* Indicates whether preemption is disabled on the specified queue.
*
* @param queue queue path to query
* @param defaultVal used as default if the <em>disable_preemption</em>
* is not set in the configuration
* @return true if preemption is disabled on <em>queue</em>, false otherwise
*/
public boolean getPreemptionDisabled(String queue, boolean defaultVal) {
boolean preemptionDisabled =
getBoolean(getQueuePrefix(queue) + QUEUE_PREEMPTION_DISABLED,
defaultVal);
return preemptionDisabled;
}
/**
* Get configured node labels in a given queuePath
*/
public Set<String> getConfiguredNodeLabels(String queuePath) {
Set<String> configuredNodeLabels = new HashSet<String>();
Entry<String, String> e = null;
Iterator<Entry<String, String>> iter = iterator();
while (iter.hasNext()) {
e = iter.next();
String key = e.getKey();
if (key.startsWith(getQueuePrefix(queuePath) + ACCESSIBLE_NODE_LABELS
+ DOT)) {
// Find <label-name> in
// <queue-path>.accessible-node-labels.<label-name>.property
int labelStartIdx =
key.indexOf(ACCESSIBLE_NODE_LABELS)
+ ACCESSIBLE_NODE_LABELS.length() + 1;
int labelEndIndx = key.indexOf('.', labelStartIdx);
String labelName = key.substring(labelStartIdx, labelEndIndx);
configuredNodeLabels.add(labelName);
}
}
// always add NO_LABEL
configuredNodeLabels.add(RMNodeLabelsManager.NO_LABEL);
return configuredNodeLabels;
}
public Integer getDefaultApplicationPriorityConfPerQueue(String queue) {
Integer defaultPriority = getInt(getQueuePrefix(queue)
+ DEFAULT_APPLICATION_PRIORITY,
DEFAULT_CONFIGURATION_APPLICATION_PRIORITY);
return defaultPriority;
}
}
| 32,613 | 32.83195 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ReservationQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This represents a dynamic {@link LeafQueue} managed by the
 * {@link ReservationSystem}.
 */
public class ReservationQueue extends LeafQueue {
private static final Logger LOG = LoggerFactory
.getLogger(ReservationQueue.class);
private PlanQueue parent;
public ReservationQueue(CapacitySchedulerContext cs, String queueName,
PlanQueue parent) throws IOException {
super(cs, queueName, parent, null);
    // the following parameters are common to all reservations in the plan
updateQuotas(parent.getUserLimitForReservation(),
parent.getUserLimitFactor(),
parent.getMaxApplicationsForReservations(),
parent.getMaxApplicationsPerUserForReservation());
this.parent = parent;
}
@Override
public synchronized void reinitialize(CSQueue newlyParsedQueue,
Resource clusterResource) throws IOException {
// Sanity check
if (!(newlyParsedQueue instanceof ReservationQueue)
|| !newlyParsedQueue.getQueuePath().equals(getQueuePath())) {
throw new IOException("Trying to reinitialize " + getQueuePath()
+ " from " + newlyParsedQueue.getQueuePath());
}
super.reinitialize(newlyParsedQueue, clusterResource);
CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
minimumAllocation, this, labelManager, null);
updateQuotas(parent.getUserLimitForReservation(),
parent.getUserLimitFactor(),
parent.getMaxApplicationsForReservations(),
parent.getMaxApplicationsPerUserForReservation());
}
/**
   * This method changes the capacity of the queue and adjusts its
   * absoluteCapacity accordingly.
   *
   * @param entitlement the new entitlement for the queue (capacity,
   * maxCapacity, etc.)
* @throws SchedulerDynamicEditException
*/
public synchronized void setEntitlement(QueueEntitlement entitlement)
throws SchedulerDynamicEditException {
float capacity = entitlement.getCapacity();
if (capacity < 0 || capacity > 1.0f) {
throw new SchedulerDynamicEditException(
"Capacity demand is not in the [0,1] range: " + capacity);
}
setCapacity(capacity);
setAbsoluteCapacity(getParent().getAbsoluteCapacity() * getCapacity());
// note: we currently set maxCapacity to capacity
// this might be revised later
setMaxCapacity(entitlement.getMaxCapacity());
if (LOG.isDebugEnabled()) {
LOG.debug("successfully changed to " + capacity + " for queue "
+ this.getQueueName());
}
}
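  /*
   * Apply the plan-wide user-limit and application quotas to this reservation
   * queue; the values are taken from the parent PlanQueue.
   */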
private void updateQuotas(int userLimit, float userLimitFactor,
int maxAppsForReservation, int maxAppsPerUserForReservation) {
setUserLimit(userLimit);
setUserLimitFactor(userLimitFactor);
setMaxApplications(maxAppsForReservation);
maxApplicationsPerUser = maxAppsPerUserForReservation;
}
@Override
protected void setupConfigurableCapacities() {
CSQueueUtils.updateAndCheckCapacitiesByLabel(getQueuePath(),
queueCapacities, parent == null ? null : parent.getQueueCapacities());
}
}
| 4,333 | 37.696429 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
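/*
 * JAXB-annotated snapshot of a single user's usage within a queue: resources
 * used, active/pending application counts, AM resource usage, and the user's
 * resource limit.
 */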
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class UserInfo {
protected String username;
protected ResourceInfo resourcesUsed;
protected int numPendingApplications;
protected int numActiveApplications;
protected ResourceInfo AMResourceUsed;
protected ResourceInfo userResourceLimit;
UserInfo() {}
UserInfo(String username, Resource resUsed, int activeApps, int pendingApps,
Resource amResUsed, Resource resourceLimit) {
this.username = username;
this.resourcesUsed = new ResourceInfo(resUsed);
this.numActiveApplications = activeApps;
this.numPendingApplications = pendingApps;
this.AMResourceUsed = new ResourceInfo(amResUsed);
this.userResourceLimit = new ResourceInfo(resourceLimit);
}
public String getUsername() {
return username;
}
public ResourceInfo getResourcesUsed() {
return resourcesUsed;
}
public int getNumPendingApplications() {
return numPendingApplications;
}
public int getNumActiveApplications() {
return numActiveApplications;
}
public ResourceInfo getAMResourcesUsed() {
return AMResourceUsed;
}
public ResourceInfo getUserResourceLimit() {
return userResourceLimit;
}
}
| 2,375 | 31.108108 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.collect.Sets;
class CSQueueUtils {
final static float EPSILON = 0.0001f;
/*
* Used only by tests
*/
public static void checkMaxCapacity(String queueName,
float capacity, float maximumCapacity) {
if (maximumCapacity < 0.0f || maximumCapacity > 1.0f) {
throw new IllegalArgumentException(
"Illegal value of maximumCapacity " + maximumCapacity +
" used in call to setMaxCapacity for queue " + queueName);
}
}
/*
* Used only by tests
*/
public static void checkAbsoluteCapacity(String queueName,
float absCapacity, float absMaxCapacity) {
if (absMaxCapacity < (absCapacity - EPSILON)) {
throw new IllegalArgumentException("Illegal call to setMaxCapacity. "
+ "Queue '" + queueName + "' has " + "an absolute capacity (" + absCapacity
+ ") greater than " + "its absolute maximumCapacity (" + absMaxCapacity
+ ")");
}
}
/**
* Check sanity of capacities:
* - capacity <= maxCapacity
* - absCapacity <= absMaximumCapacity
*/
private static void capacitiesSanityCheck(String queueName,
QueueCapacities queueCapacities) {
for (String label : queueCapacities.getExistingNodeLabels()) {
float capacity = queueCapacities.getCapacity(label);
float maximumCapacity = queueCapacities.getMaximumCapacity(label);
if (capacity > maximumCapacity) {
throw new IllegalArgumentException("Illegal queue capacity setting, "
+ "(capacity=" + capacity + ") > (maximum-capacity="
+ maximumCapacity + "). When label=[" + label + "]");
}
      // Actually, this may not be needed since we have verified capacity <=
      // maximumCapacity. And the way we compute absolute capacity (abs(x) =
      // cap(x) * cap(x.parent) * ...) is a monotonically increasing function.
      // But keep it here to make sure our absolute capacity computation works
      // correctly.
float absCapacity = queueCapacities.getAbsoluteCapacity(label);
float absMaxCapacity = queueCapacities.getAbsoluteMaximumCapacity(label);
if (absCapacity > absMaxCapacity) {
throw new IllegalArgumentException("Illegal queue capacity setting, "
+ "(abs-capacity=" + absCapacity + ") > (abs-maximum-capacity="
+ absMaxCapacity + "). When label=[" + label + "]");
}
}
}
public static float computeAbsoluteMaximumCapacity(
float maximumCapacity, CSQueue parent) {
float parentAbsMaxCapacity =
(parent == null) ? 1.0f : parent.getAbsoluteMaximumCapacity();
return (parentAbsMaxCapacity * maximumCapacity);
}
/**
   * This method is intended to be used by ReservationQueue. A ReservationQueue
   * does not appear in the configuration file, so we should not load capacity
   * settings from the configuration for a reservation queue.
*/
public static void updateAndCheckCapacitiesByLabel(String queuePath,
QueueCapacities queueCapacities, QueueCapacities parentQueueCapacities) {
updateAbsoluteCapacitiesByNodeLabels(queueCapacities, parentQueueCapacities);
capacitiesSanityCheck(queuePath, queueCapacities);
}
/**
   * Do the following steps for capacities:
   * - Load capacities from configuration
   * - Update absolute capacities for the new capacities
   * - Check that the capacities/absolute-capacities are legal
*/
public static void loadUpdateAndCheckCapacities(String queuePath,
CapacitySchedulerConfiguration csConf,
QueueCapacities queueCapacities, QueueCapacities parentQueueCapacities) {
loadCapacitiesByLabelsFromConf(queuePath,
queueCapacities, csConf);
updateAbsoluteCapacitiesByNodeLabels(queueCapacities, parentQueueCapacities);
capacitiesSanityCheck(queuePath, queueCapacities);
}
private static void loadCapacitiesByLabelsFromConf(String queuePath,
QueueCapacities queueCapacities, CapacitySchedulerConfiguration csConf) {
queueCapacities.clearConfigurableFields();
Set<String> configuredNodelabels =
csConf.getConfiguredNodeLabels(queuePath);
for (String label : configuredNodelabels) {
if (label.equals(CommonNodeLabelsManager.NO_LABEL)) {
queueCapacities.setCapacity(CommonNodeLabelsManager.NO_LABEL,
csConf.getNonLabeledQueueCapacity(queuePath) / 100);
queueCapacities.setMaximumCapacity(CommonNodeLabelsManager.NO_LABEL,
csConf.getNonLabeledQueueMaximumCapacity(queuePath) / 100);
} else {
queueCapacities.setCapacity(label,
csConf.getLabeledQueueCapacity(queuePath, label) / 100);
queueCapacities.setMaximumCapacity(label,
csConf.getLabeledQueueMaximumCapacity(queuePath, label) / 100);
}
}
}
// Set absolute capacities for {capacity, maximum-capacity}
private static void updateAbsoluteCapacitiesByNodeLabels(
QueueCapacities queueCapacities, QueueCapacities parentQueueCapacities) {
for (String label : queueCapacities.getExistingNodeLabels()) {
float capacity = queueCapacities.getCapacity(label);
if (capacity > 0f) {
queueCapacities.setAbsoluteCapacity(
label,
capacity
* (parentQueueCapacities == null ? 1 : parentQueueCapacities
.getAbsoluteCapacity(label)));
}
float maxCapacity = queueCapacities.getMaximumCapacity(label);
if (maxCapacity > 0f) {
queueCapacities.setAbsoluteMaximumCapacity(
label,
maxCapacity
* (parentQueueCapacities == null ? 1 : parentQueueCapacities
.getAbsoluteMaximumCapacity(label)));
}
}
}
/**
* Update partitioned resource usage, if nodePartition == null, will update
* used resource for all partitions of this queue.
*/
private static void updateUsedCapacity(final ResourceCalculator rc,
final Resource totalPartitionResource, final Resource minimumAllocation,
ResourceUsage queueResourceUsage, QueueCapacities queueCapacities,
String nodePartition) {
float absoluteUsedCapacity = 0.0f;
float usedCapacity = 0.0f;
if (Resources.greaterThan(rc, totalPartitionResource,
totalPartitionResource, Resources.none())) {
// queueGuaranteed = totalPartitionedResource *
// absolute_capacity(partition)
Resource queueGuranteedResource =
Resources.multiply(totalPartitionResource,
queueCapacities.getAbsoluteCapacity(nodePartition));
      // make queueGuranteedResource >= minimum_allocation to avoid division by zero.
queueGuranteedResource =
Resources.max(rc, totalPartitionResource, queueGuranteedResource,
minimumAllocation);
Resource usedResource = queueResourceUsage.getUsed(nodePartition);
absoluteUsedCapacity =
Resources.divide(rc, totalPartitionResource, usedResource,
totalPartitionResource);
usedCapacity =
Resources.divide(rc, totalPartitionResource, usedResource,
queueGuranteedResource);
}
queueCapacities
.setAbsoluteUsedCapacity(nodePartition, absoluteUsedCapacity);
queueCapacities.setUsedCapacity(nodePartition, usedCapacity);
}
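  /*
   * Compute how much non-partitioned resource is still available to the queue:
   * max(0, totalResource * absoluteCapacity - usedResources).
   */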
private static Resource getNonPartitionedMaxAvailableResourceToQueue(
final ResourceCalculator rc, Resource totalNonPartitionedResource,
CSQueue queue) {
Resource queueLimit = Resources.none();
Resource usedResources = queue.getUsedResources();
if (Resources.greaterThan(rc, totalNonPartitionedResource,
totalNonPartitionedResource, Resources.none())) {
queueLimit =
Resources.multiply(totalNonPartitionedResource,
queue.getAbsoluteCapacity());
}
Resource available = Resources.subtract(queueLimit, usedResources);
return Resources.max(rc, totalNonPartitionedResource, available,
Resources.none());
}
/**
* <p>
* Update Queue Statistics:
* </p>
*
* <li>used-capacity/absolute-used-capacity by partition</li>
* <li>non-partitioned max-avail-resource to queue</li>
*
* <p>
* When nodePartition is null, all partition of
* used-capacity/absolute-used-capacity will be updated.
* </p>
*/
@Lock(CSQueue.class)
public static void updateQueueStatistics(
final ResourceCalculator rc, final Resource cluster, final Resource minimumAllocation,
final CSQueue childQueue, final RMNodeLabelsManager nlm,
final String nodePartition) {
QueueCapacities queueCapacities = childQueue.getQueueCapacities();
ResourceUsage queueResourceUsage = childQueue.getQueueResourceUsage();
if (nodePartition == null) {
for (String partition : Sets.union(
queueCapacities.getNodePartitionsSet(),
queueResourceUsage.getNodePartitionsSet())) {
updateUsedCapacity(rc, nlm.getResourceByLabel(partition, cluster),
minimumAllocation, queueResourceUsage, queueCapacities, partition);
}
} else {
updateUsedCapacity(rc, nlm.getResourceByLabel(nodePartition, cluster),
minimumAllocation, queueResourceUsage, queueCapacities, nodePartition);
}
// Now in QueueMetrics, we only store available-resource-to-queue for
// default partition.
if (nodePartition == null
|| nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) {
childQueue.getMetrics().setAvailableResourcesToQueue(
getNonPartitionedMaxAvailableResourceToQueue(rc,
nlm.getResourceByLabel(RMNodeLabelsManager.NO_LABEL, cluster),
childQueue));
}
}
}
| 10,993 | 39.124088 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.util.resource.Resources;
@Private
@Evolving
public class ParentQueue extends AbstractCSQueue {
private static final Log LOG = LogFactory.getLog(ParentQueue.class);
protected final Set<CSQueue> childQueues;
private final boolean rootQueue;
final Comparator<CSQueue> nonPartitionedQueueComparator;
final PartitionedQueueComparator partitionQueueComparator;
volatile int numApplications;
private final CapacitySchedulerContext scheduler;
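  // Set when completedContainer() skips re-sorting childQueues; the next
  // allocation pass will re-sort them first (see
  // sortAndGetChildrenAllocationIterator).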
private boolean needToResortQueuesAtNextAllocation = false;
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
public ParentQueue(CapacitySchedulerContext cs,
String queueName, CSQueue parent, CSQueue old) throws IOException {
super(cs, queueName, parent, old);
this.scheduler = cs;
this.nonPartitionedQueueComparator = cs.getNonPartitionedQueueComparator();
this.partitionQueueComparator = cs.getPartitionedQueueComparator();
this.rootQueue = (parent == null);
float rawCapacity = cs.getConfiguration().getNonLabeledQueueCapacity(getQueuePath());
if (rootQueue &&
(rawCapacity != CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE)) {
throw new IllegalArgumentException("Illegal " +
"capacity of " + rawCapacity + " for queue " + queueName +
". Must be " + CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE);
}
this.childQueues = new TreeSet<CSQueue>(nonPartitionedQueueComparator);
setupQueueConfigs(cs.getClusterResource());
LOG.info("Initialized parent-queue " + queueName +
" name=" + queueName +
", fullname=" + getQueuePath());
}
synchronized void setupQueueConfigs(Resource clusterResource)
throws IOException {
super.setupQueueConfigs(clusterResource);
StringBuilder aclsString = new StringBuilder();
for (Map.Entry<AccessType, AccessControlList> e : acls.entrySet()) {
aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
}
StringBuilder labelStrBuilder = new StringBuilder();
if (accessibleLabels != null) {
for (String s : accessibleLabels) {
labelStrBuilder.append(s);
labelStrBuilder.append(",");
}
}
    LOG.info(queueName +
        ", capacity=" + this.queueCapacities.getCapacity() +
        ", absoluteCapacity=" + this.queueCapacities.getAbsoluteCapacity() +
        ", maxCapacity=" + this.queueCapacities.getMaximumCapacity() +
        ", absoluteMaxCapacity=" + this.queueCapacities.getAbsoluteMaximumCapacity() +
        ", state=" + state +
        ", acls=" + aclsString +
        ", labels=" + labelStrBuilder.toString() +
        ", reservationsContinueLooking=" + reservationsContinueLooking);
}
private static float PRECISION = 0.0005f; // 0.05% precision
synchronized void setChildQueues(Collection<CSQueue> childQueues) {
// Validate
float childCapacities = 0;
for (CSQueue queue : childQueues) {
childCapacities += queue.getCapacity();
}
float delta = Math.abs(1.0f - childCapacities); // crude way to check
// allow capacities being set to 0, and enforce child 0 if parent is 0
if (((queueCapacities.getCapacity() > 0) && (delta > PRECISION)) ||
((queueCapacities.getCapacity() == 0) && (childCapacities > 0))) {
throw new IllegalArgumentException("Illegal" +
" capacity of " + childCapacities +
" for children of queue " + queueName);
}
// check label capacities
for (String nodeLabel : queueCapacities.getExistingNodeLabels()) {
float capacityByLabel = queueCapacities.getCapacity(nodeLabel);
// check children's labels
float sum = 0;
for (CSQueue queue : childQueues) {
sum += queue.getQueueCapacities().getCapacity(nodeLabel);
}
if ((capacityByLabel > 0 && Math.abs(1.0f - sum) > PRECISION)
|| (capacityByLabel == 0) && (sum > 0)) {
throw new IllegalArgumentException("Illegal" + " capacity of "
+ sum + " for children of queue " + queueName
+ " for label=" + nodeLabel);
}
}
this.childQueues.clear();
this.childQueues.addAll(childQueues);
if (LOG.isDebugEnabled()) {
LOG.debug("setChildQueues: " + getChildQueuesToPrint());
}
}
@Override
public String getQueuePath() {
String parentPath = ((parent == null) ? "" : (parent.getQueuePath() + "."));
return parentPath + getQueueName();
}
@Override
public synchronized QueueInfo getQueueInfo(
boolean includeChildQueues, boolean recursive) {
QueueInfo queueInfo = getQueueInfo();
List<QueueInfo> childQueuesInfo = new ArrayList<QueueInfo>();
if (includeChildQueues) {
for (CSQueue child : childQueues) {
// Get queue information recursively?
childQueuesInfo.add(
child.getQueueInfo(recursive, recursive));
}
}
queueInfo.setChildQueues(childQueuesInfo);
return queueInfo;
}
private synchronized QueueUserACLInfo getUserAclInfo(
UserGroupInformation user) {
QueueUserACLInfo userAclInfo =
recordFactory.newRecordInstance(QueueUserACLInfo.class);
List<QueueACL> operations = new ArrayList<QueueACL>();
for (QueueACL operation : QueueACL.values()) {
if (hasAccess(operation, user)) {
operations.add(operation);
}
}
userAclInfo.setQueueName(getQueueName());
userAclInfo.setUserAcls(operations);
return userAclInfo;
}
@Override
public synchronized List<QueueUserACLInfo> getQueueUserAclInfo(
UserGroupInformation user) {
List<QueueUserACLInfo> userAcls = new ArrayList<QueueUserACLInfo>();
// Add parent queue acls
userAcls.add(getUserAclInfo(user));
// Add children queue acls
for (CSQueue child : childQueues) {
userAcls.addAll(child.getQueueUserAclInfo(user));
}
return userAcls;
}
  @Override
  public String toString() {
    return queueName + ": " +
        "numChildQueue= " + childQueues.size() + ", " +
        "capacity=" + queueCapacities.getCapacity() + ", " +
        "absoluteCapacity=" + queueCapacities.getAbsoluteCapacity() + ", " +
        "usedResources=" + queueUsage.getUsed() + ", " +
        "usedCapacity=" + getUsedCapacity() + ", " +
        "numApps=" + getNumApplications() + ", " +
        "numContainers=" + getNumContainers();
  }
@Override
public synchronized void reinitialize(CSQueue newlyParsedQueue,
Resource clusterResource) throws IOException {
// Sanity check
if (!(newlyParsedQueue instanceof ParentQueue) ||
!newlyParsedQueue.getQueuePath().equals(getQueuePath())) {
throw new IOException("Trying to reinitialize " + getQueuePath() +
" from " + newlyParsedQueue.getQueuePath());
}
ParentQueue newlyParsedParentQueue = (ParentQueue)newlyParsedQueue;
// Set new configs
setupQueueConfigs(clusterResource);
// Re-configure existing child queues and add new ones
// The CS has already checked to ensure all existing child queues are present!
Map<String, CSQueue> currentChildQueues = getQueues(childQueues);
Map<String, CSQueue> newChildQueues =
getQueues(newlyParsedParentQueue.childQueues);
for (Map.Entry<String, CSQueue> e : newChildQueues.entrySet()) {
String newChildQueueName = e.getKey();
CSQueue newChildQueue = e.getValue();
CSQueue childQueue = currentChildQueues.get(newChildQueueName);
// Check if the child-queue already exists
if (childQueue != null) {
// Re-init existing child queues
childQueue.reinitialize(newChildQueue, clusterResource);
LOG.info(getQueueName() + ": re-configured queue: " + childQueue);
} else {
// New child queue, do not re-init
// Set parent to 'this'
newChildQueue.setParent(this);
// Save in list of current child queues
currentChildQueues.put(newChildQueueName, newChildQueue);
LOG.info(getQueueName() + ": added new child queue: " + newChildQueue);
}
}
// Re-sort all queues
childQueues.clear();
childQueues.addAll(currentChildQueues.values());
}
Map<String, CSQueue> getQueues(Set<CSQueue> queues) {
Map<String, CSQueue> queuesMap = new HashMap<String, CSQueue>();
for (CSQueue queue : queues) {
queuesMap.put(queue.getQueueName(), queue);
}
return queuesMap;
}
@Override
public void submitApplication(ApplicationId applicationId, String user,
String queue) throws AccessControlException {
synchronized (this) {
// Sanity check
if (queue.equals(queueName)) {
throw new AccessControlException("Cannot submit application " +
"to non-leaf queue: " + queueName);
}
if (state != QueueState.RUNNING) {
throw new AccessControlException("Queue " + getQueuePath() +
" is STOPPED. Cannot accept submission of application: " +
applicationId);
}
addApplication(applicationId, user);
}
// Inform the parent queue
if (parent != null) {
try {
parent.submitApplication(applicationId, user, queue);
} catch (AccessControlException ace) {
LOG.info("Failed to submit application to parent-queue: " +
parent.getQueuePath(), ace);
removeApplication(applicationId, user);
throw ace;
}
}
}
@Override
public void submitApplicationAttempt(FiCaSchedulerApp application,
String userName) {
// submit attempt logic.
}
@Override
public void finishApplicationAttempt(FiCaSchedulerApp application,
String queue) {
// finish attempt logic.
}
private synchronized void addApplication(ApplicationId applicationId,
String user) {
++numApplications;
LOG.info("Application added -" +
" appId: " + applicationId +
" user: " + user +
" leaf-queue of parent: " + getQueueName() +
" #applications: " + getNumApplications());
}
@Override
public void finishApplication(ApplicationId application, String user) {
synchronized (this) {
removeApplication(application, user);
}
// Inform the parent queue
if (parent != null) {
parent.finishApplication(application, user);
}
}
private synchronized void removeApplication(ApplicationId applicationId,
String user) {
--numApplications;
LOG.info("Application removed -" +
" appId: " + applicationId +
" user: " + user +
" leaf-queue of parent: " + getQueueName() +
" #applications: " + getNumApplications());
}
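  // Allocation entry point for a parent queue: check partition access and
  // pending demand, then repeatedly offer the node to child queues until no
  // child accepts, queue limits are hit, or a non-root/off-switch assignment
  // stops the loop.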
@Override
public synchronized CSAssignment assignContainers(Resource clusterResource,
FiCaSchedulerNode node, ResourceLimits resourceLimits,
SchedulingMode schedulingMode) {
// if our queue cannot access this node, just return
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY
&& !accessibleToPartition(node.getPartition())) {
return CSAssignment.NULL_ASSIGNMENT;
}
    // Check if this queue needs more resources; simply skip allocation if
    // this queue doesn't need more.
if (!super.hasPendingResourceRequest(node.getPartition(),
clusterResource, schedulingMode)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip this queue=" + getQueuePath()
+ ", because it doesn't need more resource, schedulingMode="
+ schedulingMode.name() + " node-partition=" + node.getPartition());
}
return CSAssignment.NULL_ASSIGNMENT;
}
CSAssignment assignment =
new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL);
while (canAssign(clusterResource, node)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to assign containers to child-queue of "
+ getQueueName());
}
// Are we over maximum-capacity for this queue?
// This will also consider parent's limits and also continuous reservation
// looking
if (!super.canAssignToThisQueue(clusterResource, node.getPartition(),
resourceLimits, Resources.createResource(
getMetrics().getReservedMB(), getMetrics()
.getReservedVirtualCores()), schedulingMode)) {
break;
}
// Schedule
CSAssignment assignedToChild =
assignContainersToChildQueues(clusterResource, node, resourceLimits,
schedulingMode);
assignment.setType(assignedToChild.getType());
// Done if no child-queue assigned anything
if (Resources.greaterThan(
resourceCalculator, clusterResource,
assignedToChild.getResource(), Resources.none())) {
// Track resource utilization for the parent-queue
super.allocateResource(clusterResource, assignedToChild.getResource(),
node.getPartition());
// Track resource utilization in this pass of the scheduler
Resources
.addTo(assignment.getResource(), assignedToChild.getResource());
Resources.addTo(assignment.getAssignmentInformation().getAllocated(),
assignedToChild.getAssignmentInformation().getAllocated());
Resources.addTo(assignment.getAssignmentInformation().getReserved(),
assignedToChild.getAssignmentInformation().getReserved());
assignment.getAssignmentInformation().incrAllocations(
assignedToChild.getAssignmentInformation().getNumAllocations());
assignment.getAssignmentInformation().incrReservations(
assignedToChild.getAssignmentInformation().getNumReservations());
assignment
.getAssignmentInformation()
.getAllocationDetails()
.addAll(
assignedToChild.getAssignmentInformation().getAllocationDetails());
assignment
.getAssignmentInformation()
.getReservationDetails()
.addAll(
assignedToChild.getAssignmentInformation()
.getReservationDetails());
LOG.info("assignedContainer" +
" queue=" + getQueueName() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + queueUsage.getUsed() +
" cluster=" + clusterResource);
} else {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("ParentQ=" + getQueueName()
+ " assignedSoFarInThisIteration=" + assignment.getResource()
+ " usedCapacity=" + getUsedCapacity()
+ " absoluteUsedCapacity=" + getAbsoluteUsedCapacity());
}
// Do not assign more than one container if this isn't the root queue
// or if we've already assigned an off-switch container
if (!rootQueue || assignment.getType() == NodeType.OFF_SWITCH) {
if (LOG.isDebugEnabled()) {
if (rootQueue && assignment.getType() == NodeType.OFF_SWITCH) {
LOG.debug("Not assigning more than one off-switch container," +
" assignments so far: " + assignment);
}
}
break;
}
}
return assignment;
}
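  // A node is assignable only if it has no outstanding reservation and at
  // least the minimum allocation still available.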
private boolean canAssign(Resource clusterResource, FiCaSchedulerNode node) {
return (node.getReservedContainer() == null) &&
Resources.greaterThanOrEqual(resourceCalculator, clusterResource,
node.getAvailableResource(), minimumAllocation);
}
private ResourceLimits getResourceLimitsOfChild(CSQueue child,
Resource clusterResource, ResourceLimits parentLimits) {
// Set resource-limit of a given child, child.limit =
// min(my.limit - my.used + child.used, child.max)
// Parent available resource = parent-limit - parent-used-resource
Resource parentMaxAvailableResource =
Resources.subtract(parentLimits.getLimit(), getUsedResources());
// Child's limit = parent-available-resource + child-used
Resource childLimit =
Resources.add(parentMaxAvailableResource, child.getUsedResources());
// Get child's max resource
Resource childConfiguredMaxResource =
Resources.multiplyAndNormalizeDown(resourceCalculator, labelManager
.getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
child.getAbsoluteMaximumCapacity(), minimumAllocation);
// Child's limit should be capped by child configured max resource
childLimit =
Resources.min(resourceCalculator, clusterResource, childLimit,
childConfiguredMaxResource);
// Normalize before return
childLimit =
Resources.roundDown(resourceCalculator, childLimit, minimumAllocation);
return new ResourceLimits(childLimit);
}
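  // Returns children in allocation order: for the default partition the
  // TreeSet (ordered by used capacity) is reused, re-sorting it first if a
  // previous pass skipped the sort; for other partitions a copy is sorted
  // with the partition-aware comparator.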
private Iterator<CSQueue> sortAndGetChildrenAllocationIterator(FiCaSchedulerNode node) {
if (node.getPartition().equals(RMNodeLabelsManager.NO_LABEL)) {
if (needToResortQueuesAtNextAllocation) {
// If we skipped resort queues last time, we need to re-sort queue
// before allocation
List<CSQueue> childrenList = new ArrayList<>(childQueues);
childQueues.clear();
childQueues.addAll(childrenList);
needToResortQueuesAtNextAllocation = false;
}
return childQueues.iterator();
}
partitionQueueComparator.setPartitionToLookAt(node.getPartition());
List<CSQueue> childrenList = new ArrayList<>(childQueues);
Collections.sort(childrenList, partitionQueueComparator);
return childrenList.iterator();
}
private synchronized CSAssignment assignContainersToChildQueues(
Resource cluster, FiCaSchedulerNode node, ResourceLimits limits,
SchedulingMode schedulingMode) {
CSAssignment assignment =
new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL);
printChildQueues();
// Try to assign to most 'under-served' sub-queue
for (Iterator<CSQueue> iter = sortAndGetChildrenAllocationIterator(node); iter
.hasNext();) {
CSQueue childQueue = iter.next();
if(LOG.isDebugEnabled()) {
LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath()
+ " stats: " + childQueue);
}
// Get ResourceLimits of child queue before assign containers
ResourceLimits childLimits =
getResourceLimitsOfChild(childQueue, cluster, limits);
assignment = childQueue.assignContainers(cluster, node,
childLimits, schedulingMode);
if(LOG.isDebugEnabled()) {
LOG.debug("Assigned to queue: " + childQueue.getQueuePath() +
" stats: " + childQueue + " --> " +
assignment.getResource() + ", " + assignment.getType());
}
// If we do assign, remove the queue and re-insert in-order to re-sort
if (Resources.greaterThan(
resourceCalculator, cluster,
assignment.getResource(), Resources.none())) {
          // Only update childQueues when we are doing non-partitioned node
          // allocation.
if (RMNodeLabelsManager.NO_LABEL.equals(node.getPartition())) {
// Remove and re-insert to sort
iter.remove();
LOG.info("Re-sorting assigned queue: " + childQueue.getQueuePath()
+ " stats: " + childQueue);
childQueues.add(childQueue);
if (LOG.isDebugEnabled()) {
printChildQueues();
}
}
break;
}
}
return assignment;
}
String getChildQueuesToPrint() {
StringBuilder sb = new StringBuilder();
for (CSQueue q : childQueues) {
      sb.append(q.getQueuePath() +
          " usedCapacity=(" + q.getUsedCapacity() + "), " +
          " label=("
          + StringUtils.join(q.getAccessibleNodeLabels().iterator(), ",")
          + ")");
}
return sb.toString();
}
private void printChildQueues() {
if (LOG.isDebugEnabled()) {
LOG.debug("printChildQueues - queue: " + getQueuePath()
+ " child-queues: " + getChildQueuesToPrint());
}
}
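  // Release the completed container's resources from this queue, optionally
  // re-sort the child queue that completed it, and propagate the completion
  // up to the parent queue.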
@Override
public void completedContainer(Resource clusterResource,
FiCaSchedulerApp application, FiCaSchedulerNode node,
RMContainer rmContainer, ContainerStatus containerStatus,
RMContainerEventType event, CSQueue completedChildQueue,
boolean sortQueues) {
if (application != null) {
// Careful! Locking order is important!
// Book keeping
synchronized (this) {
super.releaseResource(clusterResource, rmContainer.getContainer()
.getResource(), node.getPartition());
LOG.info("completedContainer" +
" queue=" + getQueueName() +
" usedCapacity=" + getUsedCapacity() +
" absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
" used=" + queueUsage.getUsed() +
" cluster=" + clusterResource);
        // Note that this uses an iterator over childQueues, so it can't be
        // called if we are already iterating over childQueues (e.g. from
        // assignContainersToChildQueues).
if (sortQueues) {
// reinsert the updated queue
for (Iterator<CSQueue> iter = childQueues.iterator();
iter.hasNext();) {
CSQueue csqueue = iter.next();
if(csqueue.equals(completedChildQueue)) {
iter.remove();
LOG.info("Re-sorting completed queue: " + csqueue.getQueuePath() +
" stats: " + csqueue);
childQueues.add(csqueue);
break;
}
}
}
        // If we skipped sorting queues this time, we need to re-sort them
        // before the next allocation so we allocate from the least-used queue
        // (or the order defined by the queue policy).
needToResortQueuesAtNextAllocation = !sortQueues;
}
// Inform the parent
if (parent != null) {
// complete my parent
parent.completedContainer(clusterResource, application,
node, rmContainer, null, event, this, sortQueues);
}
}
}
@Override
public synchronized void updateClusterResource(Resource clusterResource,
ResourceLimits resourceLimits) {
// Update all children
for (CSQueue childQueue : childQueues) {
// Get ResourceLimits of child queue before assign containers
ResourceLimits childLimits =
getResourceLimitsOfChild(childQueue, clusterResource, resourceLimits);
childQueue.updateClusterResource(clusterResource, childLimits);
}
CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
minimumAllocation, this, labelManager, null);
}
@Override
public synchronized List<CSQueue> getChildQueues() {
return new ArrayList<CSQueue>(childQueues);
}
@Override
public void recoverContainer(Resource clusterResource,
SchedulerApplicationAttempt attempt, RMContainer rmContainer) {
if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
return;
}
// Careful! Locking order is important!
synchronized (this) {
FiCaSchedulerNode node =
scheduler.getNode(rmContainer.getContainer().getNodeId());
super.allocateResource(clusterResource, rmContainer.getContainer()
.getResource(), node.getPartition());
}
if (parent != null) {
parent.recoverContainer(clusterResource, attempt, rmContainer);
}
}
@Override
public ActiveUsersManager getActiveUsersManager() {
// Should never be called since all applications are submitted to LeafQueues
return null;
}
@Override
public synchronized void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps) {
for (CSQueue queue : childQueues) {
queue.collectSchedulerApplications(apps);
}
}
@Override
public void attachContainer(Resource clusterResource,
FiCaSchedulerApp application, RMContainer rmContainer) {
if (application != null) {
FiCaSchedulerNode node =
scheduler.getNode(rmContainer.getContainer().getNodeId());
super.allocateResource(clusterResource, rmContainer.getContainer()
.getResource(), node.getPartition());
LOG.info("movedContainer" + " queueMoveIn=" + getQueueName()
+ " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity="
+ getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed() + " cluster="
+ clusterResource);
// Inform the parent
if (parent != null) {
parent.attachContainer(clusterResource, application, rmContainer);
}
}
}
@Override
public void detachContainer(Resource clusterResource,
FiCaSchedulerApp application, RMContainer rmContainer) {
if (application != null) {
FiCaSchedulerNode node =
scheduler.getNode(rmContainer.getContainer().getNodeId());
super.releaseResource(clusterResource,
rmContainer.getContainer().getResource(),
node.getPartition());
LOG.info("movedContainer" + " queueMoveOut=" + getQueueName()
+ " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity="
+ getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed() + " cluster="
+ clusterResource);
// Inform the parent
if (parent != null) {
parent.detachContainer(clusterResource, application, rmContainer);
}
}
}
public synchronized int getNumApplications() {
return numApplications;
}
}
| 28,720 | 36.494778 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This represents a dynamic queue managed by the {@link ReservationSystem}.
 * From the user perspective this is equivalent to a LeafQueue that respects
 * reservations, but functionality-wise it is a sub-class of ParentQueue.
 */
public class PlanQueue extends ParentQueue {
private static final Logger LOG = LoggerFactory.getLogger(PlanQueue.class);
private int maxAppsForReservation;
private int maxAppsPerUserForReservation;
private int userLimit;
private float userLimitFactor;
protected CapacitySchedulerContext schedulerContext;
private boolean showReservationsAsQueues;
public PlanQueue(CapacitySchedulerContext cs, String queueName,
CSQueue parent, CSQueue old) throws IOException {
super(cs, queueName, parent, old);
this.schedulerContext = cs;
// Set the reservation queue attributes for the Plan
CapacitySchedulerConfiguration conf = cs.getConfiguration();
String queuePath = super.getQueuePath();
int maxAppsForReservation = conf.getMaximumApplicationsPerQueue(queuePath);
showReservationsAsQueues = conf.getShowReservationAsQueues(queuePath);
if (maxAppsForReservation < 0) {
maxAppsForReservation =
(int) (CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS * super
.getAbsoluteCapacity());
}
int userLimit = conf.getUserLimit(queuePath);
float userLimitFactor = conf.getUserLimitFactor(queuePath);
int maxAppsPerUserForReservation =
(int) (maxAppsForReservation * (userLimit / 100.0f) * userLimitFactor);
updateQuotas(userLimit, userLimitFactor, maxAppsForReservation,
maxAppsPerUserForReservation);
StringBuffer queueInfo = new StringBuffer();
queueInfo.append("Created Plan Queue: ").append(queueName)
.append("\nwith capacity: [").append(super.getCapacity())
.append("]\nwith max capacity: [").append(super.getMaximumCapacity())
.append("\nwith max reservation apps: [").append(maxAppsForReservation)
.append("]\nwith max reservation apps per user: [")
.append(maxAppsPerUserForReservation).append("]\nwith user limit: [")
.append(userLimit).append("]\nwith user limit factor: [")
.append(userLimitFactor).append("].");
LOG.info(queueInfo.toString());
}
@Override
public synchronized void reinitialize(CSQueue newlyParsedQueue,
Resource clusterResource) throws IOException {
// Sanity check
if (!(newlyParsedQueue instanceof PlanQueue)
|| !newlyParsedQueue.getQueuePath().equals(getQueuePath())) {
throw new IOException("Trying to reinitialize " + getQueuePath()
+ " from " + newlyParsedQueue.getQueuePath());
}
PlanQueue newlyParsedParentQueue = (PlanQueue) newlyParsedQueue;
if (newlyParsedParentQueue.getChildQueues().size() > 0) {
      throw new IOException(
          "Reservable Queue should not have sub-queues in the"
              + " configuration");
}
// Set new configs
setupQueueConfigs(clusterResource);
updateQuotas(newlyParsedParentQueue.userLimit,
newlyParsedParentQueue.userLimitFactor,
newlyParsedParentQueue.maxAppsForReservation,
newlyParsedParentQueue.maxAppsPerUserForReservation);
// run reinitialize on each existing queue, to trigger absolute cap
// recomputations
for (CSQueue res : this.getChildQueues()) {
res.reinitialize(res, clusterResource);
}
showReservationsAsQueues = newlyParsedParentQueue.showReservationsAsQueues;
}
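  // Add a dynamically created reservation queue to this Plan; it must be
  // added with zero capacity.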
synchronized void addChildQueue(CSQueue newQueue)
throws SchedulerDynamicEditException {
if (newQueue.getCapacity() > 0) {
throw new SchedulerDynamicEditException("Queue " + newQueue
+ " being added has non zero capacity.");
}
boolean added = this.childQueues.add(newQueue);
if (LOG.isDebugEnabled()) {
LOG.debug("updateChildQueues (action: add queue): " + added + " "
+ getChildQueuesToPrint());
}
}
synchronized void removeChildQueue(CSQueue remQueue)
throws SchedulerDynamicEditException {
if (remQueue.getCapacity() > 0) {
throw new SchedulerDynamicEditException("Queue " + remQueue
+ " being removed has non zero capacity.");
}
Iterator<CSQueue> qiter = childQueues.iterator();
while (qiter.hasNext()) {
CSQueue cs = qiter.next();
if (cs.equals(remQueue)) {
qiter.remove();
if (LOG.isDebugEnabled()) {
LOG.debug("Removed child queue: {}", cs.getQueueName());
}
}
}
}
protected synchronized float sumOfChildCapacities() {
float ret = 0;
for (CSQueue l : childQueues) {
ret += l.getCapacity();
}
return ret;
}
private void updateQuotas(int userLimit, float userLimitFactor,
int maxAppsForReservation, int maxAppsPerUserForReservation) {
this.userLimit = userLimit;
this.userLimitFactor = userLimitFactor;
this.maxAppsForReservation = maxAppsForReservation;
this.maxAppsPerUserForReservation = maxAppsPerUserForReservation;
}
  /**
   * Maximum number of applications for each of the reservations in this Plan.
   *
   * @return maxAppsForReservation
   */
public int getMaxApplicationsForReservations() {
return maxAppsForReservation;
}
  /**
   * Maximum number of applications per user for each of the reservations in
   * this Plan.
   *
   * @return maxAppsPerUserForReservation
   */
public int getMaxApplicationsPerUserForReservation() {
return maxAppsPerUserForReservation;
}
/**
* User limit value for each of the reservations in this Plan.
*
* @return userLimit
*/
public int getUserLimitForReservation() {
return userLimit;
}
/**
* User limit factor value for each of the reservations in this Plan.
*
* @return userLimitFactor
*/
public float getUserLimitFactor() {
return userLimitFactor;
}
  /**
   * Determine whether to hide or show the ReservationQueues.
   */
public boolean showReservationsAsQueues() {
return showReservationsAsQueues;
}
}
| 7,288 | 34.383495 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PartitionedQueueComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.Comparator;
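/**
 * Comparator used for per-partition allocation ordering: queues that can
 * access the partition come first, then lower used capacity on that
 * partition, then higher configured capacity, then queue name.
 */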
public class PartitionedQueueComparator implements Comparator<CSQueue> {
private String partitionToLookAt = null;
public void setPartitionToLookAt(String partitionToLookAt) {
this.partitionToLookAt = partitionToLookAt;
}
@Override
public int compare(CSQueue q1, CSQueue q2) {
    /*
     * 1. Check accessibility to the given partition: if one queue is
     * accessible and the other is not, the accessible queue goes first.
     */
boolean q1Accessible =
q1.getAccessibleNodeLabels().contains(partitionToLookAt);
boolean q2Accessible =
q2.getAccessibleNodeLabels().contains(partitionToLookAt);
if (q1Accessible && !q2Accessible) {
return -1;
} else if (!q1Accessible && q2Accessible) {
return 1;
}
    /*
     * 2. When the two queues have the same accessibility, decide which goes
     * first: for now we simply compare their used capacity on the partition
     * to look at.
     */
float used1 = q1.getQueueCapacities().getUsedCapacity(partitionToLookAt);
float used2 = q2.getQueueCapacities().getUsedCapacity(partitionToLookAt);
if (Math.abs(used1 - used2) < 1e-6) {
      // When used capacities are the same, compare their guaranteed capacity
      float cap1 = q1.getQueueCapacities().getCapacity(partitionToLookAt);
      float cap2 = q2.getQueueCapacities().getCapacity(partitionToLookAt);
      // when cap1 == cap2, compare queue names
if (Math.abs(cap1 - cap2) < 1e-6) {
return q1.getQueueName().compareTo(q2.getQueueName());
}
return Float.compare(cap2, cap1);
}
return Float.compare(used1, used2);
}
}
| 2,520 | 35.536232 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueNotFoundException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerHealth;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QueueMapping;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QueueMapping.MappingType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerRescheduledEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeLabelsUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@LimitedPrivate("yarn")
@Evolving
@SuppressWarnings("unchecked")
public class CapacityScheduler extends
AbstractYarnScheduler<FiCaSchedulerApp, FiCaSchedulerNode> implements
PreemptableResourceScheduler, CapacitySchedulerContext, Configurable {
private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
private YarnAuthorizationProvider authorizer;
private CSQueue root;
// timeout to join when we stop this service
protected final long THREAD_JOIN_TIMEOUT_MS = 1000;
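  // Orders sibling queues by used capacity (ascending), breaking ties by
  // queue path; used when allocating on the default (no-label) partition.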
static final Comparator<CSQueue> nonPartitionedQueueComparator =
new Comparator<CSQueue>() {
@Override
public int compare(CSQueue q1, CSQueue q2) {
if (q1.getUsedCapacity() < q2.getUsedCapacity()) {
return -1;
} else if (q1.getUsedCapacity() > q2.getUsedCapacity()) {
return 1;
}
return q1.getQueuePath().compareTo(q2.getQueuePath());
}
};
static final PartitionedQueueComparator partitionedQueueComparator =
new PartitionedQueueComparator();
public static final Comparator<FiCaSchedulerApp> applicationComparator =
new Comparator<FiCaSchedulerApp>() {
@Override
public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
if (!a1.getPriority().equals(a2.getPriority())) {
return a1.getPriority().compareTo(a2.getPriority());
}
return a1.getApplicationId().compareTo(a2.getApplicationId());
}
};
@Override
public void setConf(Configuration conf) {
yarnConf = conf;
}
private void validateConf(Configuration conf) {
// validate scheduler memory allocation setting
int minMem = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int maxMem = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
if (minMem <= 0 || minMem > maxMem) {
throw new YarnRuntimeException("Invalid resource scheduler memory"
+ " allocation configuration"
+ ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB
+ "=" + minMem
+ ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB
+ "=" + maxMem + ", min and max should be greater than 0"
+ ", max should be no smaller than min.");
}
// validate scheduler vcores allocation setting
int minVcores = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
int maxVcores = conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
if (minVcores <= 0 || minVcores > maxVcores) {
throw new YarnRuntimeException("Invalid resource scheduler vcores"
+ " allocation configuration"
+ ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES
+ "=" + minVcores
+ ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES
+ "=" + maxVcores + ", min and max should be greater than 0"
+ ", max should be no smaller than min.");
}
}
@Override
public Configuration getConf() {
return yarnConf;
}
private CapacitySchedulerConfiguration conf;
private Configuration yarnConf;
private Map<String, CSQueue> queues = new ConcurrentHashMap<String, CSQueue>();
private AtomicInteger numNodeManagers = new AtomicInteger(0);
private ResourceCalculator calculator;
private boolean usePortForNodeName;
private boolean scheduleAsynchronously;
private AsyncScheduleThread asyncSchedulerThread;
private RMNodeLabelsManager labelManager;
private SchedulerHealth schedulerHealth = new SchedulerHealth();
long lastNodeUpdateTime;
private Priority maxClusterLevelAppPriority;
/**
* EXPERT
*/
private long asyncScheduleInterval;
private static final String ASYNC_SCHEDULER_INTERVAL =
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
+ ".scheduling-interval-ms";
private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
private boolean overrideWithQueueMappings = false;
private List<QueueMapping> mappings = null;
private Groups groups;
@VisibleForTesting
public synchronized String getMappedQueueForTest(String user)
throws IOException {
return getMappedQueue(user);
}
public CapacityScheduler() {
super(CapacityScheduler.class.getName());
}
@Override
public QueueMetrics getRootQueueMetrics() {
return root.getMetrics();
}
public CSQueue getRootQueue() {
return root;
}
@Override
public CapacitySchedulerConfiguration getConfiguration() {
return conf;
}
@Override
public synchronized RMContainerTokenSecretManager
getContainerTokenSecretManager() {
return this.rmContext.getContainerTokenSecretManager();
}
@Override
public Comparator<FiCaSchedulerApp> getApplicationComparator() {
return applicationComparator;
}
@Override
public ResourceCalculator getResourceCalculator() {
return calculator;
}
@Override
public Comparator<CSQueue> getNonPartitionedQueueComparator() {
return nonPartitionedQueueComparator;
}
@Override
public PartitionedQueueComparator getPartitionedQueueComparator() {
return partitionedQueueComparator;
}
@Override
public int getNumClusterNodes() {
return numNodeManagers.get();
}
@Override
public synchronized RMContext getRMContext() {
return this.rmContext;
}
@Override
public synchronized void setRMContext(RMContext rmContext) {
this.rmContext = rmContext;
}
private synchronized void initScheduler(Configuration configuration) throws
IOException {
this.conf = loadCapacitySchedulerConfiguration(configuration);
validateConf(this.conf);
this.minimumAllocation = this.conf.getMinimumAllocation();
initMaximumResourceCapability(this.conf.getMaximumAllocation());
this.calculator = this.conf.getResourceCalculator();
this.usePortForNodeName = this.conf.getUsePortForNodeName();
this.applications =
new ConcurrentHashMap<ApplicationId,
SchedulerApplication<FiCaSchedulerApp>>();
this.labelManager = rmContext.getNodeLabelManager();
authorizer = YarnAuthorizationProvider.getInstance(yarnConf);
initializeQueues(this.conf);
scheduleAsynchronously = this.conf.getScheduleAynschronously();
asyncScheduleInterval =
this.conf.getLong(ASYNC_SCHEDULER_INTERVAL,
DEFAULT_ASYNC_SCHEDULER_INTERVAL);
if (scheduleAsynchronously) {
asyncSchedulerThread = new AsyncScheduleThread(this);
}
maxClusterLevelAppPriority = Priority.newInstance(yarnConf.getInt(
YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY,
YarnConfiguration.DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY));
LOG.info("Initialized CapacityScheduler with " +
"calculator=" + getResourceCalculator().getClass() + ", " +
"minimumAllocation=<" + getMinimumResourceCapability() + ">, " +
"maximumAllocation=<" + getMaximumResourceCapability() + ">, " +
"asynchronousScheduling=" + scheduleAsynchronously + ", " +
"asyncScheduleInterval=" + asyncScheduleInterval + "ms");
}
private synchronized void startSchedulerThreads() {
if (scheduleAsynchronously) {
Preconditions.checkNotNull(asyncSchedulerThread,
"asyncSchedulerThread is null");
asyncSchedulerThread.start();
}
}
@Override
public void serviceInit(Configuration conf) throws Exception {
Configuration configuration = new Configuration(conf);
super.serviceInit(conf);
initScheduler(configuration);
}
@Override
public void serviceStart() throws Exception {
startSchedulerThreads();
super.serviceStart();
}
@Override
public void serviceStop() throws Exception {
synchronized (this) {
if (scheduleAsynchronously && asyncSchedulerThread != null) {
asyncSchedulerThread.interrupt();
asyncSchedulerThread.join(THREAD_JOIN_TIMEOUT_MS);
}
}
super.serviceStop();
}
@Override
public synchronized void
reinitialize(Configuration conf, RMContext rmContext) throws IOException {
Configuration configuration = new Configuration(conf);
CapacitySchedulerConfiguration oldConf = this.conf;
this.conf = loadCapacitySchedulerConfiguration(configuration);
validateConf(this.conf);
try {
LOG.info("Re-initializing queues...");
refreshMaximumAllocation(this.conf.getMaximumAllocation());
reinitializeQueues(this.conf);
} catch (Throwable t) {
this.conf = oldConf;
refreshMaximumAllocation(this.conf.getMaximumAllocation());
throw new IOException("Failed to re-init queues", t);
}
}
long getAsyncScheduleInterval() {
return asyncScheduleInterval;
}
private final static Random random = new Random(System.currentTimeMillis());
/**
* Schedule on all nodes by starting at a random point.
* @param cs
*/
static void schedule(CapacityScheduler cs) {
// First randomize the start point
int current = 0;
Collection<FiCaSchedulerNode> nodes = cs.getAllNodes().values();
int start = random.nextInt(nodes.size());
for (FiCaSchedulerNode node : nodes) {
if (current++ >= start) {
cs.allocateContainersToNode(node);
}
}
    // Now do a full pass over all nodes so that none are missed
for (FiCaSchedulerNode node : nodes) {
cs.allocateContainersToNode(node);
}
try {
Thread.sleep(cs.getAsyncScheduleInterval());
} catch (InterruptedException e) {}
}
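  /**
   * Daemon thread that repeatedly runs schedule() while scheduling is enabled
   * via beginSchedule(); otherwise it polls every 100ms.
   */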
static class AsyncScheduleThread extends Thread {
private final CapacityScheduler cs;
private AtomicBoolean runSchedules = new AtomicBoolean(false);
public AsyncScheduleThread(CapacityScheduler cs) {
this.cs = cs;
setDaemon(true);
}
@Override
public void run() {
while (true) {
if (!runSchedules.get()) {
try {
Thread.sleep(100);
} catch (InterruptedException ie) {}
} else {
schedule(cs);
}
}
}
public void beginSchedule() {
runSchedules.set(true);
}
public void suspendSchedule() {
runSchedules.set(false);
}
}
@Private
public static final String ROOT_QUEUE =
CapacitySchedulerConfiguration.PREFIX + CapacitySchedulerConfiguration.ROOT;
static class QueueHook {
public CSQueue hook(CSQueue queue) {
return queue;
}
}
private static final QueueHook noop = new QueueHook();
private void initializeQueueMappings() throws IOException {
overrideWithQueueMappings = conf.getOverrideWithQueueMappings();
LOG.info("Initialized queue mappings, override: "
+ overrideWithQueueMappings);
// Get new user/group mappings
List<QueueMapping> newMappings = conf.getQueueMappings();
//check if mappings refer to valid queues
for (QueueMapping mapping : newMappings) {
if (!mapping.queue.equals(CURRENT_USER_MAPPING) &&
!mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
CSQueue queue = queues.get(mapping.queue);
if (queue == null || !(queue instanceof LeafQueue)) {
throw new IOException(
"mapping contains invalid or non-leaf queue " + mapping.queue);
}
}
}
//apply the new mappings since they are valid
mappings = newMappings;
// initialize groups if mappings are present
if (mappings.size() > 0) {
groups = new Groups(conf);
}
}
@Lock(CapacityScheduler.class)
private void initializeQueues(CapacitySchedulerConfiguration conf)
throws IOException {
root =
parseQueue(this, conf, null, CapacitySchedulerConfiguration.ROOT,
queues, queues, noop);
labelManager.reinitializeQueueLabels(getQueueToLabels());
LOG.info("Initialized root queue " + root);
initializeQueueMappings();
setQueueAcls(authorizer, queues);
}
@Lock(CapacityScheduler.class)
private void reinitializeQueues(CapacitySchedulerConfiguration conf)
throws IOException {
// Parse new queues
Map<String, CSQueue> newQueues = new HashMap<String, CSQueue>();
CSQueue newRoot =
parseQueue(this, conf, null, CapacitySchedulerConfiguration.ROOT,
newQueues, queues, noop);
// Ensure all existing queues are still present
validateExistingQueues(queues, newQueues);
// Add new queues
addNewQueues(queues, newQueues);
// Re-configure queues
root.reinitialize(newRoot, clusterResource);
initializeQueueMappings();
// Re-calculate headroom for active applications
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
labelManager.reinitializeQueueLabels(getQueueToLabels());
setQueueAcls(authorizer, queues);
}
@VisibleForTesting
public static void setQueueAcls(YarnAuthorizationProvider authorizer,
Map<String, CSQueue> queues) throws IOException {
for (CSQueue queue : queues.values()) {
AbstractCSQueue csQueue = (AbstractCSQueue) queue;
authorizer.setPermission(csQueue.getPrivilegedEntity(),
csQueue.getACLs(), UserGroupInformation.getCurrentUser());
}
}
private Map<String, Set<String>> getQueueToLabels() {
Map<String, Set<String>> queueToLabels = new HashMap<String, Set<String>>();
for (CSQueue queue : queues.values()) {
queueToLabels.put(queue.getQueueName(), queue.getAccessibleNodeLabels());
}
return queueToLabels;
}
  /**
   * Ensure all existing queues are still present. Queues cannot be deleted.
   * @param queues existing queues
   * @param newQueues new queues
   */
@Lock(CapacityScheduler.class)
private void validateExistingQueues(
Map<String, CSQueue> queues, Map<String, CSQueue> newQueues)
throws IOException {
// check that all static queues are included in the newQueues list
for (Map.Entry<String, CSQueue> e : queues.entrySet()) {
if (!(e.getValue() instanceof ReservationQueue)) {
String queueName = e.getKey();
CSQueue oldQueue = e.getValue();
CSQueue newQueue = newQueues.get(queueName);
if (null == newQueue) {
throw new IOException(queueName + " cannot be found during refresh!");
} else if (!oldQueue.getQueuePath().equals(newQueue.getQueuePath())) {
throw new IOException(queueName + " is moved from:"
+ oldQueue.getQueuePath() + " to:" + newQueue.getQueuePath()
+ " after refresh, which is not allowed.");
}
}
}
}
/**
* Add the new queues (only) to our list of queues...
* ... be careful, do not overwrite existing queues.
* @param queues
* @param newQueues
*/
@Lock(CapacityScheduler.class)
private void addNewQueues(
Map<String, CSQueue> queues, Map<String, CSQueue> newQueues)
{
for (Map.Entry<String, CSQueue> e : newQueues.entrySet()) {
String queueName = e.getKey();
CSQueue queue = e.getValue();
if (!queues.containsKey(queueName)) {
queues.put(queueName, queue);
}
}
}
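  /**
   * Recursively build the queue hierarchy from configuration: names with no
   * child queues become LeafQueue (or PlanQueue when marked reservable),
   * others become ParentQueue with their children parsed and attached.
   */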
@Lock(CapacityScheduler.class)
static CSQueue parseQueue(
CapacitySchedulerContext csContext,
CapacitySchedulerConfiguration conf,
CSQueue parent, String queueName, Map<String, CSQueue> queues,
Map<String, CSQueue> oldQueues,
QueueHook hook) throws IOException {
CSQueue queue;
String fullQueueName =
(parent == null) ? queueName
: (parent.getQueuePath() + "." + queueName);
String[] childQueueNames =
conf.getQueues(fullQueueName);
boolean isReservableQueue = conf.isReservable(fullQueueName);
if (childQueueNames == null || childQueueNames.length == 0) {
if (null == parent) {
throw new IllegalStateException(
"Queue configuration missing child queue names for " + queueName);
}
// Check if the queue will be dynamically managed by the Reservation
// system
if (isReservableQueue) {
queue =
new PlanQueue(csContext, queueName, parent,
oldQueues.get(queueName));
} else {
queue =
new LeafQueue(csContext, queueName, parent,
oldQueues.get(queueName));
// Used only for unit tests
queue = hook.hook(queue);
}
} else {
if (isReservableQueue) {
throw new IllegalStateException(
"Only Leaf Queues can be reservable for " + queueName);
}
ParentQueue parentQueue =
new ParentQueue(csContext, queueName, parent, oldQueues.get(queueName));
// Used only for unit tests
queue = hook.hook(parentQueue);
List<CSQueue> childQueues = new ArrayList<CSQueue>();
for (String childQueueName : childQueueNames) {
CSQueue childQueue =
parseQueue(csContext, conf, queue, childQueueName,
queues, oldQueues, hook);
childQueues.add(childQueue);
}
parentQueue.setChildQueues(childQueues);
}
    if (queue instanceof LeafQueue && queues.containsKey(queueName)
        && queues.get(queueName) instanceof LeafQueue) {
      throw new IOException("Two leaf queues were named " + queueName
        + ". Leaf queue names must be distinct");
    }
queues.put(queueName, queue);
LOG.info("Initialized queue: " + queue);
return queue;
}
public CSQueue getQueue(String queueName) {
if (queueName == null) {
return null;
}
return queues.get(queueName);
}
private static final String CURRENT_USER_MAPPING = "%user";
private static final String PRIMARY_GROUP_MAPPING = "%primary_group";
private String getMappedQueue(String user) throws IOException {
for (QueueMapping mapping : mappings) {
if (mapping.type == MappingType.USER) {
if (mapping.source.equals(CURRENT_USER_MAPPING)) {
if (mapping.queue.equals(CURRENT_USER_MAPPING)) {
return user;
}
else if (mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
return groups.getGroups(user).get(0);
}
else {
return mapping.queue;
}
}
if (user.equals(mapping.source)) {
return mapping.queue;
}
}
if (mapping.type == MappingType.GROUP) {
for (String userGroups : groups.getGroups(user)) {
if (userGroups.equals(mapping.source)) {
return mapping.queue;
}
}
}
}
return null;
}
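  // Apply queue mappings (if configured), validate that the target queue
  // exists and is a leaf, then submit the application to it and record it;
  // recovered applications skip the APP_ACCEPTED notification.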
private synchronized void addApplication(ApplicationId applicationId,
String queueName, String user, boolean isAppRecovering, Priority priority) {
if (mappings != null && mappings.size() > 0) {
try {
String mappedQueue = getMappedQueue(user);
if (mappedQueue != null) {
// We have a mapping, should we use it?
if (queueName.equals(YarnConfiguration.DEFAULT_QUEUE_NAME)
|| overrideWithQueueMappings) {
LOG.info("Application " + applicationId + " user " + user
+ " mapping [" + queueName + "] to [" + mappedQueue
+ "] override " + overrideWithQueueMappings);
queueName = mappedQueue;
RMApp rmApp = rmContext.getRMApps().get(applicationId);
rmApp.setQueue(queueName);
}
}
} catch (IOException ioex) {
String message = "Failed to submit application " + applicationId +
" submitted by user " + user + " reason: " + ioex.getMessage();
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return;
}
}
// sanity checks.
CSQueue queue = getQueue(queueName);
if (queue == null) {
//During a restart, this indicates a queue was removed, which is
//not presently supported
if (isAppRecovering) {
String queueErrorMsg = "Queue named " + queueName
+ " missing during application recovery."
+ " Queue removal during recovery is not presently supported by the"
+ " capacity scheduler, please restart with all queues configured"
+ " which were present before shutdown/restart.";
LOG.fatal(queueErrorMsg);
throw new QueueNotFoundException(queueErrorMsg);
}
String message = "Application " + applicationId +
" submitted by user " + user + " to unknown queue: " + queueName;
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return;
}
if (!(queue instanceof LeafQueue)) {
String message = "Application " + applicationId +
" submitted by user " + user + " to non-leaf queue: " + queueName;
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return;
}
// Submit to the queue
try {
queue.submitApplication(applicationId, user, queueName);
} catch (AccessControlException ace) {
// Ignore the exception for recovered app as the app was previously accepted
if (!isAppRecovering) {
LOG.info("Failed to submit application " + applicationId + " to queue "
+ queueName + " from user " + user, ace);
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, ace.toString()));
return;
}
}
// update the metrics
queue.getMetrics().submitApp(user);
SchedulerApplication<FiCaSchedulerApp> application =
new SchedulerApplication<FiCaSchedulerApp>(queue, user, priority);
applications.put(applicationId, application);
LOG.info("Accepted application " + applicationId + " from user: " + user
+ ", in queue: " + queueName);
if (isAppRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
}
} else {
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
}
}
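  /**
   * Create the FiCaSchedulerApp for a new application attempt, optionally
   * transferring state from the previous attempt (work-preserving AM restart),
   * and submit it to the application's queue. ATTEMPT_ADDED is not sent for
   * recovering attempts.
   */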
private synchronized void addApplicationAttempt(
ApplicationAttemptId applicationAttemptId,
boolean transferStateFromPreviousAttempt,
boolean isAttemptRecovering) {
SchedulerApplication<FiCaSchedulerApp> application =
applications.get(applicationAttemptId.getApplicationId());
CSQueue queue = (CSQueue) application.getQueue();
FiCaSchedulerApp attempt = new FiCaSchedulerApp(applicationAttemptId,
application.getUser(), queue, queue.getActiveUsersManager(), rmContext,
application.getPriority());
if (transferStateFromPreviousAttempt) {
attempt.transferStateFromPreviousAttempt(application
.getCurrentAppAttempt());
}
application.setCurrentAppAttempt(attempt);
queue.submitApplicationAttempt(attempt, application.getUser());
LOG.info("Added Application Attempt " + applicationAttemptId
+ " to scheduler from user " + application.getUser() + " in queue "
+ queue.getQueueName());
if (isAttemptRecovering) {
if (LOG.isDebugEnabled()) {
LOG.debug(applicationAttemptId
+ " is recovering. Skipping notifying ATTEMPT_ADDED");
}
} else {
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(applicationAttemptId,
RMAppAttemptEventType.ATTEMPT_ADDED));
}
}
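  /**
   * Clean up a finished application: inform its leaf queue, stop the
   * scheduler-side application and remove it from the applications map.
   */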
private synchronized void doneApplication(ApplicationId applicationId,
RMAppState finalState) {
SchedulerApplication<FiCaSchedulerApp> application =
applications.get(applicationId);
if (application == null){
      // The AppRemovedSchedulerEvent may be sent on recovery for completed
      // apps; ignore it.
LOG.warn("Couldn't find application " + applicationId);
return;
}
CSQueue queue = (CSQueue) application.getQueue();
if (!(queue instanceof LeafQueue)) {
LOG.error("Cannot finish application " + "from non-leaf queue: "
+ queue.getQueueName());
} else {
queue.finishApplication(applicationId, application.getUser());
}
application.stop(finalState);
applications.remove(applicationId);
}
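  /**
   * Clean up a finished application attempt: release its live and reserved
   * containers (running containers are kept when keepContainers is set, for
   * work-preserving AM restart), stop the attempt and inform its queue.
   */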
private synchronized void doneApplicationAttempt(
ApplicationAttemptId applicationAttemptId,
RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) {
LOG.info("Application Attempt " + applicationAttemptId + " is done." +
" finalState=" + rmAppAttemptFinalState);
FiCaSchedulerApp attempt = getApplicationAttempt(applicationAttemptId);
SchedulerApplication<FiCaSchedulerApp> application =
applications.get(applicationAttemptId.getApplicationId());
if (application == null || attempt == null) {
LOG.info("Unknown application " + applicationAttemptId + " has completed!");
return;
}
// Release all the allocated, acquired, running containers
for (RMContainer rmContainer : attempt.getLiveContainers()) {
if (keepContainers
&& rmContainer.getState().equals(RMContainerState.RUNNING)) {
// do not kill the running container in the case of work-preserving AM
// restart.
LOG.info("Skip killing " + rmContainer.getContainerId());
continue;
}
completedContainer(
rmContainer,
SchedulerUtils.createAbnormalContainerStatus(
rmContainer.getContainerId(), SchedulerUtils.COMPLETED_APPLICATION),
RMContainerEventType.KILL);
}
// Release all reserved containers
for (RMContainer rmContainer : attempt.getReservedContainers()) {
completedContainer(
rmContainer,
SchedulerUtils.createAbnormalContainerStatus(
rmContainer.getContainerId(), "Application Complete"),
RMContainerEventType.KILL);
}
// Clean up pending requests, metrics etc.
attempt.stop(rmAppAttemptFinalState);
// Inform the queue
String queueName = attempt.getQueue().getQueueName();
CSQueue queue = queues.get(queueName);
if (!(queue instanceof LeafQueue)) {
LOG.error("Cannot finish application " + "from non-leaf queue: "
+ queueName);
} else {
queue.finishApplicationAttempt(attempt, queue.getQueueName());
}
}
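  /**
   * Handle an allocate call from an application master: normalize the
   * resource requests, release the requested containers, update the
   * application's requests and blacklist, and return the current allocation.
   */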
@Override
@Lock(Lock.NoLock.class)
public Allocation allocate(ApplicationAttemptId applicationAttemptId,
List<ResourceRequest> ask, List<ContainerId> release,
List<String> blacklistAdditions, List<String> blacklistRemovals) {
FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
if (application == null) {
LOG.info("Calling allocate on removed " +
"or non existant application " + applicationAttemptId);
return EMPTY_ALLOCATION;
}
// Sanity check
SchedulerUtils.normalizeRequests(
ask, getResourceCalculator(), getClusterResource(),
getMinimumResourceCapability(), getMaximumResourceCapability());
// Release containers
releaseContainers(release, application);
Allocation allocation;
LeafQueue updateDemandForQueue = null;
synchronized (application) {
// make sure we aren't stopping/removing the application
// when the allocate comes in
if (application.isStopped()) {
LOG.info("Calling allocate on a stopped " +
"application " + applicationAttemptId);
return EMPTY_ALLOCATION;
}
if (!ask.isEmpty()) {
if(LOG.isDebugEnabled()) {
LOG.debug("allocate: pre-update" +
" applicationAttemptId=" + applicationAttemptId +
" application=" + application);
}
application.showRequests();
// Update application requests
if (application.updateResourceRequests(ask)) {
updateDemandForQueue = (LeafQueue) application.getQueue();
}
LOG.debug("allocate: post-update");
application.showRequests();
}
if(LOG.isDebugEnabled()) {
LOG.debug("allocate:" +
" applicationAttemptId=" + applicationAttemptId +
" #ask=" + ask.size());
}
application.updateBlacklist(blacklistAdditions, blacklistRemovals);
allocation = application.getAllocation(getResourceCalculator(),
clusterResource, getMinimumResourceCapability());
}
if (updateDemandForQueue != null) {
updateDemandForQueue.getOrderingPolicy().demandUpdated(application);
}
return allocation;
}
@Override
@Lock(Lock.NoLock.class)
public QueueInfo getQueueInfo(String queueName,
boolean includeChildQueues, boolean recursive)
throws IOException {
CSQueue queue = null;
queue = this.queues.get(queueName);
if (queue == null) {
throw new IOException("Unknown queue: " + queueName);
}
return queue.getQueueInfo(includeChildQueues, recursive);
}
@Override
@Lock(Lock.NoLock.class)
public List<QueueUserACLInfo> getQueueUserAclInfo() {
UserGroupInformation user = null;
try {
user = UserGroupInformation.getCurrentUser();
} catch (IOException ioe) {
// should never happen
return new ArrayList<QueueUserACLInfo>();
}
return root.getQueueUserAclInfo(user);
}
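  /**
   * Process a heartbeat from a node: record newly launched containers,
   * complete finished ones and update the scheduler health release counters.
   */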
private synchronized void nodeUpdate(RMNode nm) {
if (LOG.isDebugEnabled()) {
LOG.debug("nodeUpdate: " + nm + " clusterResources: " + clusterResource);
}
Resource releaseResources = Resource.newInstance(0, 0);
FiCaSchedulerNode node = getNode(nm.getNodeID());
List<UpdatedContainerInfo> containerInfoList = nm.pullContainerUpdates();
List<ContainerStatus> newlyLaunchedContainers = new ArrayList<ContainerStatus>();
List<ContainerStatus> completedContainers = new ArrayList<ContainerStatus>();
for(UpdatedContainerInfo containerInfo : containerInfoList) {
newlyLaunchedContainers.addAll(containerInfo.getNewlyLaunchedContainers());
completedContainers.addAll(containerInfo.getCompletedContainers());
}
// Processing the newly launched containers
for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
containerLaunchedOnNode(launchedContainer.getContainerId(), node);
}
// Process completed containers
int releasedContainers = 0;
for (ContainerStatus completedContainer : completedContainers) {
ContainerId containerId = completedContainer.getContainerId();
RMContainer container = getRMContainer(containerId);
LOG.debug("Container FINISHED: " + containerId);
completedContainer(container, completedContainer,
RMContainerEventType.FINISHED);
if (container != null) {
releasedContainers++;
Resource rs = container.getAllocatedResource();
if (rs != null) {
Resources.addTo(releaseResources, rs);
}
rs = container.getReservedResource();
if (rs != null) {
Resources.addTo(releaseResources, rs);
}
}
}
schedulerHealth.updateSchedulerReleaseDetails(lastNodeUpdateTime,
releaseResources);
schedulerHealth.updateSchedulerReleaseCounts(releasedContainers);
    // Now node data structures are up to date and ready for scheduling.
if(LOG.isDebugEnabled()) {
LOG.debug("Node being looked for scheduling " + nm
+ " availableResource: " + node.getAvailableResource());
}
}
/**
* Process resource update on a node.
*/
private synchronized void updateNodeAndQueueResource(RMNode nm,
ResourceOption resourceOption) {
updateNodeResource(nm, resourceOption);
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
}
/**
* Process node labels update on a node.
*
   * TODO: Currently the capacity scheduler will kill containers on a node when
   * the labels on the node change. This is a simple solution to ensure
   * guaranteed capacity on the labels of queues. Once YARN-2498 is completed,
   * we can let the preemption policy decide whether such containers need to be
   * killed or can keep running.
*/
private synchronized void updateLabelsOnNode(NodeId nodeId,
Set<String> newLabels) {
FiCaSchedulerNode node = nodes.get(nodeId);
if (null == node) {
return;
}
    // labels are the same, no update needed
if (node.getLabels().size() == newLabels.size()
&& node.getLabels().containsAll(newLabels)) {
return;
}
// Kill running containers since label is changed
for (RMContainer rmContainer : node.getRunningContainers()) {
ContainerId containerId = rmContainer.getContainerId();
completedContainer(rmContainer,
ContainerStatus.newInstance(containerId,
ContainerState.COMPLETE,
String.format(
"Container=%s killed since labels on the node=%s changed",
containerId.toString(), nodeId.toString()),
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
RMContainerEventType.KILL);
}
// Unreserve container on this node
RMContainer reservedContainer = node.getReservedContainer();
if (null != reservedContainer) {
dropContainerReservation(reservedContainer);
}
// Update node labels after we've done this
node.updateLabels(newLabels);
}
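  /**
   * Record the latest allocation and reservation made on the node, together
   * with the aggregate allocation/reservation counts, in the scheduler health
   * tracker.
   */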
private void updateSchedulerHealth(long now, FiCaSchedulerNode node,
CSAssignment assignment) {
NodeId nodeId = node.getNodeID();
List<AssignmentInformation.AssignmentDetails> allocations =
assignment.getAssignmentInformation().getAllocationDetails();
List<AssignmentInformation.AssignmentDetails> reservations =
assignment.getAssignmentInformation().getReservationDetails();
if (!allocations.isEmpty()) {
ContainerId allocatedContainerId =
allocations.get(allocations.size() - 1).containerId;
String allocatedQueue = allocations.get(allocations.size() - 1).queue;
schedulerHealth.updateAllocation(now, nodeId, allocatedContainerId,
allocatedQueue);
}
if (!reservations.isEmpty()) {
ContainerId reservedContainerId =
reservations.get(reservations.size() - 1).containerId;
String reservedQueue = reservations.get(reservations.size() - 1).queue;
schedulerHealth.updateReservation(now, nodeId, reservedContainerId,
reservedQueue);
}
schedulerHealth.updateSchedulerReservationCounts(assignment
.getAssignmentInformation().getNumReservations());
schedulerHealth.updateSchedulerAllocationCounts(assignment
.getAssignmentInformation().getNumAllocations());
schedulerHealth.updateSchedulerRunDetails(now, assignment
.getAssignmentInformation().getAllocated(), assignment
.getAssignmentInformation().getReserved());
}
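  /**
   * Try to allocate containers on the given node: first attempt to fulfill an
   * existing reservation, then schedule new containers from the root queue.
   * For labeled nodes whose partition is non-exclusive, a second pass with
   * IGNORE_PARTITION_EXCLUSIVITY is attempted when nothing was assigned.
   */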
private synchronized void allocateContainersToNode(FiCaSchedulerNode node) {
if (rmContext.isWorkPreservingRecoveryEnabled()
&& !rmContext.isSchedulerReadyForAllocatingContainers()) {
return;
}
// reset allocation and reservation stats before we start doing any work
updateSchedulerHealth(lastNodeUpdateTime, node,
new CSAssignment(Resources.none(), NodeType.NODE_LOCAL));
CSAssignment assignment;
// Assign new containers...
// 1. Check for reserved applications
// 2. Schedule if there are no reservations
RMContainer reservedContainer = node.getReservedContainer();
if (reservedContainer != null) {
FiCaSchedulerApp reservedApplication =
getCurrentAttemptForContainer(reservedContainer.getContainerId());
// Try to fulfill the reservation
LOG.info("Trying to fulfill reservation for application "
+ reservedApplication.getApplicationId() + " on node: "
+ node.getNodeID());
LeafQueue queue = ((LeafQueue) reservedApplication.getQueue());
assignment =
queue.assignContainers(
clusterResource,
node,
// TODO, now we only consider limits for parent for non-labeled
// resources, should consider labeled resources as well.
new ResourceLimits(labelManager.getResourceByLabel(
RMNodeLabelsManager.NO_LABEL, clusterResource)),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
if (assignment.isFulfilledReservation()) {
CSAssignment tmp =
new CSAssignment(reservedContainer.getReservedResource(),
assignment.getType());
Resources.addTo(assignment.getAssignmentInformation().getAllocated(),
reservedContainer.getReservedResource());
tmp.getAssignmentInformation().addAllocationDetails(
reservedContainer.getContainerId(), queue.getQueuePath());
tmp.getAssignmentInformation().incrAllocations();
updateSchedulerHealth(lastNodeUpdateTime, node, tmp);
schedulerHealth.updateSchedulerFulfilledReservationCounts(1);
}
}
// Try to schedule more if there are no reservations to fulfill
if (node.getReservedContainer() == null) {
if (calculator.computeAvailableContainers(node.getAvailableResource(),
minimumAllocation) > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to schedule on node: " + node.getNodeName() +
", available: " + node.getAvailableResource());
}
assignment = root.assignContainers(
clusterResource,
node,
// TODO, now we only consider limits for parent for non-labeled
// resources, should consider labeled resources as well.
new ResourceLimits(labelManager.getResourceByLabel(
RMNodeLabelsManager.NO_LABEL, clusterResource)),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
if (Resources.greaterThan(calculator, clusterResource,
assignment.getResource(), Resources.none())) {
updateSchedulerHealth(lastNodeUpdateTime, node, assignment);
return;
}
// Only do non-exclusive allocation when node has node-labels.
if (StringUtils.equals(node.getPartition(),
RMNodeLabelsManager.NO_LABEL)) {
return;
}
// Only do non-exclusive allocation when the node-label supports that
try {
if (rmContext.getNodeLabelManager().isExclusiveNodeLabel(
node.getPartition())) {
return;
}
} catch (IOException e) {
LOG.warn("Exception when trying to get exclusivity of node label="
+ node.getPartition(), e);
return;
}
// Try to use NON_EXCLUSIVE
assignment = root.assignContainers(
clusterResource,
node,
// TODO, now we only consider limits for parent for non-labeled
// resources, should consider labeled resources as well.
new ResourceLimits(labelManager.getResourceByLabel(
RMNodeLabelsManager.NO_LABEL, clusterResource)),
SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY);
updateSchedulerHealth(lastNodeUpdateTime, node, assignment);
}
} else {
LOG.info("Skipping scheduling since node "
+ node.getNodeID()
+ " is reserved by application "
+ node.getReservedContainer().getContainerId()
.getApplicationAttemptId());
}
}
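  /**
   * Dispatch scheduler events (node, application, application attempt and
   * container lifecycle events) to the corresponding handler methods.
   */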
@Override
public void handle(SchedulerEvent event) {
switch(event.getType()) {
case NODE_ADDED:
{
NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event;
addNode(nodeAddedEvent.getAddedRMNode());
recoverContainersOnNode(nodeAddedEvent.getContainerReports(),
nodeAddedEvent.getAddedRMNode());
}
break;
case NODE_REMOVED:
{
NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event;
removeNode(nodeRemovedEvent.getRemovedRMNode());
}
break;
case NODE_RESOURCE_UPDATE:
{
NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent =
(NodeResourceUpdateSchedulerEvent)event;
updateNodeAndQueueResource(nodeResourceUpdatedEvent.getRMNode(),
nodeResourceUpdatedEvent.getResourceOption());
}
break;
case NODE_LABELS_UPDATE:
{
NodeLabelsUpdateSchedulerEvent labelUpdateEvent =
(NodeLabelsUpdateSchedulerEvent) event;
for (Entry<NodeId, Set<String>> entry : labelUpdateEvent
.getUpdatedNodeToLabels().entrySet()) {
NodeId id = entry.getKey();
Set<String> labels = entry.getValue();
updateLabelsOnNode(id, labels);
}
}
break;
case NODE_UPDATE:
{
NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event;
RMNode node = nodeUpdatedEvent.getRMNode();
setLastNodeUpdateTime(Time.now());
nodeUpdate(node);
if (!scheduleAsynchronously) {
allocateContainersToNode(getNode(node.getNodeID()));
}
}
break;
case APP_ADDED:
{
AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event;
String queueName =
resolveReservationQueueName(appAddedEvent.getQueue(),
appAddedEvent.getApplicationId(),
appAddedEvent.getReservationID());
if (queueName != null) {
addApplication(appAddedEvent.getApplicationId(),
queueName,
appAddedEvent.getUser(),
appAddedEvent.getIsAppRecovering(),
appAddedEvent.getApplicatonPriority());
}
}
break;
case APP_REMOVED:
{
AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event;
doneApplication(appRemovedEvent.getApplicationID(),
appRemovedEvent.getFinalState());
}
break;
case APP_ATTEMPT_ADDED:
{
AppAttemptAddedSchedulerEvent appAttemptAddedEvent =
(AppAttemptAddedSchedulerEvent) event;
addApplicationAttempt(appAttemptAddedEvent.getApplicationAttemptId(),
appAttemptAddedEvent.getTransferStateFromPreviousAttempt(),
appAttemptAddedEvent.getIsAttemptRecovering());
}
break;
case APP_ATTEMPT_REMOVED:
{
AppAttemptRemovedSchedulerEvent appAttemptRemovedEvent =
(AppAttemptRemovedSchedulerEvent) event;
doneApplicationAttempt(appAttemptRemovedEvent.getApplicationAttemptID(),
appAttemptRemovedEvent.getFinalAttemptState(),
appAttemptRemovedEvent.getKeepContainersAcrossAppAttempts());
}
break;
case CONTAINER_EXPIRED:
{
ContainerExpiredSchedulerEvent containerExpiredEvent =
(ContainerExpiredSchedulerEvent) event;
ContainerId containerId = containerExpiredEvent.getContainerId();
completedContainer(getRMContainer(containerId),
SchedulerUtils.createAbnormalContainerStatus(
containerId,
SchedulerUtils.EXPIRED_CONTAINER),
RMContainerEventType.EXPIRE);
}
break;
case DROP_RESERVATION:
{
ContainerPreemptEvent dropReservationEvent = (ContainerPreemptEvent)event;
RMContainer container = dropReservationEvent.getContainer();
dropContainerReservation(container);
}
break;
case PREEMPT_CONTAINER:
{
ContainerPreemptEvent preemptContainerEvent =
(ContainerPreemptEvent)event;
ApplicationAttemptId aid = preemptContainerEvent.getAppId();
RMContainer containerToBePreempted = preemptContainerEvent.getContainer();
preemptContainer(aid, containerToBePreempted);
}
break;
case KILL_CONTAINER:
{
ContainerPreemptEvent killContainerEvent = (ContainerPreemptEvent)event;
RMContainer containerToBeKilled = killContainerEvent.getContainer();
killContainer(containerToBeKilled);
}
break;
case CONTAINER_RESCHEDULED:
{
ContainerRescheduledEvent containerRescheduledEvent =
(ContainerRescheduledEvent) event;
RMContainer container = containerRescheduledEvent.getContainer();
recoverResourceRequestForContainer(container);
}
break;
default:
LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!");
}
}
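  /**
   * Register a new node: add it to the node map, grow the cluster resource,
   * activate it in the node label manager and start asynchronous scheduling
   * when the first node joins.
   */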
private synchronized void addNode(RMNode nodeManager) {
FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
usePortForNodeName, nodeManager.getNodeLabels());
this.nodes.put(nodeManager.getNodeID(), schedulerNode);
Resources.addTo(clusterResource, nodeManager.getTotalCapability());
// update this node to node label manager
if (labelManager != null) {
labelManager.activateNode(nodeManager.getNodeID(),
nodeManager.getTotalCapability());
}
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
int numNodes = numNodeManagers.incrementAndGet();
updateMaximumAllocation(schedulerNode, true);
LOG.info("Added node " + nodeManager.getNodeAddress() +
" clusterResource: " + clusterResource);
if (scheduleAsynchronously && numNodes == 1) {
asyncSchedulerThread.beginSchedule();
}
}
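  /**
   * Deregister a node: deactivate it in the node label manager, shrink the
   * cluster resource, kill its running and reserved containers and drop it
   * from the node map.
   */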
private synchronized void removeNode(RMNode nodeInfo) {
// update this node to node label manager
if (labelManager != null) {
labelManager.deactivateNode(nodeInfo.getNodeID());
}
FiCaSchedulerNode node = nodes.get(nodeInfo.getNodeID());
if (node == null) {
return;
}
Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
int numNodes = numNodeManagers.decrementAndGet();
if (scheduleAsynchronously && numNodes == 0) {
asyncSchedulerThread.suspendSchedule();
}
// Remove running containers
List<RMContainer> runningContainers = node.getRunningContainers();
for (RMContainer container : runningContainers) {
completedContainer(container,
SchedulerUtils.createAbnormalContainerStatus(
container.getContainerId(),
SchedulerUtils.LOST_CONTAINER),
RMContainerEventType.KILL);
}
// Remove reservations, if any
RMContainer reservedContainer = node.getReservedContainer();
if (reservedContainer != null) {
completedContainer(reservedContainer,
SchedulerUtils.createAbnormalContainerStatus(
reservedContainer.getContainerId(),
SchedulerUtils.LOST_CONTAINER),
RMContainerEventType.KILL);
}
this.nodes.remove(nodeInfo.getNodeID());
updateMaximumAllocation(node, false);
LOG.info("Removed node " + nodeInfo.getNodeAddress() +
" clusterResource: " + clusterResource);
}
@Lock(CapacityScheduler.class)
@Override
protected synchronized void completedContainer(RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event) {
if (rmContainer == null) {
LOG.info("Container " + containerStatus.getContainerId() +
" completed with event " + event);
return;
}
Container container = rmContainer.getContainer();
// Get the application for the finished container
FiCaSchedulerApp application =
getCurrentAttemptForContainer(container.getId());
ApplicationId appId =
container.getId().getApplicationAttemptId().getApplicationId();
if (application == null) {
LOG.info("Container " + container + " of" + " finished application "
+ appId + " completed with event " + event);
return;
}
// Get the node on which the container was allocated
FiCaSchedulerNode node = getNode(container.getNodeId());
// Inform the queue
LeafQueue queue = (LeafQueue)application.getQueue();
queue.completedContainer(clusterResource, application, node,
rmContainer, containerStatus, event, null, true);
LOG.info("Application attempt " + application.getApplicationAttemptId()
+ " released container " + container.getId() + " on node: " + node
+ " with event: " + event);
if (containerStatus.getExitStatus() == ContainerExitStatus.PREEMPTED) {
schedulerHealth.updatePreemption(Time.now(), container.getNodeId(),
container.getId(), queue.getQueuePath());
schedulerHealth.updateSchedulerPreemptionCounts(1);
} else {
schedulerHealth.updateRelease(lastNodeUpdateTime, container.getNodeId(),
container.getId(), queue.getQueuePath());
}
}
@Lock(Lock.NoLock.class)
@VisibleForTesting
@Override
public FiCaSchedulerApp getApplicationAttempt(
ApplicationAttemptId applicationAttemptId) {
return super.getApplicationAttempt(applicationAttemptId);
}
@Lock(Lock.NoLock.class)
public FiCaSchedulerNode getNode(NodeId nodeId) {
return nodes.get(nodeId);
}
@Lock(Lock.NoLock.class)
Map<NodeId, FiCaSchedulerNode> getAllNodes() {
return nodes;
}
@Override
@Lock(Lock.NoLock.class)
public void recover(RMState state) throws Exception {
// NOT IMPLEMENTED
}
@Override
public void dropContainerReservation(RMContainer container) {
if(LOG.isDebugEnabled()){
LOG.debug("DROP_RESERVATION:" + container.toString());
}
completedContainer(container,
SchedulerUtils.createAbnormalContainerStatus(
container.getContainerId(),
SchedulerUtils.UNRESERVED_CONTAINER),
RMContainerEventType.KILL);
}
@Override
public void preemptContainer(ApplicationAttemptId aid, RMContainer cont) {
if(LOG.isDebugEnabled()){
LOG.debug("PREEMPT_CONTAINER: application:" + aid.toString() +
" container: " + cont.toString());
}
FiCaSchedulerApp app = getApplicationAttempt(aid);
if (app != null) {
app.addPreemptContainer(cont.getContainerId());
}
}
@Override
public void killContainer(RMContainer cont) {
if (LOG.isDebugEnabled()) {
LOG.debug("KILL_CONTAINER: container" + cont.toString());
}
completedContainer(cont, SchedulerUtils.createPreemptedContainerStatus(
cont.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER),
RMContainerEventType.KILL);
}
@Override
public synchronized boolean checkAccess(UserGroupInformation callerUGI,
QueueACL acl, String queueName) {
CSQueue queue = getQueue(queueName);
if (queue == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("ACL not found for queue access-type " + acl
+ " for queue " + queueName);
}
return false;
}
return queue.hasAccess(acl, callerUGI);
}
@Override
public List<ApplicationAttemptId> getAppsInQueue(String queueName) {
CSQueue queue = queues.get(queueName);
if (queue == null) {
return null;
}
List<ApplicationAttemptId> apps = new ArrayList<ApplicationAttemptId>();
queue.collectSchedulerApplications(apps);
return apps;
}
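  /**
   * Load the capacity scheduler configuration through the configured
   * ConfigurationProvider; when the provider returns no input stream, the
   * configuration is constructed with local loading enabled.
   */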
private CapacitySchedulerConfiguration loadCapacitySchedulerConfiguration(
Configuration configuration) throws IOException {
try {
InputStream CSInputStream =
this.rmContext.getConfigurationProvider()
.getConfigurationInputStream(configuration,
YarnConfiguration.CS_CONFIGURATION_FILE);
if (CSInputStream != null) {
configuration.addResource(CSInputStream);
return new CapacitySchedulerConfiguration(configuration, false);
}
return new CapacitySchedulerConfiguration(configuration, true);
} catch (Exception e) {
throw new IOException(e);
}
}
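  /**
   * Map an application submitted to a PlanQueue onto the proper child queue:
   * the queue named after its ReservationId when one is given (rejecting the
   * application if that reservation queue is missing or belongs to a
   * different plan), otherwise the plan's default queue.
   */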
private synchronized String resolveReservationQueueName(String queueName,
ApplicationId applicationId, ReservationId reservationID) {
CSQueue queue = getQueue(queueName);
// Check if the queue is a plan queue
if ((queue == null) || !(queue instanceof PlanQueue)) {
return queueName;
}
if (reservationID != null) {
String resQName = reservationID.toString();
queue = getQueue(resQName);
if (queue == null) {
String message =
"Application "
+ applicationId
+ " submitted to a reservation which is not yet currently active: "
+ resQName;
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return null;
}
if (!queue.getParent().getQueueName().equals(queueName)) {
String message =
"Application: " + applicationId + " submitted to a reservation "
+ resQName + " which does not belong to the specified queue: "
+ queueName;
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMAppRejectedEvent(applicationId, message));
return null;
}
// use the reservation queue to run the app
queueName = resQName;
} else {
// use the default child queue of the plan for unreserved apps
queueName = queueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
}
return queueName;
}
@Override
public synchronized void removeQueue(String queueName)
throws SchedulerDynamicEditException {
LOG.info("Removing queue: " + queueName);
CSQueue q = this.getQueue(queueName);
if (!(q instanceof ReservationQueue)) {
throw new SchedulerDynamicEditException("The queue that we are asked "
+ "to remove (" + queueName + ") is not a ReservationQueue");
}
ReservationQueue disposableLeafQueue = (ReservationQueue) q;
// at this point we should have no more apps
if (disposableLeafQueue.getNumApplications() > 0) {
throw new SchedulerDynamicEditException("The queue " + queueName
+ " is not empty " + disposableLeafQueue.getApplications().size()
+ " active apps " + disposableLeafQueue.pendingApplications.size()
+ " pending apps");
}
((PlanQueue) disposableLeafQueue.getParent()).removeChildQueue(q);
this.queues.remove(queueName);
LOG.info("Removal of ReservationQueue " + queueName + " has succeeded");
}
@Override
public synchronized void addQueue(Queue queue)
throws SchedulerDynamicEditException {
if (!(queue instanceof ReservationQueue)) {
throw new SchedulerDynamicEditException("Queue " + queue.getQueueName()
+ " is not a ReservationQueue");
}
ReservationQueue newQueue = (ReservationQueue) queue;
if (newQueue.getParent() == null
|| !(newQueue.getParent() instanceof PlanQueue)) {
throw new SchedulerDynamicEditException("ParentQueue for "
+ newQueue.getQueueName()
+ " is not properly set (should be set and be a PlanQueue)");
}
PlanQueue parentPlan = (PlanQueue) newQueue.getParent();
String queuename = newQueue.getQueueName();
parentPlan.addChildQueue(newQueue);
this.queues.put(queuename, newQueue);
LOG.info("Creation of ReservationQueue " + newQueue + " succeeded");
}
@Override
public synchronized void setEntitlement(String inQueue,
QueueEntitlement entitlement) throws SchedulerDynamicEditException,
YarnException {
LeafQueue queue = getAndCheckLeafQueue(inQueue);
ParentQueue parent = (ParentQueue) queue.getParent();
if (!(queue instanceof ReservationQueue)) {
throw new SchedulerDynamicEditException("Entitlement can not be"
+ " modified dynamically since queue " + inQueue
+ " is not a ReservationQueue");
}
if (!(parent instanceof PlanQueue)) {
throw new SchedulerDynamicEditException("The parent of ReservationQueue "
+ inQueue + " must be an PlanQueue");
}
ReservationQueue newQueue = (ReservationQueue) queue;
float sumChilds = ((PlanQueue) parent).sumOfChildCapacities();
float newChildCap = sumChilds - queue.getCapacity() + entitlement.getCapacity();
if (newChildCap >= 0 && newChildCap < 1.0f + CSQueueUtils.EPSILON) {
// note: epsilon checks here are not ok, as the epsilons might accumulate
// and become a problem in aggregate
if (Math.abs(entitlement.getCapacity() - queue.getCapacity()) == 0
&& Math.abs(entitlement.getMaxCapacity() - queue.getMaximumCapacity()) == 0) {
return;
}
newQueue.setEntitlement(entitlement);
} else {
throw new SchedulerDynamicEditException(
"Sum of child queues would exceed 100% for PlanQueue: "
+ parent.getQueueName());
}
LOG.info("Set entitlement for ReservationQueue " + inQueue + " to "
+ queue.getCapacity() + " request was (" + entitlement.getCapacity() + ")");
}
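  /**
   * Move a running application to another leaf queue: submit it to the target
   * queue (enforcing ACLs and limits), re-attach its live containers, finish
   * the attempt in the source queue and update the scheduler bookkeeping.
   */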
@Override
public synchronized String moveApplication(ApplicationId appId,
String targetQueueName) throws YarnException {
FiCaSchedulerApp app =
getApplicationAttempt(ApplicationAttemptId.newInstance(appId, 0));
String sourceQueueName = app.getQueue().getQueueName();
LeafQueue source = getAndCheckLeafQueue(sourceQueueName);
String destQueueName = handleMoveToPlanQueue(targetQueueName);
LeafQueue dest = getAndCheckLeafQueue(destQueueName);
// Validation check - ACLs, submission limits for user & queue
String user = app.getUser();
try {
dest.submitApplication(appId, user, destQueueName);
} catch (AccessControlException e) {
throw new YarnException(e);
}
// Move all live containers
for (RMContainer rmContainer : app.getLiveContainers()) {
source.detachContainer(clusterResource, app, rmContainer);
// attach the Container to another queue
dest.attachContainer(clusterResource, app, rmContainer);
}
// Detach the application..
source.finishApplicationAttempt(app, sourceQueueName);
source.getParent().finishApplication(appId, app.getUser());
// Finish app & update metrics
app.move(dest);
// Submit to a new queue
dest.submitApplicationAttempt(app, user);
applications.get(appId).setQueue(dest);
LOG.info("App: " + app.getApplicationId() + " successfully moved from "
+ sourceQueueName + " to: " + destQueueName);
return targetQueueName;
}
/**
   * Check that the String provided in input is the name of an existing
   * LeafQueue; if the check succeeds, the queue is returned.
*
* @param queue
* @return the LeafQueue
* @throws YarnException
*/
private LeafQueue getAndCheckLeafQueue(String queue) throws YarnException {
CSQueue ret = this.getQueue(queue);
if (ret == null) {
throw new YarnException("The specified Queue: " + queue
+ " doesn't exist");
}
if (!(ret instanceof LeafQueue)) {
throw new YarnException("The specified Queue: " + queue
+ " is not a Leaf Queue. Move is supported only for Leaf Queues.");
}
return (LeafQueue) ret;
}
/** {@inheritDoc} */
@Override
public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes() {
if (calculator.getClass().getName()
.equals(DefaultResourceCalculator.class.getName())) {
return EnumSet.of(SchedulerResourceTypes.MEMORY);
}
return EnumSet
.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU);
}
@Override
public Resource getMaximumResourceCapability(String queueName) {
CSQueue queue = getQueue(queueName);
if (queue == null) {
LOG.error("Unknown queue: " + queueName);
return getMaximumResourceCapability();
}
if (!(queue instanceof LeafQueue)) {
LOG.error("queue " + queueName + " is not an leaf queue");
return getMaximumResourceCapability();
}
return ((LeafQueue)queue).getMaximumAllocation();
}
private String handleMoveToPlanQueue(String targetQueueName) {
CSQueue dest = getQueue(targetQueueName);
if (dest != null && dest instanceof PlanQueue) {
// use the default child reservation queue of the plan
targetQueueName = targetQueueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
}
return targetQueueName;
}
@Override
public Set<String> getPlanQueues() {
Set<String> ret = new HashSet<String>();
for (Map.Entry<String, CSQueue> l : queues.entrySet()) {
if (l.getValue() instanceof PlanQueue) {
ret.add(l.getKey());
}
}
return ret;
}
public SchedulerHealth getSchedulerHealth() {
return this.schedulerHealth;
}
private synchronized void setLastNodeUpdateTime(long time) {
this.lastNodeUpdateTime = time;
}
@Override
public Priority checkAndGetApplicationPriority(Priority priorityFromContext,
String user, String queueName, ApplicationId applicationId)
throws YarnException {
Priority appPriority = null;
// ToDo: Verify against priority ACLs
// Verify the scenario where priority is null from submissionContext.
if (null == priorityFromContext) {
// Get the default priority for the Queue. If Queue is non-existent, then
// use default priority
priorityFromContext = getDefaultPriorityForQueue(queueName);
LOG.info("Application '" + applicationId
+ "' is submitted without priority "
+ "hence considering default queue/cluster priority:"
+ priorityFromContext.getPriority());
}
    // Verify that the submitted priority does not exceed the max priority
    // in the cluster. If it is out of bounds, cap it at the max.
if (priorityFromContext.compareTo(getMaxClusterLevelAppPriority()) < 0) {
priorityFromContext = Priority
.newInstance(getMaxClusterLevelAppPriority().getPriority());
}
appPriority = priorityFromContext;
LOG.info("Priority '" + appPriority.getPriority()
+ "' is acceptable in queue :" + queueName + "for application:"
+ applicationId + "for the user: " + user);
return appPriority;
}
private Priority getDefaultPriorityForQueue(String queueName) {
Queue queue = getQueue(queueName);
if (null == queue || null == queue.getDefaultApplicationPriority()) {
// Return with default application priority
return Priority.newInstance(CapacitySchedulerConfiguration
.DEFAULT_CONFIGURATION_APPLICATION_PRIORITY);
}
return Priority.newInstance(queue.getDefaultApplicationPriority()
.getPriority());
}
public Priority getMaxClusterLevelAppPriority() {
return maxClusterLevelAppPriority;
}
}
| 70,501 | 36.421444 | 128 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
/**
* <code>CSQueue</code> represents a node in the tree of
* hierarchical queues in the {@link CapacityScheduler}.
*/
@Stable
@Private
public interface CSQueue
extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
/**
* Get the parent <code>Queue</code>.
* @return the parent queue
*/
public CSQueue getParent();
/**
* Set the parent <code>Queue</code>.
* @param newParentQueue new parent queue
*/
public void setParent(CSQueue newParentQueue);
/**
* Get the queue name.
* @return the queue name
*/
public String getQueueName();
/**
   * Get the full name of the queue, including the hierarchy.
* @return the full name of the queue
*/
public String getQueuePath();
/**
* Get the configured <em>capacity</em> of the queue.
* @return configured queue capacity
*/
public float getCapacity();
/**
* Get capacity of the parent of the queue as a function of the
* cumulative capacity in the cluster.
* @return capacity of the parent of the queue as a function of the
* cumulative capacity in the cluster
*/
public float getAbsoluteCapacity();
/**
* Get the configured maximum-capacity of the queue.
* @return the configured maximum-capacity of the queue
*/
public float getMaximumCapacity();
/**
   * Get maximum-capacity of the queue as a function of the cumulative capacity
   * of the cluster.
   * @return maximum-capacity of the queue as a function of the cumulative
   *         capacity of the cluster
*/
public float getAbsoluteMaximumCapacity();
/**
* Get the current absolute used capacity of the queue
* relative to the entire cluster.
* @return queue absolute used capacity
*/
public float getAbsoluteUsedCapacity();
/**
* Set used capacity of the queue.
* @param usedCapacity
* used capacity of the queue
*/
public void setUsedCapacity(float usedCapacity);
/**
* Set absolute used capacity of the queue.
* @param absUsedCapacity
* absolute used capacity of the queue
*/
public void setAbsoluteUsedCapacity(float absUsedCapacity);
/**
   * Get the current used capacity of nodes without label(s) of the queue
   * and its children (if any).
* @return queue used capacity
*/
public float getUsedCapacity();
/**
   * Get the resources currently used by the queue and its children (if any)
   * that are allocated on nodes without any labels in the cluster.
   *
   * @return used resources by the queue and its children
*/
public Resource getUsedResources();
/**
* Get the current run-state of the queue
* @return current run-state
*/
public QueueState getState();
/**
* Get child queues
* @return child queues
*/
public List<CSQueue> getChildQueues();
/**
* Check if the <code>user</code> has permission to perform the operation
* @param acl ACL
* @param user user
* @return <code>true</code> if the user has the permission,
* <code>false</code> otherwise
*/
public boolean hasAccess(QueueACL acl, UserGroupInformation user);
/**
* Submit a new application to the queue.
* @param applicationId the applicationId of the application being submitted
* @param user user who submitted the application
* @param queue queue to which the application is submitted
*/
public void submitApplication(ApplicationId applicationId, String user,
String queue) throws AccessControlException;
/**
* Submit an application attempt to the queue.
*/
public void submitApplicationAttempt(FiCaSchedulerApp application,
String userName);
/**
* An application submitted to this queue has finished.
* @param applicationId
* @param user user who submitted the application
*/
public void finishApplication(ApplicationId applicationId, String user);
/**
* An application attempt submitted to this queue has finished.
*/
public void finishApplicationAttempt(FiCaSchedulerApp application,
String queue);
/**
   * Assign containers to applications in the queue or its children (if any).
* @param clusterResource the resource of the cluster.
* @param node node on which resources are available
* @param resourceLimits how much overall resource of this queue can use.
* @param schedulingMode Type of exclusive check when assign container on a
* NodeManager, see {@link SchedulingMode}.
* @return the assignment
*/
public CSAssignment assignContainers(Resource clusterResource,
FiCaSchedulerNode node, ResourceLimits resourceLimits,
SchedulingMode schedulingMode);
/**
* A container assigned to the queue has completed.
* @param clusterResource the resource of the cluster
* @param application application to which the container was assigned
* @param node node on which the container completed
* @param container completed container,
* <code>null</code> if it was just a reservation
* @param containerStatus <code>ContainerStatus</code> for the completed
* container
* @param childQueue <code>CSQueue</code> to reinsert in childQueues
* @param event event to be sent to the container
* @param sortQueues indicates whether it should re-sort the queues
*/
public void completedContainer(Resource clusterResource,
FiCaSchedulerApp application, FiCaSchedulerNode node,
RMContainer container, ContainerStatus containerStatus,
RMContainerEventType event, CSQueue childQueue,
boolean sortQueues);
/**
* Get the number of applications in the queue.
* @return number of applications
*/
public int getNumApplications();
/**
* Reinitialize the queue.
   * @param newlyParsedQueue new queue to re-initialize from
* @param clusterResource resources in the cluster
*/
public void reinitialize(CSQueue newlyParsedQueue, Resource clusterResource)
throws IOException;
/**
* Update the cluster resource for queues as we add/remove nodes
* @param clusterResource the current cluster resource
* @param resourceLimits the current ResourceLimits
*/
public void updateClusterResource(Resource clusterResource,
ResourceLimits resourceLimits);
/**
* Get the {@link ActiveUsersManager} for the queue.
* @return the <code>ActiveUsersManager</code> for the queue
*/
public ActiveUsersManager getActiveUsersManager();
/**
* Adds all applications in the queue and its subqueues to the given collection.
* @param apps the collection to add the applications to
*/
public void collectSchedulerApplications(Collection<ApplicationAttemptId> apps);
/**
* Detach a container from this queue
* @param clusterResource the current cluster resource
* @param application application to which the container was assigned
* @param container the container to detach
*/
public void detachContainer(Resource clusterResource,
FiCaSchedulerApp application, RMContainer container);
/**
* Attach a container to this queue
* @param clusterResource the current cluster resource
* @param application application to which the container was assigned
* @param container the container to attach
*/
public void attachContainer(Resource clusterResource,
FiCaSchedulerApp application, RMContainer container);
/**
* Check whether <em>disable_preemption</em> property is set for this queue
* @return true if <em>disable_preemption</em> is set, false if not
*/
public boolean getPreemptionDisabled();
/**
* Get QueueCapacities of this queue
* @return queueCapacities
*/
public QueueCapacities getQueueCapacities();
/**
* Get ResourceUsage of this queue
* @return resourceUsage
*/
public ResourceUsage getQueueResourceUsage();
}
| 9,996 | 33.353952 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.server.utils.Lock.NoLock;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
@Private
@Unstable
public class LeafQueue extends AbstractCSQueue {
private static final Log LOG = LogFactory.getLog(LeafQueue.class);
private float absoluteUsedCapacity = 0.0f;
private int userLimit;
private float userLimitFactor;
protected int maxApplications;
protected int maxApplicationsPerUser;
private float maxAMResourcePerQueuePercent;
private volatile int nodeLocalityDelay;
Map<ApplicationAttemptId, FiCaSchedulerApp> applicationAttemptMap =
new HashMap<ApplicationAttemptId, FiCaSchedulerApp>();
private Priority defaultAppPriorityPerQueue;
Set<FiCaSchedulerApp> pendingApplications;
private volatile float minimumAllocationFactor;
private Map<String, User> users = new HashMap<String, User>();
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
private CapacitySchedulerContext scheduler;
private final ActiveUsersManager activeUsersManager;
// cache last cluster resource to compute actual capacity
private Resource lastClusterResource = Resources.none();
// absolute capacity as a resource (based on cluster resource)
private Resource absoluteCapacityResource = Resources.none();
private final QueueResourceLimitsInfo queueResourceLimitsInfo =
new QueueResourceLimitsInfo();
private volatile ResourceLimits cachedResourceLimitsForHeadroom = null;
private OrderingPolicy<FiCaSchedulerApp>
orderingPolicy = new FifoOrderingPolicy<FiCaSchedulerApp>();
  // Record all ignore-partition-exclusivity RMContainers; this will be used
  // for preemption. The key is the partition the RMContainer is allocated on.
private Map<String, TreeSet<RMContainer>> ignorePartitionExclusivityRMContainers =
new HashMap<>();
public LeafQueue(CapacitySchedulerContext cs,
String queueName, CSQueue parent, CSQueue old) throws IOException {
super(cs, queueName, parent, old);
this.scheduler = cs;
this.activeUsersManager = new ActiveUsersManager(metrics);
if(LOG.isDebugEnabled()) {
LOG.debug("LeafQueue:" + " name=" + queueName
+ ", fullname=" + getQueuePath());
}
Comparator<FiCaSchedulerApp> applicationComparator =
cs.getApplicationComparator();
this.pendingApplications =
new TreeSet<FiCaSchedulerApp>(applicationComparator);
setupQueueConfigs(cs.getClusterResource());
}
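  /**
   * (Re)load this queue's configuration: ordering policy, user limits,
   * application limits, AM resource percent, node locality delay, labels,
   * ACLs and the default application priority, then log the resulting
   * settings.
   */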
protected synchronized void setupQueueConfigs(Resource clusterResource)
throws IOException {
super.setupQueueConfigs(clusterResource);
this.lastClusterResource = clusterResource;
updateAbsoluteCapacityResource(clusterResource);
this.cachedResourceLimitsForHeadroom = new ResourceLimits(clusterResource);
// Initialize headroom info, also used for calculating application
// master resource limits. Since this happens during queue initialization
// and all queues may not be realized yet, we'll use (optimistic)
// absoluteMaxCapacity (it will be replaced with the more accurate
// absoluteMaxAvailCapacity during headroom/userlimit/allocation events)
setQueueResourceLimitsInfo(clusterResource);
CapacitySchedulerConfiguration conf = csContext.getConfiguration();
setOrderingPolicy(conf.<FiCaSchedulerApp>getOrderingPolicy(getQueuePath()));
userLimit = conf.getUserLimit(getQueuePath());
userLimitFactor = conf.getUserLimitFactor(getQueuePath());
maxApplications = conf.getMaximumApplicationsPerQueue(getQueuePath());
if (maxApplications < 0) {
int maxSystemApps = conf.getMaximumSystemApplications();
maxApplications =
(int) (maxSystemApps * queueCapacities.getAbsoluteCapacity());
}
maxApplicationsPerUser =
(int)(maxApplications * (userLimit / 100.0f) * userLimitFactor);
maxAMResourcePerQueuePercent =
conf.getMaximumApplicationMasterResourcePerQueuePercent(getQueuePath());
if (!SchedulerUtils.checkQueueLabelExpression(
this.accessibleLabels, this.defaultLabelExpression, null)) {
throw new IOException("Invalid default label expression of "
+ " queue="
+ getQueueName()
+ " doesn't have permission to access all labels "
+ "in default label expression. labelExpression of resource request="
+ (this.defaultLabelExpression == null ? ""
: this.defaultLabelExpression)
+ ". Queue labels="
+ (getAccessibleNodeLabels() == null ? "" : StringUtils.join(
getAccessibleNodeLabels().iterator(), ',')));
}
nodeLocalityDelay = conf.getNodeLocalityDelay();
// re-init this since max allocation could have changed
this.minimumAllocationFactor =
Resources.ratio(resourceCalculator,
Resources.subtract(maximumAllocation, minimumAllocation),
maximumAllocation);
StringBuilder aclsString = new StringBuilder();
for (Map.Entry<AccessType, AccessControlList> e : acls.entrySet()) {
aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
}
StringBuilder labelStrBuilder = new StringBuilder();
if (accessibleLabels != null) {
for (String s : accessibleLabels) {
labelStrBuilder.append(s);
labelStrBuilder.append(",");
}
}
defaultAppPriorityPerQueue = Priority.newInstance(conf
.getDefaultApplicationPriorityConfPerQueue(getQueuePath()));
LOG.info("Initializing " + queueName + "\n" +
"capacity = " + queueCapacities.getCapacity() +
" [= (float) configuredCapacity / 100 ]" + "\n" +
"asboluteCapacity = " + queueCapacities.getAbsoluteCapacity() +
" [= parentAbsoluteCapacity * capacity ]" + "\n" +
"maxCapacity = " + queueCapacities.getMaximumCapacity() +
" [= configuredMaxCapacity ]" + "\n" +
"absoluteMaxCapacity = " + queueCapacities.getAbsoluteMaximumCapacity() +
" [= 1.0 maximumCapacity undefined, " +
"(parentAbsoluteMaxCapacity * maximumCapacity) / 100 otherwise ]" +
"\n" +
"userLimit = " + userLimit +
" [= configuredUserLimit ]" + "\n" +
"userLimitFactor = " + userLimitFactor +
" [= configuredUserLimitFactor ]" + "\n" +
"maxApplications = " + maxApplications +
" [= configuredMaximumSystemApplicationsPerQueue or" +
" (int)(configuredMaximumSystemApplications * absoluteCapacity)]" +
"\n" +
"maxApplicationsPerUser = " + maxApplicationsPerUser +
" [= (int)(maxApplications * (userLimit / 100.0f) * " +
"userLimitFactor) ]" + "\n" +
"usedCapacity = " + queueCapacities.getUsedCapacity() +
" [= usedResourcesMemory / " +
"(clusterResourceMemory * absoluteCapacity)]" + "\n" +
"absoluteUsedCapacity = " + absoluteUsedCapacity +
" [= usedResourcesMemory / clusterResourceMemory]" + "\n" +
"maxAMResourcePerQueuePercent = " + maxAMResourcePerQueuePercent +
" [= configuredMaximumAMResourcePercent ]" + "\n" +
"minimumAllocationFactor = " + minimumAllocationFactor +
" [= (float)(maximumAllocationMemory - minimumAllocationMemory) / " +
"maximumAllocationMemory ]" + "\n" +
"maximumAllocation = " + maximumAllocation +
" [= configuredMaxAllocation ]" + "\n" +
"numContainers = " + numContainers +
" [= currentNumContainers ]" + "\n" +
"state = " + state +
" [= configuredState ]" + "\n" +
"acls = " + aclsString +
" [= configuredAcls ]" + "\n" +
"nodeLocalityDelay = " + nodeLocalityDelay + "\n" +
"labels=" + labelStrBuilder.toString() + "\n" +
"nodeLocalityDelay = " + nodeLocalityDelay + "\n" +
"reservationsContinueLooking = " +
reservationsContinueLooking + "\n" +
"preemptionDisabled = " + getPreemptionDisabled() + "\n" +
"defaultAppPriorityPerQueue = " + defaultAppPriorityPerQueue);
}
@Override
public String getQueuePath() {
return getParent().getQueuePath() + "." + getQueueName();
}
/**
* Used only by tests.
*/
@Private
public float getMinimumAllocationFactor() {
return minimumAllocationFactor;
}
/**
* Used only by tests.
*/
@Private
public float getMaxAMResourcePerQueuePercent() {
return maxAMResourcePerQueuePercent;
}
public int getMaxApplications() {
return maxApplications;
}
public synchronized int getMaxApplicationsPerUser() {
return maxApplicationsPerUser;
}
@Override
public ActiveUsersManager getActiveUsersManager() {
return activeUsersManager;
}
@Override
public List<CSQueue> getChildQueues() {
return null;
}
/**
* Set user limit - used only for testing.
* @param userLimit new user limit
*/
synchronized void setUserLimit(int userLimit) {
this.userLimit = userLimit;
}
/**
* Set user limit factor - used only for testing.
* @param userLimitFactor new user limit factor
*/
synchronized void setUserLimitFactor(float userLimitFactor) {
this.userLimitFactor = userLimitFactor;
}
@Override
public synchronized int getNumApplications() {
return getNumPendingApplications() + getNumActiveApplications();
}
public synchronized int getNumPendingApplications() {
return pendingApplications.size();
}
public synchronized int getNumActiveApplications() {
return orderingPolicy.getNumSchedulableEntities();
}
@Private
public synchronized int getNumApplications(String user) {
return getUser(user).getTotalApplications();
}
@Private
public synchronized int getNumPendingApplications(String user) {
return getUser(user).getPendingApplications();
}
@Private
public synchronized int getNumActiveApplications(String user) {
return getUser(user).getActiveApplications();
}
public synchronized int getNumContainers() {
return numContainers;
}
@Override
public synchronized QueueState getState() {
return state;
}
@Private
public synchronized int getUserLimit() {
return userLimit;
}
@Private
public synchronized float getUserLimitFactor() {
return userLimitFactor;
}
@Override
public synchronized QueueInfo getQueueInfo(
boolean includeChildQueues, boolean recursive) {
QueueInfo queueInfo = getQueueInfo();
return queueInfo;
}
@Override
public synchronized List<QueueUserACLInfo>
getQueueUserAclInfo(UserGroupInformation user) {
QueueUserACLInfo userAclInfo =
recordFactory.newRecordInstance(QueueUserACLInfo.class);
List<QueueACL> operations = new ArrayList<QueueACL>();
for (QueueACL operation : QueueACL.values()) {
if (hasAccess(operation, user)) {
operations.add(operation);
}
}
userAclInfo.setQueueName(getQueueName());
userAclInfo.setUserAcls(operations);
return Collections.singletonList(userAclInfo);
}
public String toString() {
return queueName + ": " +
"capacity=" + queueCapacities.getCapacity() + ", " +
"absoluteCapacity=" + queueCapacities.getAbsoluteCapacity() + ", " +
"usedResources=" + queueUsage.getUsed() + ", " +
"usedCapacity=" + getUsedCapacity() + ", " +
"absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + ", " +
"numApps=" + getNumApplications() + ", " +
"numContainers=" + getNumContainers();
}
@VisibleForTesting
public synchronized void setNodeLabelManager(RMNodeLabelsManager mgr) {
this.labelManager = mgr;
}
@VisibleForTesting
public synchronized User getUser(String userName) {
User user = users.get(userName);
if (user == null) {
user = new User();
users.put(userName, user);
}
return user;
}
/**
* @return an ArrayList of UserInfo objects who are active in this queue
*/
public synchronized ArrayList<UserInfo> getUsers() {
ArrayList<UserInfo> usersToReturn = new ArrayList<UserInfo>();
for (Map.Entry<String, User> entry : users.entrySet()) {
User user = entry.getValue();
usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(user
.getAllUsed()), user.getActiveApplications(), user
.getPendingApplications(), Resources.clone(user
.getConsumedAMResources()), Resources.clone(user
.getUserResourceLimit())));
}
return usersToReturn;
}
@Override
public synchronized void reinitialize(
CSQueue newlyParsedQueue, Resource clusterResource)
throws IOException {
// Sanity check
if (!(newlyParsedQueue instanceof LeafQueue) ||
!newlyParsedQueue.getQueuePath().equals(getQueuePath())) {
throw new IOException("Trying to reinitialize " + getQueuePath() +
" from " + newlyParsedQueue.getQueuePath());
}
LeafQueue newlyParsedLeafQueue = (LeafQueue)newlyParsedQueue;
// don't allow the maximum allocation to be decreased in size
    // since we have already told running AMs the size
Resource oldMax = getMaximumAllocation();
Resource newMax = newlyParsedLeafQueue.getMaximumAllocation();
if (newMax.getMemory() < oldMax.getMemory()
|| newMax.getVirtualCores() < oldMax.getVirtualCores()) {
throw new IOException(
"Trying to reinitialize "
+ getQueuePath()
+ " the maximum allocation size can not be decreased!"
+ " Current setting: " + oldMax
+ ", trying to set it to: " + newMax);
}
setupQueueConfigs(clusterResource);
// queue metrics are updated, more resource may be available
// activate the pending applications if possible
activateApplications();
}
@Override
public void submitApplicationAttempt(FiCaSchedulerApp application,
String userName) {
// Careful! Locking order is important!
synchronized (this) {
User user = getUser(userName);
// Add the attempt to our data-structures
addApplicationAttempt(application, user);
}
// We don't want to update metrics for move app
if (application.isPending()) {
metrics.submitAppAttempt(userName);
}
getParent().submitApplicationAttempt(application, userName);
}
@Override
public void submitApplication(ApplicationId applicationId, String userName,
String queue) throws AccessControlException {
// Careful! Locking order is important!
// Check queue ACLs
UserGroupInformation userUgi = UserGroupInformation.createRemoteUser(userName);
if (!hasAccess(QueueACL.SUBMIT_APPLICATIONS, userUgi)
&& !hasAccess(QueueACL.ADMINISTER_QUEUE, userUgi)) {
throw new AccessControlException("User " + userName + " cannot submit" +
" applications to queue " + getQueuePath());
}
User user = null;
synchronized (this) {
// Check if the queue is accepting jobs
if (getState() != QueueState.RUNNING) {
String msg = "Queue " + getQueuePath() +
" is STOPPED. Cannot accept submission of application: " + applicationId;
LOG.info(msg);
throw new AccessControlException(msg);
}
// Check submission limits for queues
if (getNumApplications() >= getMaxApplications()) {
String msg = "Queue " + getQueuePath() +
" already has " + getNumApplications() + " applications," +
" cannot accept submission of application: " + applicationId;
LOG.info(msg);
throw new AccessControlException(msg);
}
// Check submission limits for the user on this queue
user = getUser(userName);
if (user.getTotalApplications() >= getMaxApplicationsPerUser()) {
String msg = "Queue " + getQueuePath() +
" already has " + user.getTotalApplications() +
" applications from user " + userName +
" cannot accept submission of application: " + applicationId;
LOG.info(msg);
throw new AccessControlException(msg);
}
}
// Inform the parent queue
try {
getParent().submitApplication(applicationId, userName, queue);
} catch (AccessControlException ace) {
LOG.info("Failed to submit application to parent-queue: " +
getParent().getQueuePath(), ace);
throw ace;
}
}
public synchronized Resource getAMResourceLimit() {
/*
* The limit to the amount of resources which can be consumed by
* application masters for applications running in the queue
* is calculated by taking the greater of the max resources currently
* available to the queue (see absoluteMaxAvailCapacity) and the absolute
* resources guaranteed for the queue and multiplying it by the am
* resource percent.
*
* This is to allow a queue to grow its (proportional) application
* master resource use up to its max capacity when other queues are
     * idle but to scale back down to its guaranteed capacity as they
* become busy.
*
*/
Resource queueCurrentLimit;
synchronized (queueResourceLimitsInfo) {
queueCurrentLimit = queueResourceLimitsInfo.getQueueCurrentLimit();
}
Resource queueCap = Resources.max(resourceCalculator, lastClusterResource,
absoluteCapacityResource, queueCurrentLimit);
Resource amResouceLimit =
Resources.multiplyAndNormalizeUp(resourceCalculator, queueCap,
maxAMResourcePerQueuePercent, minimumAllocation);
metrics.setAMResouceLimit(amResouceLimit);
return amResouceLimit;
}
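  // Editorial note: a hedged, worked example of the AM-limit formula above,
  // with hypothetical numbers. If the queue's guaranteed (absolute-capacity)
  // resource is <20480 MB, 20 vcores> but its current limit has grown to
  // <40960 MB, 40 vcores> because other queues are idle, and
  // maxAMResourcePerQueuePercent = 0.1, then queueCap =
  // max(<20480, 20>, <40960, 40>) = <40960, 40>, and the AM limit is roughly
  // 0.1 * <40960, 40> = <4096 MB, 4 vcores>, normalized up to the minimum
  // allocation.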
public synchronized Resource getUserAMResourceLimit() {
/*
     * The user AM resource limit is based on the same approach as the
     * user limit (as it should represent a subset of that). This means that
     * it uses the absolute queue capacity instead of the max and is modified
     * by the user limit and the user-limit factor, just as the user limit is.
*
*/
float effectiveUserLimit = Math.max(userLimit / 100.0f, 1.0f /
Math.max(getActiveUsersManager().getNumActiveUsers(), 1));
return Resources.multiplyAndNormalizeUp(
resourceCalculator,
absoluteCapacityResource,
maxAMResourcePerQueuePercent * effectiveUserLimit *
userLimitFactor, minimumAllocation);
}
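  // Editorial note: a hedged, worked example with hypothetical numbers.
  // With userLimit = 25, two active users, userLimitFactor = 1 and
  // maxAMResourcePerQueuePercent = 0.1, effectiveUserLimit =
  // max(25 / 100, 1 / 2) = 0.5, so the per-user AM limit is roughly
  // 0.1 * 0.5 * 1 = 5% of the queue's absolute-capacity resource, again
  // normalized up to the minimum allocation.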
private synchronized void activateApplications() {
//limit of allowed resource usage for application masters
Resource amLimit = getAMResourceLimit();
Resource userAMLimit = getUserAMResourceLimit();
for (Iterator<FiCaSchedulerApp> i=pendingApplications.iterator();
i.hasNext(); ) {
FiCaSchedulerApp application = i.next();
ApplicationId applicationId = application.getApplicationId();
// Check am resource limit
Resource amIfStarted =
Resources.add(application.getAMResource(), queueUsage.getAMUsed());
if (LOG.isDebugEnabled()) {
LOG.debug("application AMResource " + application.getAMResource() +
" maxAMResourcePerQueuePercent " + maxAMResourcePerQueuePercent +
" amLimit " + amLimit +
" lastClusterResource " + lastClusterResource +
" amIfStarted " + amIfStarted);
}
if (!Resources.lessThanOrEqual(
resourceCalculator, lastClusterResource, amIfStarted, amLimit)) {
if (getNumActiveApplications() < 1) {
LOG.warn("maximum-am-resource-percent is insufficient to start a" +
" single application in queue, it is likely set too low." +
" skipping enforcement to allow at least one application to start");
} else {
LOG.info("Not activating application " + applicationId
+ " as amIfStarted: " + amIfStarted + " exceeds amLimit: "
+ amLimit);
continue;
}
}
// Check user am resource limit
User user = getUser(application.getUser());
Resource userAmIfStarted =
Resources.add(application.getAMResource(),
user.getConsumedAMResources());
if (!Resources.lessThanOrEqual(
resourceCalculator, lastClusterResource, userAmIfStarted,
userAMLimit)) {
if (getNumActiveApplications() < 1) {
LOG.warn("maximum-am-resource-percent is insufficient to start a" +
" single application in queue for user, it is likely set too low." +
" skipping enforcement to allow at least one application to start");
} else {
LOG.info("Not activating application " + applicationId
+ " for user: " + user + " as userAmIfStarted: "
+ userAmIfStarted + " exceeds userAmLimit: " + userAMLimit);
continue;
}
}
user.activateApplication();
orderingPolicy.addSchedulableEntity(application);
queueUsage.incAMUsed(application.getAMResource());
user.getResourceUsage().incAMUsed(application.getAMResource());
metrics.incAMUsed(application.getUser(), application.getAMResource());
metrics.setAMResouceLimitForUser(application.getUser(), userAMLimit);
i.remove();
LOG.info("Application " + applicationId + " from user: "
+ application.getUser() + " activated in queue: " + getQueueName());
}
}
private synchronized void addApplicationAttempt(FiCaSchedulerApp application,
User user) {
// Accept
user.submitApplication();
pendingApplications.add(application);
applicationAttemptMap.put(application.getApplicationAttemptId(), application);
// Activate applications
activateApplications();
LOG.info("Application added -" +
" appId: " + application.getApplicationId() +
" user: " + application.getUser() + "," +
" leaf-queue: " + getQueueName() +
" #user-pending-applications: " + user.getPendingApplications() +
" #user-active-applications: " + user.getActiveApplications() +
" #queue-pending-applications: " + getNumPendingApplications() +
" #queue-active-applications: " + getNumActiveApplications()
);
}
@Override
public void finishApplication(ApplicationId application, String user) {
// Inform the activeUsersManager
activeUsersManager.deactivateApplication(user, application);
// Inform the parent queue
getParent().finishApplication(application, user);
}
@Override
public void finishApplicationAttempt(FiCaSchedulerApp application, String queue) {
// Careful! Locking order is important!
synchronized (this) {
removeApplicationAttempt(application, getUser(application.getUser()));
}
getParent().finishApplicationAttempt(application, queue);
}
public synchronized void removeApplicationAttempt(
FiCaSchedulerApp application, User user) {
boolean wasActive =
orderingPolicy.removeSchedulableEntity(application);
if (!wasActive) {
pendingApplications.remove(application);
} else {
queueUsage.decAMUsed(application.getAMResource());
user.getResourceUsage().decAMUsed(application.getAMResource());
metrics.decAMUsed(application.getUser(), application.getAMResource());
}
applicationAttemptMap.remove(application.getApplicationAttemptId());
user.finishApplication(wasActive);
if (user.getTotalApplications() == 0) {
users.remove(application.getUser());
}
// Check if we can activate more applications
activateApplications();
LOG.info("Application removed -" +
" appId: " + application.getApplicationId() +
" user: " + application.getUser() +
" queue: " + getQueueName() +
" #user-pending-applications: " + user.getPendingApplications() +
" #user-active-applications: " + user.getActiveApplications() +
" #queue-pending-applications: " + getNumPendingApplications() +
" #queue-active-applications: " + getNumActiveApplications()
);
}
private synchronized FiCaSchedulerApp getApplication(
ApplicationAttemptId applicationAttemptId) {
return applicationAttemptMap.get(applicationAttemptId);
}
private void handleExcessReservedContainer(Resource clusterResource,
CSAssignment assignment) {
if (assignment.getExcessReservation() != null) {
RMContainer excessReservedContainer = assignment.getExcessReservation();
completedContainer(clusterResource, assignment.getApplication(),
scheduler.getNode(excessReservedContainer.getAllocatedNode()),
excessReservedContainer,
SchedulerUtils.createAbnormalContainerStatus(
excessReservedContainer.getContainerId(),
SchedulerUtils.UNRESERVED_CONTAINER),
RMContainerEventType.RELEASED, null, false);
assignment.setExcessReservation(null);
}
}
@Override
public synchronized CSAssignment assignContainers(Resource clusterResource,
FiCaSchedulerNode node, ResourceLimits currentResourceLimits,
SchedulingMode schedulingMode) {
updateCurrentResourceLimits(currentResourceLimits, clusterResource);
if (LOG.isDebugEnabled()) {
LOG.debug("assignContainers: node=" + node.getNodeName()
+ " #applications=" + orderingPolicy.getNumSchedulableEntities());
}
// Check for reserved resources
RMContainer reservedContainer = node.getReservedContainer();
if (reservedContainer != null) {
FiCaSchedulerApp application =
getApplication(reservedContainer.getApplicationAttemptId());
synchronized (application) {
CSAssignment assignment = application.assignReservedContainer(node, reservedContainer,
clusterResource, schedulingMode);
handleExcessReservedContainer(clusterResource, assignment);
return assignment;
}
}
// if our queue cannot access this node, just return
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY
&& !accessibleToPartition(node.getPartition())) {
return CSAssignment.NULL_ASSIGNMENT;
}
    // Check if this queue needs more resources; simply skip allocation if it
    // doesn't.
if (!hasPendingResourceRequest(node.getPartition(), clusterResource,
schedulingMode)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip this queue=" + getQueuePath()
+ ", because it doesn't need more resource, schedulingMode="
+ schedulingMode.name() + " node-partition=" + node.getPartition());
}
return CSAssignment.NULL_ASSIGNMENT;
}
for (Iterator<FiCaSchedulerApp> assignmentIterator =
orderingPolicy.getAssignmentIterator(); assignmentIterator.hasNext();) {
FiCaSchedulerApp application = assignmentIterator.next();
// Check queue max-capacity limit
if (!super.canAssignToThisQueue(clusterResource, node.getPartition(),
currentResourceLimits, application.getCurrentReservation(),
schedulingMode)) {
return CSAssignment.NULL_ASSIGNMENT;
}
Resource userLimit =
computeUserLimitAndSetHeadroom(application, clusterResource,
node.getPartition(), schedulingMode);
// Check user limit
if (!canAssignToUser(clusterResource, application.getUser(), userLimit,
application, node.getPartition(), currentResourceLimits)) {
continue;
}
// Try to schedule
CSAssignment assignment =
application.assignContainers(clusterResource, node,
currentResourceLimits, schedulingMode);
if (LOG.isDebugEnabled()) {
LOG.debug("post-assignContainers for application "
+ application.getApplicationId());
application.showRequests();
}
// Did we schedule or reserve a container?
Resource assigned = assignment.getResource();
handleExcessReservedContainer(clusterResource, assignment);
if (Resources.greaterThan(resourceCalculator, clusterResource, assigned,
Resources.none())) {
// Get reserved or allocated container from application
RMContainer reservedOrAllocatedRMContainer =
application.getRMContainer(assignment.getAssignmentInformation()
.getFirstAllocatedOrReservedContainerId());
// Book-keeping
// Note: Update headroom to account for current allocation too...
allocateResource(clusterResource, application, assigned,
node.getPartition(), reservedOrAllocatedRMContainer);
// Done
return assignment;
} else if (!assignment.getSkipped()) {
// If we don't allocate anything, and it is not skipped by application,
// we will return to respect FIFO of applications
return CSAssignment.NULL_ASSIGNMENT;
}
}
return CSAssignment.NULL_ASSIGNMENT;
}
protected Resource getHeadroom(User user, Resource queueCurrentLimit,
Resource clusterResource, FiCaSchedulerApp application) {
return getHeadroom(user, queueCurrentLimit, clusterResource,
computeUserLimit(application, clusterResource, user,
RMNodeLabelsManager.NO_LABEL,
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY));
}
private Resource getHeadroom(User user, Resource currentResourceLimit,
Resource clusterResource, Resource userLimit) {
/**
* Headroom is:
* min(
* min(userLimit, queueMaxCap) - userConsumed,
* queueMaxCap - queueUsedResources
* )
*
* ( which can be expressed as,
     *     min (userLimit - userConsumed, queueMaxCap - userConsumed,
* queueMaxCap - queueUsedResources)
* )
*
* given that queueUsedResources >= userConsumed, this simplifies to
*
* >> min (userlimit - userConsumed, queueMaxCap - queueUsedResources) <<
*
*/
Resource headroom =
Resources.min(resourceCalculator, clusterResource,
Resources.subtract(userLimit, user.getUsed()),
Resources.subtract(currentResourceLimit, queueUsage.getUsed())
);
// Normalize it before return
headroom =
Resources.roundDown(resourceCalculator, headroom, minimumAllocation);
return headroom;
}
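  // Editorial note: a hedged, worked example of the headroom formula above,
  // memory only, with hypothetical numbers. With userLimit = 8192 MB,
  // userConsumed = 5120 MB, queueMaxCap = 20480 MB and
  // queueUsedResources = 18432 MB:
  //   headroom = min(8192 - 5120, 20480 - 18432) = min(3072, 2048) = 2048 MB,
  // then rounded down to a multiple of the minimum allocation.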
private void setQueueResourceLimitsInfo(
Resource clusterResource) {
synchronized (queueResourceLimitsInfo) {
queueResourceLimitsInfo.setQueueCurrentLimit(cachedResourceLimitsForHeadroom
.getLimit());
queueResourceLimitsInfo.setClusterResource(clusterResource);
}
}
@Lock({LeafQueue.class, FiCaSchedulerApp.class})
Resource computeUserLimitAndSetHeadroom(FiCaSchedulerApp application,
Resource clusterResource, String nodePartition,
SchedulingMode schedulingMode) {
String user = application.getUser();
User queueUser = getUser(user);
    // Compute user limit with respect to the requested labels,
    // TODO, need to consider headroom with respect to labels as well
Resource userLimit =
computeUserLimit(application, clusterResource, queueUser,
nodePartition, schedulingMode);
setQueueResourceLimitsInfo(clusterResource);
Resource headroom =
getHeadroom(queueUser, cachedResourceLimitsForHeadroom.getLimit(),
clusterResource, userLimit);
if (LOG.isDebugEnabled()) {
LOG.debug("Headroom calculation for user " + user + ": " +
" userLimit=" + userLimit +
" queueMaxAvailRes=" + cachedResourceLimitsForHeadroom.getLimit() +
" consumed=" + queueUser.getUsed() +
" headroom=" + headroom);
}
CapacityHeadroomProvider headroomProvider = new CapacityHeadroomProvider(
queueUser, this, application, queueResourceLimitsInfo);
application.setHeadroomProvider(headroomProvider);
metrics.setAvailableResourcesToUser(user, headroom);
return userLimit;
}
@Lock(NoLock.class)
public int getNodeLocalityDelay() {
return nodeLocalityDelay;
}
@Lock(NoLock.class)
private Resource computeUserLimit(FiCaSchedulerApp application,
Resource clusterResource, User user,
String nodePartition, SchedulingMode schedulingMode) {
// What is our current capacity?
// * It is equal to the max(required, queue-capacity) if
// we're running below capacity. The 'max' ensures that jobs in queues
    //   with minuscule capacity (< 1 slot) make progress
// * If we're running over capacity, then its
// (usedResources + required) (which extra resources we are allocating)
Resource queueCapacity =
Resources.multiplyAndNormalizeUp(resourceCalculator,
labelManager.getResourceByLabel(nodePartition, clusterResource),
queueCapacities.getAbsoluteCapacity(nodePartition),
minimumAllocation);
    // Assume the required resource equals minimumAllocation; this makes sure
    // the user limit can continuously increase until queueMaxResource is
    // reached.
Resource required = minimumAllocation;
    // Allow progress for queues with minuscule capacity
queueCapacity =
Resources.max(
resourceCalculator, clusterResource,
queueCapacity,
required);
Resource currentCapacity =
Resources.lessThan(resourceCalculator, clusterResource,
queueUsage.getUsed(nodePartition), queueCapacity) ? queueCapacity
: Resources.add(queueUsage.getUsed(nodePartition), required);
// Never allow a single user to take more than the
// queue's configured capacity * user-limit-factor.
// Also, the queue's configured capacity should be higher than
// queue-hard-limit * ulMin
final int activeUsers = activeUsersManager.getNumActiveUsers();
// User limit resource is determined by:
    // max(currentCapacity / #activeUsers, currentCapacity * user-limit-percentage%)
Resource userLimitResource = Resources.max(
resourceCalculator, clusterResource,
Resources.divideAndCeil(
resourceCalculator, currentCapacity, activeUsers),
Resources.divideAndCeil(
resourceCalculator,
Resources.multiplyAndRoundDown(
currentCapacity, userLimit),
100)
);
// User limit is capped by maxUserLimit
// - maxUserLimit = queueCapacity * user-limit-factor (RESPECT_PARTITION_EXCLUSIVITY)
// - maxUserLimit = total-partition-resource (IGNORE_PARTITION_EXCLUSIVITY)
//
// In IGNORE_PARTITION_EXCLUSIVITY mode, if a queue cannot access a
// partition, its guaranteed resource on that partition is 0. And
// user-limit-factor computation is based on queue's guaranteed capacity. So
// we will not cap user-limit as well as used resource when doing
// IGNORE_PARTITION_EXCLUSIVITY allocation.
Resource maxUserLimit = Resources.none();
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
maxUserLimit =
Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor);
} else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
maxUserLimit =
labelManager.getResourceByLabel(nodePartition, clusterResource);
}
// Cap final user limit with maxUserLimit
userLimitResource =
Resources.roundUp(
resourceCalculator,
Resources.min(
resourceCalculator, clusterResource,
userLimitResource,
maxUserLimit
),
minimumAllocation);
if (LOG.isDebugEnabled()) {
String userName = application.getUser();
LOG.debug("User limit computation for " + userName +
" in queue " + getQueueName() +
" userLimitPercent=" + userLimit +
" userLimitFactor=" + userLimitFactor +
" required: " + required +
" consumed: " + user.getUsed() +
" user-limit-resource: " + userLimitResource +
" queueCapacity: " + queueCapacity +
" qconsumed: " + queueUsage.getUsed() +
" currentCapacity: " + currentCapacity +
" activeUsers: " + activeUsers +
" clusterCapacity: " + clusterResource
);
}
user.setUserResourceLimit(userLimitResource);
return userLimitResource;
}
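  // Editorial note: a hedged, worked example of the user-limit computation,
  // memory only, with hypothetical numbers, assuming the queue is not yet
  // over its capacity (currentCapacity == queueCapacity). With
  // queueCapacity = 16384 MB, 4 active users, userLimit = 25 and
  // userLimitFactor = 2:
  //   userLimitResource = max(16384 / 4, 16384 * 25 / 100) = 4096 MB
  //   maxUserLimit      = 16384 * 2 = 32768 MB (RESPECT_PARTITION_EXCLUSIVITY)
  //   final user limit  = min(4096, 32768) = 4096 MB, rounded up to the
  //                       minimum allocation.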
@Private
protected synchronized boolean canAssignToUser(Resource clusterResource,
String userName, Resource limit, FiCaSchedulerApp application,
String nodePartition, ResourceLimits currentResoureLimits) {
User user = getUser(userName);
// Note: We aren't considering the current request since there is a fixed
// overhead of the AM, but it's a > check, not a >= check, so...
if (Resources
.greaterThan(resourceCalculator, clusterResource,
user.getUsed(nodePartition),
limit)) {
      // if enabled, check to see if we could potentially use this node instead
// of a reserved node if the application has reserved containers
if (this.reservationsContinueLooking &&
nodePartition.equals(CommonNodeLabelsManager.NO_LABEL)) {
if (Resources.lessThanOrEqual(
resourceCalculator,
clusterResource,
Resources.subtract(user.getUsed(),
application.getCurrentReservation()), limit)) {
if (LOG.isDebugEnabled()) {
LOG.debug("User " + userName + " in queue " + getQueueName()
+ " will exceed limit based on reservations - " + " consumed: "
+ user.getUsed() + " reserved: "
+ application.getCurrentReservation() + " limit: " + limit);
}
Resource amountNeededToUnreserve =
Resources.subtract(user.getUsed(nodePartition), limit);
// we can only acquire a new container if we unreserve first to
// respect user-limit
currentResoureLimits.setAmountNeededUnreserve(amountNeededToUnreserve);
return true;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("User " + userName + " in queue " + getQueueName()
+ " will exceed limit - " + " consumed: "
+ user.getUsed(nodePartition) + " limit: " + limit);
}
return false;
}
return true;
}
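  // Editorial note: a hedged, worked example with hypothetical numbers.
  // Suppose a user's limit is 8192 MB, its usage on the default partition is
  // 9216 MB, and 2048 MB of that usage is held as reservations by this
  // application. The plain check fails (9216 > 8192), but with
  // reservationsContinueLooking enabled the usage minus reservations
  // (7168 MB) fits under the limit, so assignment may proceed provided at
  // least 9216 - 8192 = 1024 MB is unreserved first (amountNeededToUnreserve).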
@Override
public void completedContainer(Resource clusterResource,
FiCaSchedulerApp application, FiCaSchedulerNode node, RMContainer rmContainer,
ContainerStatus containerStatus, RMContainerEventType event, CSQueue childQueue,
boolean sortQueues) {
if (application != null) {
boolean removed = false;
// Careful! Locking order is important!
synchronized (this) {
Container container = rmContainer.getContainer();
// Inform the application & the node
// Note: It's safe to assume that all state changes to RMContainer
// happen under scheduler's lock...
// So, this is, in effect, a transaction across application & node
if (rmContainer.getState() == RMContainerState.RESERVED) {
removed = application.unreserve(rmContainer.getReservedPriority(),
node, rmContainer);
} else {
removed =
application.containerCompleted(rmContainer, containerStatus,
event, node.getPartition());
node.releaseContainer(container);
}
// Book-keeping
if (removed) {
// Inform the ordering policy
orderingPolicy.containerReleased(application, rmContainer);
releaseResource(clusterResource, application,
container.getResource(), node.getPartition(), rmContainer);
LOG.info("completedContainer" +
" container=" + container +
" queue=" + this +
" cluster=" + clusterResource);
}
}
if (removed) {
// Inform the parent queue _outside_ of the leaf-queue lock
getParent().completedContainer(clusterResource, application, node,
rmContainer, null, event, this, sortQueues);
}
}
}
synchronized void allocateResource(Resource clusterResource,
SchedulerApplicationAttempt application, Resource resource,
String nodePartition, RMContainer rmContainer) {
super.allocateResource(clusterResource, resource, nodePartition);
// handle ignore exclusivity container
if (null != rmContainer && rmContainer.getNodeLabelExpression().equals(
RMNodeLabelsManager.NO_LABEL)
&& !nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) {
TreeSet<RMContainer> rmContainers = null;
if (null == (rmContainers =
ignorePartitionExclusivityRMContainers.get(nodePartition))) {
rmContainers = new TreeSet<>();
ignorePartitionExclusivityRMContainers.put(nodePartition, rmContainers);
}
rmContainers.add(rmContainer);
}
// Update user metrics
String userName = application.getUser();
User user = getUser(userName);
user.assignContainer(resource, nodePartition);
// Note this is a bit unconventional since it gets the object and modifies
    // it here, rather than using a set routine
Resources.subtractFrom(application.getHeadroom(), resource); // headroom
metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
if (LOG.isDebugEnabled()) {
      LOG.debug(getQueueName() +
" user=" + userName +
" used=" + queueUsage.getUsed() + " numContainers=" + numContainers +
" headroom = " + application.getHeadroom() +
" user-resources=" + user.getUsed()
);
}
}
synchronized void releaseResource(Resource clusterResource,
FiCaSchedulerApp application, Resource resource, String nodePartition,
RMContainer rmContainer) {
super.releaseResource(clusterResource, resource, nodePartition);
// handle ignore exclusivity container
if (null != rmContainer && rmContainer.getNodeLabelExpression().equals(
RMNodeLabelsManager.NO_LABEL)
&& !nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) {
if (ignorePartitionExclusivityRMContainers.containsKey(nodePartition)) {
Set<RMContainer> rmContainers =
ignorePartitionExclusivityRMContainers.get(nodePartition);
rmContainers.remove(rmContainer);
if (rmContainers.isEmpty()) {
ignorePartitionExclusivityRMContainers.remove(nodePartition);
}
}
}
// Update user metrics
String userName = application.getUser();
User user = getUser(userName);
user.releaseContainer(resource, nodePartition);
metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
LOG.info(getQueueName() +
" used=" + queueUsage.getUsed() + " numContainers=" + numContainers +
" user=" + userName + " user-resources=" + user.getUsed());
}
private void updateAbsoluteCapacityResource(Resource clusterResource) {
absoluteCapacityResource =
Resources.multiplyAndNormalizeUp(resourceCalculator, labelManager
.getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
queueCapacities.getAbsoluteCapacity(), minimumAllocation);
}
private void updateCurrentResourceLimits(
ResourceLimits currentResourceLimits, Resource clusterResource) {
    // TODO: need to consider non-empty node labels when resource limits
    // support node labels
    // Even though ParentQueue sets limits that respect the child's max queue
    // capacity, CapacityScheduler doesn't do this when allocating a reserved
    // container. So we need to cap the limits by the queue's max capacity here.
this.cachedResourceLimitsForHeadroom =
new ResourceLimits(currentResourceLimits.getLimit());
Resource queueMaxResource =
Resources.multiplyAndNormalizeDown(resourceCalculator, labelManager
.getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
queueCapacities
.getAbsoluteMaximumCapacity(RMNodeLabelsManager.NO_LABEL),
minimumAllocation);
this.cachedResourceLimitsForHeadroom.setLimit(Resources.min(
resourceCalculator, clusterResource, queueMaxResource,
currentResourceLimits.getLimit()));
}
@Override
public synchronized void updateClusterResource(Resource clusterResource,
ResourceLimits currentResourceLimits) {
updateCurrentResourceLimits(currentResourceLimits, clusterResource);
lastClusterResource = clusterResource;
updateAbsoluteCapacityResource(clusterResource);
// Update headroom info based on new cluster resource value
// absoluteMaxCapacity now, will be replaced with absoluteMaxAvailCapacity
// during allocation
setQueueResourceLimitsInfo(clusterResource);
// Update metrics
CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
minimumAllocation, this, labelManager, null);
// queue metrics are updated, more resource may be available
// activate the pending applications if possible
activateApplications();
// Update application properties
for (FiCaSchedulerApp application :
orderingPolicy.getSchedulableEntities()) {
synchronized (application) {
computeUserLimitAndSetHeadroom(application, clusterResource,
RMNodeLabelsManager.NO_LABEL,
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
}
}
}
@VisibleForTesting
public static class User {
ResourceUsage userResourceUsage = new ResourceUsage();
volatile Resource userResourceLimit = Resource.newInstance(0, 0);
int pendingApplications = 0;
int activeApplications = 0;
public ResourceUsage getResourceUsage() {
return userResourceUsage;
}
public Resource getUsed() {
return userResourceUsage.getUsed();
}
public Resource getAllUsed() {
return userResourceUsage.getAllUsed();
}
public Resource getUsed(String label) {
return userResourceUsage.getUsed(label);
}
public int getPendingApplications() {
return pendingApplications;
}
public int getActiveApplications() {
return activeApplications;
}
public Resource getConsumedAMResources() {
return userResourceUsage.getAMUsed();
}
public int getTotalApplications() {
return getPendingApplications() + getActiveApplications();
}
public synchronized void submitApplication() {
++pendingApplications;
}
public synchronized void activateApplication() {
--pendingApplications;
++activeApplications;
}
public synchronized void finishApplication(boolean wasActive) {
if (wasActive) {
--activeApplications;
}
else {
--pendingApplications;
}
}
public void assignContainer(Resource resource, String nodePartition) {
userResourceUsage.incUsed(nodePartition, resource);
}
public void releaseContainer(Resource resource, String nodePartition) {
userResourceUsage.decUsed(nodePartition, resource);
}
public Resource getUserResourceLimit() {
return userResourceLimit;
}
public void setUserResourceLimit(Resource userResourceLimit) {
this.userResourceLimit = userResourceLimit;
}
}
@Override
public void recoverContainer(Resource clusterResource,
SchedulerApplicationAttempt attempt, RMContainer rmContainer) {
if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
return;
}
// Careful! Locking order is important!
synchronized (this) {
FiCaSchedulerNode node =
scheduler.getNode(rmContainer.getContainer().getNodeId());
allocateResource(clusterResource, attempt, rmContainer.getContainer()
.getResource(), node.getPartition(), rmContainer);
}
getParent().recoverContainer(clusterResource, attempt, rmContainer);
}
/**
* Obtain (read-only) collection of active applications.
*/
public Collection<FiCaSchedulerApp> getApplications() {
return orderingPolicy.getSchedulableEntities();
}
  // return a single Resource capturing the overall amount of pending resources
public synchronized Resource getTotalResourcePending() {
Resource ret = BuilderUtils.newResource(0, 0);
for (FiCaSchedulerApp f :
orderingPolicy.getSchedulableEntities()) {
Resources.addTo(ret, f.getTotalPendingRequests());
}
return ret;
}
@Override
public synchronized void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps) {
for (FiCaSchedulerApp pendingApp : pendingApplications) {
apps.add(pendingApp.getApplicationAttemptId());
}
for (FiCaSchedulerApp app :
orderingPolicy.getSchedulableEntities()) {
apps.add(app.getApplicationAttemptId());
}
}
@Override
public void attachContainer(Resource clusterResource,
FiCaSchedulerApp application, RMContainer rmContainer) {
if (application != null) {
FiCaSchedulerNode node =
scheduler.getNode(rmContainer.getContainer().getNodeId());
allocateResource(clusterResource, application, rmContainer.getContainer()
.getResource(), node.getPartition(), rmContainer);
LOG.info("movedContainer" + " container=" + rmContainer.getContainer()
+ " resource=" + rmContainer.getContainer().getResource()
+ " queueMoveIn=" + this + " usedCapacity=" + getUsedCapacity()
+ " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used="
+ queueUsage.getUsed() + " cluster=" + clusterResource);
// Inform the parent queue
getParent().attachContainer(clusterResource, application, rmContainer);
}
}
@Override
public void detachContainer(Resource clusterResource,
FiCaSchedulerApp application, RMContainer rmContainer) {
if (application != null) {
FiCaSchedulerNode node =
scheduler.getNode(rmContainer.getContainer().getNodeId());
releaseResource(clusterResource, application, rmContainer.getContainer()
.getResource(), node.getPartition(), rmContainer);
LOG.info("movedContainer" + " container=" + rmContainer.getContainer()
+ " resource=" + rmContainer.getContainer().getResource()
+ " queueMoveOut=" + this + " usedCapacity=" + getUsedCapacity()
+ " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used="
+ queueUsage.getUsed() + " cluster=" + clusterResource);
// Inform the parent queue
getParent().detachContainer(clusterResource, application, rmContainer);
}
}
/**
   * Returns all ignore-partition-exclusivity RMContainers in the LeafQueue.
   * This is used by the preemption policy; access to the returned
   * containers must be protected by the LeafQueue's synchronized lock.
*/
public synchronized Map<String, TreeSet<RMContainer>>
getIgnoreExclusivityRMContainers() {
return ignorePartitionExclusivityRMContainers;
}
public void setCapacity(float capacity) {
queueCapacities.setCapacity(capacity);
}
public void setAbsoluteCapacity(float absoluteCapacity) {
queueCapacities.setAbsoluteCapacity(absoluteCapacity);
}
public void setMaxApplications(int maxApplications) {
this.maxApplications = maxApplications;
}
public synchronized OrderingPolicy<FiCaSchedulerApp>
getOrderingPolicy() {
return orderingPolicy;
}
public synchronized void setOrderingPolicy(
OrderingPolicy<FiCaSchedulerApp> orderingPolicy) {
orderingPolicy.addAllSchedulableEntities(
this.orderingPolicy.getSchedulableEntities()
);
this.orderingPolicy = orderingPolicy;
}
@Override
public Priority getDefaultApplicationPriority() {
return defaultAppPriorityPerQueue;
}
/*
* Holds shared values used by all applications in
* the queue to calculate headroom on demand
*/
static class QueueResourceLimitsInfo {
private Resource queueCurrentLimit;
private Resource clusterResource;
public void setQueueCurrentLimit(Resource currentLimit) {
this.queueCurrentLimit = currentLimit;
}
public Resource getQueueCurrentLimit() {
return queueCurrentLimit;
}
public void setClusterResource(Resource clusterResource) {
this.clusterResource = clusterResource;
}
public Resource getClusterResource() {
return clusterResource;
}
}
}
| 56,312 | 36.768612 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSAssignment.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.util.resource.Resources;
@Private
@Unstable
public class CSAssignment {
public static final CSAssignment NULL_ASSIGNMENT =
new CSAssignment(Resources.createResource(0, 0), NodeType.NODE_LOCAL);
public static final CSAssignment SKIP_ASSIGNMENT = new CSAssignment(true);
private Resource resource;
private NodeType type;
private RMContainer excessReservation;
private FiCaSchedulerApp application;
private final boolean skipped;
private boolean fulfilledReservation;
private final AssignmentInformation assignmentInformation;
public CSAssignment(Resource resource, NodeType type) {
this(resource, type, null, null, false, false);
}
public CSAssignment(FiCaSchedulerApp application,
RMContainer excessReservation) {
this(excessReservation.getContainer().getResource(), NodeType.NODE_LOCAL,
excessReservation, application, false, false);
}
public CSAssignment(boolean skipped) {
this(Resource.newInstance(0, 0), NodeType.NODE_LOCAL, null, null, skipped,
false);
}
public CSAssignment(Resource resource, NodeType type,
RMContainer excessReservation, FiCaSchedulerApp application,
boolean skipped, boolean fulfilledReservation) {
this.resource = resource;
this.type = type;
this.excessReservation = excessReservation;
this.application = application;
this.skipped = skipped;
this.fulfilledReservation = fulfilledReservation;
this.assignmentInformation = new AssignmentInformation();
}
public Resource getResource() {
return resource;
}
public void setResource(Resource resource) {
this.resource = resource;
}
public NodeType getType() {
return type;
}
public void setType(NodeType type) {
this.type = type;
}
public FiCaSchedulerApp getApplication() {
return application;
}
public void setApplication(FiCaSchedulerApp application) {
this.application = application;
}
public RMContainer getExcessReservation() {
return excessReservation;
}
public void setExcessReservation(RMContainer rmContainer) {
excessReservation = rmContainer;
}
public boolean getSkipped() {
return skipped;
}
@Override
public String toString() {
String ret = "resource:" + resource.toString();
ret += "; type:" + type;
ret += "; excessReservation:" + excessReservation;
ret +=
"; applicationid:"
+ (application != null ? application.getApplicationId().toString()
: "null");
ret += "; skipped:" + skipped;
ret += "; fulfilled reservation:" + fulfilledReservation;
ret +=
"; allocations(count/resource):"
+ assignmentInformation.getNumAllocations() + "/"
+ assignmentInformation.getAllocated().toString();
ret +=
"; reservations(count/resource):"
+ assignmentInformation.getNumReservations() + "/"
+ assignmentInformation.getReserved().toString();
return ret;
}
public void setFulfilledReservation(boolean fulfilledReservation) {
this.fulfilledReservation = fulfilledReservation;
}
public boolean isFulfilledReservation() {
return this.fulfilledReservation;
}
public AssignmentInformation getAssignmentInformation() {
return this.assignmentInformation;
}
}
| 4,721 | 32.489362 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/SchedulingMode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
/**
* Scheduling modes, see below for detailed explanations
*/
public enum SchedulingMode {
/**
* <p>
   * When a node has a partition (say partition=x), only applications in
   * queues that can access partition=x AND that request partition=x
   * resources get a chance to allocate on the node.
* </p>
*
* <p>
   * When a node has no partition, only applications requesting
   * non-partitioned resources get a chance to allocate on the node.
* </p>
*/
RESPECT_PARTITION_EXCLUSIVITY,
/**
   * Only used when a node has a partition AND the partition isn't an
   * exclusive partition AND the application requests non-partitioned
   * resources.
*/
IGNORE_PARTITION_EXCLUSIVITY
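  // Editorial note (hedged illustration): for a node in a hypothetical
  // non-exclusive partition "x", requests that ask for partition "x" are
  // considered under RESPECT_PARTITION_EXCLUSIVITY, while requests for
  // non-partitioned (default) resources may additionally be tried on that
  // node under IGNORE_PARTITION_EXCLUSIVITY when the partition has idle
  // capacity.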
}
| 1,584 | 34.222222 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueStatistics;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.security.PrivilegedEntity;
import org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.collect.Sets;
public abstract class AbstractCSQueue implements CSQueue {
private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class);
CSQueue parent;
final String queueName;
volatile int numContainers;
final Resource minimumAllocation;
volatile Resource maximumAllocation;
QueueState state;
final CSQueueMetrics metrics;
protected final PrivilegedEntity queueEntity;
final ResourceCalculator resourceCalculator;
Set<String> accessibleLabels;
RMNodeLabelsManager labelManager;
String defaultLabelExpression;
Map<AccessType, AccessControlList> acls =
new HashMap<AccessType, AccessControlList>();
volatile boolean reservationsContinueLooking;
private boolean preemptionDisabled;
// Track resource usage-by-label like used-resource/pending-resource, etc.
ResourceUsage queueUsage;
  // Track capacities like used-capacity/abs-used-capacity/capacity/abs-capacity,
// etc.
QueueCapacities queueCapacities;
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
protected CapacitySchedulerContext csContext;
protected YarnAuthorizationProvider authorizer = null;
public AbstractCSQueue(CapacitySchedulerContext cs,
String queueName, CSQueue parent, CSQueue old) throws IOException {
this.labelManager = cs.getRMContext().getNodeLabelManager();
this.parent = parent;
this.queueName = queueName;
this.resourceCalculator = cs.getResourceCalculator();
    // must be called after parent and queueName are set
this.metrics =
old != null ? (CSQueueMetrics) old.getMetrics() : CSQueueMetrics
.forQueue(getQueuePath(), parent, cs.getConfiguration()
.getEnableUserMetrics(), cs.getConf());
this.csContext = cs;
this.minimumAllocation = csContext.getMinimumResourceCapability();
// initialize ResourceUsage
queueUsage = new ResourceUsage();
queueEntity = new PrivilegedEntity(EntityType.QUEUE, getQueuePath());
// initialize QueueCapacities
queueCapacities = new QueueCapacities(parent == null);
}
protected void setupConfigurableCapacities() {
CSQueueUtils.loadUpdateAndCheckCapacities(
getQueuePath(),
csContext.getConfiguration(),
queueCapacities,
parent == null ? null : parent.getQueueCapacities());
}
@Override
public synchronized float getCapacity() {
return queueCapacities.getCapacity();
}
@Override
public synchronized float getAbsoluteCapacity() {
return queueCapacities.getAbsoluteCapacity();
}
@Override
public float getAbsoluteMaximumCapacity() {
return queueCapacities.getAbsoluteMaximumCapacity();
}
@Override
public synchronized float getAbsoluteUsedCapacity() {
return queueCapacities.getAbsoluteUsedCapacity();
}
@Override
public float getMaximumCapacity() {
return queueCapacities.getMaximumCapacity();
}
@Override
public synchronized float getUsedCapacity() {
return queueCapacities.getUsedCapacity();
}
@Override
public Resource getUsedResources() {
return queueUsage.getUsed();
}
public synchronized int getNumContainers() {
return numContainers;
}
@Override
public synchronized QueueState getState() {
return state;
}
@Override
public CSQueueMetrics getMetrics() {
return metrics;
}
@Override
public String getQueueName() {
return queueName;
}
public PrivilegedEntity getPrivilegedEntity() {
return queueEntity;
}
@Override
public synchronized CSQueue getParent() {
return parent;
}
@Override
public synchronized void setParent(CSQueue newParentQueue) {
this.parent = (ParentQueue)newParentQueue;
}
public Set<String> getAccessibleNodeLabels() {
return accessibleLabels;
}
@Override
public boolean hasAccess(QueueACL acl, UserGroupInformation user) {
return authorizer.checkPermission(SchedulerUtils.toAccessType(acl),
queueEntity, user);
}
@Override
public synchronized void setUsedCapacity(float usedCapacity) {
queueCapacities.setUsedCapacity(usedCapacity);
}
@Override
public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) {
queueCapacities.setAbsoluteUsedCapacity(absUsedCapacity);
}
/**
* Set maximum capacity - used only for testing.
* @param maximumCapacity new max capacity
*/
synchronized void setMaxCapacity(float maximumCapacity) {
// Sanity check
CSQueueUtils.checkMaxCapacity(getQueueName(),
queueCapacities.getCapacity(), maximumCapacity);
float absMaxCapacity =
CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent);
CSQueueUtils.checkAbsoluteCapacity(getQueueName(),
queueCapacities.getAbsoluteCapacity(),
absMaxCapacity);
queueCapacities.setMaximumCapacity(maximumCapacity);
queueCapacities.setAbsoluteMaximumCapacity(absMaxCapacity);
}
@Override
public String getDefaultNodeLabelExpression() {
return defaultLabelExpression;
}
synchronized void setupQueueConfigs(Resource clusterResource)
throws IOException {
// get labels
this.accessibleLabels =
csContext.getConfiguration().getAccessibleNodeLabels(getQueuePath());
this.defaultLabelExpression = csContext.getConfiguration()
.getDefaultNodeLabelExpression(getQueuePath());
// inherit from parent if labels not set
if (this.accessibleLabels == null && parent != null) {
this.accessibleLabels = parent.getAccessibleNodeLabels();
}
    // inherit from parent if the default label expression is not set
if (this.defaultLabelExpression == null && parent != null
&& this.accessibleLabels.containsAll(parent.getAccessibleNodeLabels())) {
this.defaultLabelExpression = parent.getDefaultNodeLabelExpression();
}
// After we setup labels, we can setup capacities
setupConfigurableCapacities();
this.maximumAllocation =
csContext.getConfiguration().getMaximumAllocationPerQueue(
getQueuePath());
authorizer = YarnAuthorizationProvider.getInstance(csContext.getConf());
this.state = csContext.getConfiguration().getState(getQueuePath());
this.acls = csContext.getConfiguration().getAcls(getQueuePath());
// Update metrics
CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
minimumAllocation, this, labelManager, null);
    // Check if the labels of this queue are a subset of the parent queue's
    // labels; only do this when we are not root
if (parent != null && parent.getParent() != null) {
if (parent.getAccessibleNodeLabels() != null
&& !parent.getAccessibleNodeLabels().contains(RMNodeLabelsManager.ANY)) {
// if parent isn't "*", child shouldn't be "*" too
if (this.getAccessibleNodeLabels().contains(RMNodeLabelsManager.ANY)) {
throw new IOException("Parent's accessible queue is not ANY(*), "
+ "but child's accessible queue is *");
} else {
Set<String> diff =
Sets.difference(this.getAccessibleNodeLabels(),
parent.getAccessibleNodeLabels());
if (!diff.isEmpty()) {
throw new IOException("Some labels of child queue is not a subset "
+ "of parent queue, these labels=["
+ StringUtils.join(diff, ",") + "]");
}
}
}
}
this.reservationsContinueLooking = csContext.getConfiguration()
.getReservationContinueLook();
this.preemptionDisabled = isQueueHierarchyPreemptionDisabled(this);
}
protected QueueInfo getQueueInfo() {
QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
queueInfo.setQueueName(queueName);
queueInfo.setAccessibleNodeLabels(accessibleLabels);
queueInfo.setCapacity(queueCapacities.getCapacity());
queueInfo.setMaximumCapacity(queueCapacities.getMaximumCapacity());
queueInfo.setQueueState(state);
queueInfo.setDefaultNodeLabelExpression(defaultLabelExpression);
queueInfo.setCurrentCapacity(getUsedCapacity());
queueInfo.setQueueStatistics(getQueueStatistics());
return queueInfo;
}
public QueueStatistics getQueueStatistics() {
QueueStatistics stats =
recordFactory.newRecordInstance(QueueStatistics.class);
stats.setNumAppsSubmitted(getMetrics().getAppsSubmitted());
stats.setNumAppsRunning(getMetrics().getAppsRunning());
stats.setNumAppsPending(getMetrics().getAppsPending());
stats.setNumAppsCompleted(getMetrics().getAppsCompleted());
stats.setNumAppsKilled(getMetrics().getAppsKilled());
stats.setNumAppsFailed(getMetrics().getAppsFailed());
stats.setNumActiveUsers(getMetrics().getActiveUsers());
stats.setAvailableMemoryMB(getMetrics().getAvailableMB());
stats.setAllocatedMemoryMB(getMetrics().getAllocatedMB());
stats.setPendingMemoryMB(getMetrics().getPendingMB());
stats.setReservedMemoryMB(getMetrics().getReservedMB());
stats.setAvailableVCores(getMetrics().getAvailableVirtualCores());
stats.setAllocatedVCores(getMetrics().getAllocatedVirtualCores());
stats.setPendingVCores(getMetrics().getPendingVirtualCores());
stats.setReservedVCores(getMetrics().getReservedVirtualCores());
stats.setPendingContainers(getMetrics().getPendingContainers());
stats.setAllocatedContainers(getMetrics().getAllocatedContainers());
stats.setReservedContainers(getMetrics().getReservedContainers());
return stats;
}
@Private
public Resource getMaximumAllocation() {
return maximumAllocation;
}
@Private
public Resource getMinimumAllocation() {
return minimumAllocation;
}
synchronized void allocateResource(Resource clusterResource,
Resource resource, String nodePartition) {
queueUsage.incUsed(nodePartition, resource);
++numContainers;
CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
minimumAllocation, this, labelManager, nodePartition);
}
protected synchronized void releaseResource(Resource clusterResource,
Resource resource, String nodePartition) {
queueUsage.decUsed(nodePartition, resource);
CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
minimumAllocation, this, labelManager, nodePartition);
--numContainers;
}
@Private
public boolean getReservationContinueLooking() {
return reservationsContinueLooking;
}
@Private
public Map<AccessType, AccessControlList> getACLs() {
return acls;
}
@Private
public boolean getPreemptionDisabled() {
return preemptionDisabled;
}
@Private
public QueueCapacities getQueueCapacities() {
return queueCapacities;
}
@Private
public ResourceUsage getQueueResourceUsage() {
return queueUsage;
}
/**
* The specified queue is preemptable if system-wide preemption is turned on
* unless any queue in the <em>qPath</em> hierarchy has explicitly turned
* preemption off.
* NOTE: Preemptability is inherited from a queue's parent.
*
* @return true if queue has preemption disabled, false otherwise
*/
private boolean isQueueHierarchyPreemptionDisabled(CSQueue q) {
CapacitySchedulerConfiguration csConf = csContext.getConfiguration();
boolean systemWidePreemption =
csConf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS);
CSQueue parentQ = q.getParent();
// If the system-wide preemption switch is turned off, all of the queues in
// the qPath hierarchy have preemption disabled, so return true.
if (!systemWidePreemption) return true;
// If q is the root queue and the system-wide preemption switch is turned
// on, then q does not have preemption disabled (default=false, below)
// unless the preemption_disabled property is explicitly set.
if (parentQ == null) {
return csConf.getPreemptionDisabled(q.getQueuePath(), false);
}
// If this is not the root queue, inherit the default value for the
// preemption_disabled property from the parent. Preemptability will be
// inherited from the parent's hierarchy unless explicitly overridden at
// this level.
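// Illustrative example (hypothetical queue names): if root.a sets
// preemption_disabled=true and root.a.b does not set it, root.a.b inherits
// true from its parent; root.a.b can re-enable preemption only by
// explicitly setting preemption_disabled=false at its own level.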
return csConf.getPreemptionDisabled(q.getQueuePath(),
parentQ.getPreemptionDisabled());
}
private Resource getCurrentLimitResource(String nodePartition,
Resource clusterResource, ResourceLimits currentResourceLimits,
SchedulingMode schedulingMode) {
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
/*
* Current limit resource:
* For a labeled resource: limit = queue-max-resource
* (TODO: this part needs updating when we support labeled-limit).
* For a non-labeled resource: limit = min(queue-max-resource,
* limit-set-by-parent)
*/
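// Worked example (assumed numbers): with 100GB of resource in this
// partition, an absolute maximum capacity of 0.5 and a 1GB minimum
// allocation, queueMaxResource normalizes down to 50GB; for the default
// partition the effective limit is min(50GB, the limit set by the parent).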
Resource queueMaxResource =
Resources.multiplyAndNormalizeDown(resourceCalculator,
labelManager.getResourceByLabel(nodePartition, clusterResource),
queueCapacities.getAbsoluteMaximumCapacity(nodePartition), minimumAllocation);
if (nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) {
return Resources.min(resourceCalculator, clusterResource,
queueMaxResource, currentResourceLimits.getLimit());
}
return queueMaxResource;
} else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
// When doing non-exclusive resource allocation, the maximum capacity of
// every queue on this label equals the total resource of the label.
return labelManager.getResourceByLabel(nodePartition, clusterResource);
}
return Resources.none();
}
synchronized boolean canAssignToThisQueue(Resource clusterResource,
String nodePartition, ResourceLimits currentResourceLimits, Resource resourceCouldBeUnreserved,
SchedulingMode schedulingMode) {
// Get current limited resource:
// - When doing RESPECT_PARTITION_EXCLUSIVITY allocation, we will respect
// queues' max capacity.
// - When doing IGNORE_PARTITION_EXCLUSIVITY allocation, we will not respect
// queue's max capacity; the queue's max capacity on the partition is
// considered to be 100%, i.e. a queue can use all of the partition's
// resource.
// We do this because, for non-exclusive allocation, we only proceed when
// there is idle resource on the partition; to avoid wastage, that resource
// is leveraged as much as possible, and the preemption policy reclaims it
// when partitioned resource requests come back.
Resource currentLimitResource =
getCurrentLimitResource(nodePartition, clusterResource,
currentResourceLimits, schedulingMode);
Resource nowTotalUsed = queueUsage.getUsed(nodePartition);
// Set headroom for currentResourceLimits
currentResourceLimits.setHeadroom(Resources.subtract(currentLimitResource,
nowTotalUsed));
if (Resources.greaterThanOrEqual(resourceCalculator, clusterResource,
nowTotalUsed, currentLimitResource)) {
// If reservation continue-looking is enabled, check whether we could
// potentially use this node instead of a reserved node if the application
// has reserved containers.
// TODO, now only consider reservation cases when the node has no label
if (this.reservationsContinueLooking
&& nodePartition.equals(RMNodeLabelsManager.NO_LABEL)
&& Resources.greaterThan(resourceCalculator, clusterResource,
resourceCouldBeUnreserved, Resources.none())) {
// resource-without-reserved = used - reserved
Resource newTotalWithoutReservedResource =
Resources.subtract(nowTotalUsed, resourceCouldBeUnreserved);
// when total-used-without-reserved-resource < currentLimit, we still
// have chance to allocate on this node by unreserving some containers
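// e.g. (assumed numbers): used=45GB, limit=40GB, reserved=10GB;
// used-without-reserved=35GB < 40GB, so unreserving some containers could
// still let this queue place a new container on this node.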
if (Resources.lessThan(resourceCalculator, clusterResource,
newTotalWithoutReservedResource, currentLimitResource)) {
if (LOG.isDebugEnabled()) {
LOG.debug("try to use reserved: " + getQueueName()
+ " usedResources: " + queueUsage.getUsed()
+ ", clusterResources: " + clusterResource
+ ", reservedResources: " + resourceCouldBeUnreserved
+ ", capacity-without-reserved: "
+ newTotalWithoutReservedResource + ", maxLimitCapacity: "
+ currentLimitResource);
}
return true;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(getQueueName()
+ "Check assign to queue, nodePartition="
+ nodePartition
+ " usedResources: "
+ queueUsage.getUsed(nodePartition)
+ " clusterResources: "
+ clusterResource
+ " currentUsedCapacity "
+ Resources.divide(resourceCalculator, clusterResource,
queueUsage.getUsed(nodePartition),
labelManager.getResourceByLabel(nodePartition, clusterResource))
+ " max-capacity: "
+ queueCapacities.getAbsoluteMaximumCapacity(nodePartition) + ")");
}
return false;
}
return true;
}
@Override
public void incPendingResource(String nodeLabel, Resource resourceToInc) {
if (nodeLabel == null) {
nodeLabel = RMNodeLabelsManager.NO_LABEL;
}
// ResourceUsage has its own lock; no additional lock is needed here.
queueUsage.incPending(nodeLabel, resourceToInc);
if (null != parent) {
parent.incPendingResource(nodeLabel, resourceToInc);
}
}
@Override
public void decPendingResource(String nodeLabel, Resource resourceToDec) {
if (nodeLabel == null) {
nodeLabel = RMNodeLabelsManager.NO_LABEL;
}
// ResourceUsage has its own lock; no additional lock is needed here.
queueUsage.decPending(nodeLabel, resourceToDec);
if (null != parent) {
parent.decPendingResource(nodeLabel, resourceToDec);
}
}
/**
* Return if the queue has pending resource on given nodePartition and
* schedulingMode.
*/
boolean hasPendingResourceRequest(String nodePartition,
Resource cluster, SchedulingMode schedulingMode) {
return SchedulerUtils.hasPendingResourceRequest(resourceCalculator,
queueUsage, nodePartition, cluster, schedulingMode);
}
public boolean accessibleToPartition(String nodePartition) {
// if queue's label is *, it can access any node
if (accessibleLabels != null
&& accessibleLabels.contains(RMNodeLabelsManager.ANY)) {
return true;
}
// any queue can access a node without a label
if (nodePartition == null
|| nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) {
return true;
}
// a queue can access a node only if its accessible labels contain the node's partition
if (accessibleLabels != null && accessibleLabels.contains(nodePartition)) {
return true;
}
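// Illustrative example (hypothetical labels): a queue with
// accessible-node-labels = {"gpu"} can use "gpu" nodes and unlabeled
// nodes, but not nodes in a different partition such as "fpga".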
// otherwise, this queue cannot access the node
return false;
}
@Override
public Priority getDefaultApplicationPriority() {
// TODO add dummy implementation
return null;
}
}
| 21,905 | 36.703959 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.util.resource.Resources;
public class ContainerAllocation {
public static final ContainerAllocation PRIORITY_SKIPPED =
new ContainerAllocation(null, null, AllocationState.PRIORITY_SKIPPED);
public static final ContainerAllocation APP_SKIPPED =
new ContainerAllocation(null, null, AllocationState.APP_SKIPPED);
public static final ContainerAllocation QUEUE_SKIPPED =
new ContainerAllocation(null, null, AllocationState.QUEUE_SKIPPED);
public static final ContainerAllocation LOCALITY_SKIPPED =
new ContainerAllocation(null, null, AllocationState.LOCALITY_SKIPPED);
RMContainer containerToBeUnreserved;
private Resource resourceToBeAllocated = Resources.none();
AllocationState state;
NodeType containerNodeType = NodeType.NODE_LOCAL;
NodeType requestNodeType = NodeType.NODE_LOCAL;
Container updatedContainer;
public ContainerAllocation(RMContainer containerToBeUnreserved,
Resource resourceToBeAllocated, AllocationState state) {
this.containerToBeUnreserved = containerToBeUnreserved;
this.resourceToBeAllocated = resourceToBeAllocated;
this.state = state;
}
public RMContainer getContainerToBeUnreserved() {
return containerToBeUnreserved;
}
public Resource getResourceToBeAllocated() {
if (resourceToBeAllocated == null) {
return Resources.none();
}
return resourceToBeAllocated;
}
public AllocationState getAllocationState() {
return state;
}
public NodeType getContainerNodeType() {
return containerNodeType;
}
public Container getUpdatedContainer() {
return updatedContainer;
}
}
| 2,812 | 35.532468 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AllocationState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator;
public enum AllocationState {
APP_SKIPPED,
PRIORITY_SKIPPED,
LOCALITY_SKIPPED,
QUEUE_SKIPPED,
ALLOCATED,
RESERVED
}
| 1,020 | 35.464286 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* Allocates normal (new) containers, considering locality, labels, etc. Uses
* the delayed scheduling mechanism to obtain better locality.
*/
public class RegularContainerAllocator extends ContainerAllocator {
private static final Log LOG = LogFactory.getLog(RegularContainerAllocator.class);
private ResourceRequest lastResourceRequest = null;
public RegularContainerAllocator(FiCaSchedulerApp application,
ResourceCalculator rc, RMContext rmContext) {
super(application, rc, rmContext);
}
private ContainerAllocation preCheckForNewContainer(Resource clusterResource,
FiCaSchedulerNode node, SchedulingMode schedulingMode,
ResourceLimits resourceLimits, Priority priority) {
if (SchedulerAppUtils.isBlacklisted(application, node, LOG)) {
return ContainerAllocation.APP_SKIPPED;
}
ResourceRequest anyRequest =
application.getResourceRequest(priority, ResourceRequest.ANY);
if (null == anyRequest) {
return ContainerAllocation.PRIORITY_SKIPPED;
}
// Required resource
Resource required = anyRequest.getCapability();
// Do we need containers at this 'priority'?
if (application.getTotalRequiredResources(priority) <= 0) {
return ContainerAllocation.PRIORITY_SKIPPED;
}
// AM container allocation doesn't support non-exclusive allocation, to
// avoid the pain of preempting an AM container
if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
RMAppAttempt rmAppAttempt =
rmContext.getRMApps().get(application.getApplicationId())
.getCurrentAppAttempt();
if (!rmAppAttempt.getSubmissionContext().getUnmanagedAM()
&& null == rmAppAttempt.getMasterContainer()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip allocating AM container to app_attempt="
+ application.getApplicationAttemptId()
+ ", don't allow to allocate AM container in non-exclusive mode");
}
return ContainerAllocation.APP_SKIPPED;
}
}
// Does the node-label-expression of this off-switch resource request
// match the node's partition?
// If not, jump to the next priority.
if (!SchedulerUtils.checkResourceRequestMatchingNodePartition(anyRequest,
node.getPartition(), schedulingMode)) {
return ContainerAllocation.PRIORITY_SKIPPED;
}
if (!application.getCSLeafQueue().getReservationContinueLooking()) {
if (!shouldAllocOrReserveNewContainer(priority, required)) {
if (LOG.isDebugEnabled()) {
LOG.debug("doesn't need containers based on reservation algo!");
}
return ContainerAllocation.PRIORITY_SKIPPED;
}
}
if (!checkHeadroom(clusterResource, resourceLimits, required, node)) {
if (LOG.isDebugEnabled()) {
LOG.debug("cannot allocate required resource=" + required
+ " because of headroom");
}
return ContainerAllocation.QUEUE_SKIPPED;
}
// Inform the application it is about to get a scheduling opportunity
application.addSchedulingOpportunity(priority);
// Increase missed-non-partitioned-resource-request-opportunity.
// This is to make sure non-partitioned resource requests are preferred
// for allocation on non-partitioned nodes
int missedNonPartitionedRequestSchedulingOpportunity = 0;
if (anyRequest.getNodeLabelExpression()
.equals(RMNodeLabelsManager.NO_LABEL)) {
missedNonPartitionedRequestSchedulingOpportunity =
application
.addMissedNonPartitionedRequestSchedulingOpportunity(priority);
}
if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
// Before doing the allocation, check the scheduling opportunity to make
// sure non-partitioned resource requests are scheduled to the
// non-partitioned partition first.
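// e.g. (assumed numbers): in a 50-node cluster, a non-partitioned request
// must accumulate at least 50 missed scheduling opportunities before it
// may be allocated onto a labeled node in non-exclusive mode.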
if (missedNonPartitionedRequestSchedulingOpportunity < rmContext
.getScheduler().getNumClusterNodes()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip app_attempt=" + application.getApplicationAttemptId()
+ " priority=" + priority
+ " because missed-non-partitioned-resource-request"
+ " opportunity under requred:" + " Now="
+ missedNonPartitionedRequestSchedulingOpportunity + " required="
+ rmContext.getScheduler().getNumClusterNodes());
}
return ContainerAllocation.APP_SKIPPED;
}
}
return null;
}
@Override
ContainerAllocation preAllocation(Resource clusterResource,
FiCaSchedulerNode node, SchedulingMode schedulingMode,
ResourceLimits resourceLimits, Priority priority,
RMContainer reservedContainer) {
ContainerAllocation result;
if (null == reservedContainer) {
// pre-check when allocating new container
result =
preCheckForNewContainer(clusterResource, node, schedulingMode,
resourceLimits, priority);
if (null != result) {
return result;
}
} else {
// pre-check when allocating reserved container
if (application.getTotalRequiredResources(priority) == 0) {
// Release
return new ContainerAllocation(reservedContainer, null,
AllocationState.QUEUE_SKIPPED);
}
}
// Try to allocate containers on node
result =
assignContainersOnNode(clusterResource, node, priority,
reservedContainer, schedulingMode, resourceLimits);
if (null == reservedContainer) {
if (result.state == AllocationState.PRIORITY_SKIPPED) {
// Don't count 'skipped nodes' as a scheduling opportunity!
application.subtractSchedulingOpportunity(priority);
}
}
return result;
}
public synchronized float getLocalityWaitFactor(
Priority priority, int clusterNodes) {
// Estimate: Required unique resources (i.e. hosts + racks)
int requiredResources =
Math.max(application.getResourceRequests(priority).size() - 1, 0);
// waitFactor can't be more than '1'
// i.e. no point skipping more than cluster-size opportunities
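// Worked example (assumed numbers): requests on 2 hosts + 1 rack + ANY
// give requiredResources = 3; with 100 cluster nodes the wait factor is
// 3 / 100 = 0.03.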
return Math.min(((float)requiredResources / clusterNodes), 1.0f);
}
private int getActualNodeLocalityDelay() {
return Math.min(rmContext.getScheduler().getNumClusterNodes(), application
.getCSLeafQueue().getNodeLocalityDelay());
}
private boolean canAssign(Priority priority, FiCaSchedulerNode node,
NodeType type, RMContainer reservedContainer) {
// Clearly we need containers for this application...
if (type == NodeType.OFF_SWITCH) {
if (reservedContainer != null) {
return true;
}
// 'Delay' off-switch
ResourceRequest offSwitchRequest =
application.getResourceRequest(priority, ResourceRequest.ANY);
long missedOpportunities = application.getSchedulingOpportunities(priority);
long requiredContainers = offSwitchRequest.getNumContainers();
float localityWaitFactor =
getLocalityWaitFactor(priority, rmContext.getScheduler()
.getNumClusterNodes());
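// e.g. (assumed numbers): 4 required off-switch containers with a wait
// factor of 0.03 tolerate 4 * 0.03 = 0.12 missed opportunities, so the
// off-switch assignment may proceed after a single missed opportunity.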
return ((requiredContainers * localityWaitFactor) < missedOpportunities);
}
// Check if we need containers on this rack
ResourceRequest rackLocalRequest =
application.getResourceRequest(priority, node.getRackName());
if (rackLocalRequest == null || rackLocalRequest.getNumContainers() <= 0) {
return false;
}
// If we are here, we do need containers on this rack for RACK_LOCAL req
if (type == NodeType.RACK_LOCAL) {
// 'Delay' rack-local just a little bit...
long missedOpportunities = application.getSchedulingOpportunities(priority);
return getActualNodeLocalityDelay() < missedOpportunities;
}
// Check if we need containers on this host
if (type == NodeType.NODE_LOCAL) {
// Now check if we need containers on this host...
ResourceRequest nodeLocalRequest =
application.getResourceRequest(priority, node.getNodeName());
if (nodeLocalRequest != null) {
return nodeLocalRequest.getNumContainers() > 0;
}
}
return false;
}
private ContainerAllocation assignNodeLocalContainers(
Resource clusterResource, ResourceRequest nodeLocalResourceRequest,
FiCaSchedulerNode node, Priority priority, RMContainer reservedContainer,
SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
if (canAssign(priority, node, NodeType.NODE_LOCAL, reservedContainer)) {
return assignContainer(clusterResource, node, priority,
nodeLocalResourceRequest, NodeType.NODE_LOCAL, reservedContainer,
schedulingMode, currentResoureLimits);
}
// Skip node-local request, go to rack-local request
return ContainerAllocation.LOCALITY_SKIPPED;
}
private ContainerAllocation assignRackLocalContainers(
Resource clusterResource, ResourceRequest rackLocalResourceRequest,
FiCaSchedulerNode node, Priority priority, RMContainer reservedContainer,
SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
if (canAssign(priority, node, NodeType.RACK_LOCAL, reservedContainer)) {
return assignContainer(clusterResource, node, priority,
rackLocalResourceRequest, NodeType.RACK_LOCAL, reservedContainer,
schedulingMode, currentResoureLimits);
}
// Skip rack-local request, go to off-switch request
return ContainerAllocation.LOCALITY_SKIPPED;
}
private ContainerAllocation assignOffSwitchContainers(
Resource clusterResource, ResourceRequest offSwitchResourceRequest,
FiCaSchedulerNode node, Priority priority, RMContainer reservedContainer,
SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
if (canAssign(priority, node, NodeType.OFF_SWITCH, reservedContainer)) {
return assignContainer(clusterResource, node, priority,
offSwitchResourceRequest, NodeType.OFF_SWITCH, reservedContainer,
schedulingMode, currentResoureLimits);
}
return ContainerAllocation.QUEUE_SKIPPED;
}
private ContainerAllocation assignContainersOnNode(Resource clusterResource,
FiCaSchedulerNode node, Priority priority, RMContainer reservedContainer,
SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
ContainerAllocation assigned;
NodeType requestType = null;
// Data-local
ResourceRequest nodeLocalResourceRequest =
application.getResourceRequest(priority, node.getNodeName());
if (nodeLocalResourceRequest != null) {
requestType = NodeType.NODE_LOCAL;
assigned =
assignNodeLocalContainers(clusterResource, nodeLocalResourceRequest,
node, priority, reservedContainer, schedulingMode,
currentResoureLimits);
if (Resources.greaterThan(rc, clusterResource,
assigned.getResourceToBeAllocated(), Resources.none())) {
assigned.requestNodeType = requestType;
return assigned;
}
}
// Rack-local
ResourceRequest rackLocalResourceRequest =
application.getResourceRequest(priority, node.getRackName());
if (rackLocalResourceRequest != null) {
if (!rackLocalResourceRequest.getRelaxLocality()) {
return ContainerAllocation.PRIORITY_SKIPPED;
}
if (requestType != NodeType.NODE_LOCAL) {
requestType = NodeType.RACK_LOCAL;
}
assigned =
assignRackLocalContainers(clusterResource, rackLocalResourceRequest,
node, priority, reservedContainer, schedulingMode,
currentResoureLimits);
if (Resources.greaterThan(rc, clusterResource,
assigned.getResourceToBeAllocated(), Resources.none())) {
assigned.requestNodeType = requestType;
return assigned;
}
}
// Off-switch
ResourceRequest offSwitchResourceRequest =
application.getResourceRequest(priority, ResourceRequest.ANY);
if (offSwitchResourceRequest != null) {
if (!offSwitchResourceRequest.getRelaxLocality()) {
return ContainerAllocation.PRIORITY_SKIPPED;
}
if (requestType != NodeType.NODE_LOCAL
&& requestType != NodeType.RACK_LOCAL) {
requestType = NodeType.OFF_SWITCH;
}
assigned =
assignOffSwitchContainers(clusterResource, offSwitchResourceRequest,
node, priority, reservedContainer, schedulingMode,
currentResoureLimits);
assigned.requestNodeType = requestType;
return assigned;
}
return ContainerAllocation.PRIORITY_SKIPPED;
}
private ContainerAllocation assignContainer(Resource clusterResource,
FiCaSchedulerNode node, Priority priority, ResourceRequest request,
NodeType type, RMContainer rmContainer, SchedulingMode schedulingMode,
ResourceLimits currentResoureLimits) {
lastResourceRequest = request;
if (LOG.isDebugEnabled()) {
LOG.debug("assignContainers: node=" + node.getNodeName()
+ " application=" + application.getApplicationId()
+ " priority=" + priority.getPriority()
+ " request=" + request + " type=" + type);
}
// check if the resource request can access the label
if (!SchedulerUtils.checkResourceRequestMatchingNodePartition(request,
node.getPartition(), schedulingMode)) {
// This is a reserved container, but we cannot allocate it now because
// its label no longer matches; this can be caused by a node label change.
// We should un-reserve this container.
return new ContainerAllocation(rmContainer, null,
AllocationState.QUEUE_SKIPPED);
}
Resource capability = request.getCapability();
Resource available = node.getAvailableResource();
Resource totalResource = node.getTotalResource();
if (!Resources.lessThanOrEqual(rc, clusterResource,
capability, totalResource)) {
LOG.warn("Node : " + node.getNodeID()
+ " does not have sufficient resource for request : " + request
+ " node total capability : " + node.getTotalResource());
return ContainerAllocation.QUEUE_SKIPPED;
}
assert Resources.greaterThan(
rc, clusterResource, available, Resources.none());
boolean shouldAllocOrReserveNewContainer = shouldAllocOrReserveNewContainer(
priority, capability);
// Can we allocate a container on this node?
int availableContainers =
rc.computeAvailableContainers(available, capability);
// The amount we need to unreserve equals:
// max(required - headroom, amountNeededUnreserve)
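// e.g. (assumed numbers): required=4GB, headroom=1GB and
// amountNeededUnreserve=2GB give max(4GB - 1GB, 2GB) = 3GB to unreserve.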
Resource resourceNeedToUnReserve =
Resources.max(rc, clusterResource,
Resources.subtract(capability, currentResoureLimits.getHeadroom()),
currentResoureLimits.getAmountNeededUnreserve());
boolean needToUnreserve =
Resources.greaterThan(rc, clusterResource,
resourceNeedToUnReserve, Resources.none());
RMContainer unreservedContainer = null;
boolean reservationsContinueLooking =
application.getCSLeafQueue().getReservationContinueLooking();
if (availableContainers > 0) {
// Allocate...
// We will only do continuous reservation when this is not allocated from
// a reserved container
if (rmContainer == null && reservationsContinueLooking
&& node.getLabels().isEmpty()) {
// when reservationsContinueLooking is set, we may need to unreserve
// some containers to meet this queue, its parents', or the users'
// resource limits.
// TODO, need change here when we want to support continuous reservation
// looking for labeled partitions.
if (!shouldAllocOrReserveNewContainer || needToUnreserve) {
if (!needToUnreserve) {
// If we shouldn't allocate/reserve a new container, then we should
// unreserve one of the same size we are asking for, since
// currentResoureLimits.getAmountNeededUnreserve() could be zero. If
// the limit was hit, use the amount we need to unreserve to get
// under the limit.
resourceNeedToUnReserve = capability;
}
unreservedContainer =
application.findNodeToUnreserve(clusterResource, node, priority,
resourceNeedToUnReserve);
// When minimum-unreserved-resource > 0 OR we cannot allocate a
// new/reserved container, we *have to* unreserve some resource to
// continue. If we failed to unreserve any resource, we can't continue.
if (null == unreservedContainer) {
return ContainerAllocation.QUEUE_SKIPPED;
}
}
}
ContainerAllocation result =
new ContainerAllocation(unreservedContainer, request.getCapability(),
AllocationState.ALLOCATED);
result.containerNodeType = type;
return result;
} else {
// If we are allowed to allocate but this node doesn't have space, reserve
// it; or, if this was already a reserved container, reserve it again.
if (shouldAllocOrReserveNewContainer || rmContainer != null) {
if (reservationsContinueLooking && rmContainer == null) {
// We could possibly be ignoring queue capacity or user limits when
// reservationsContinueLooking is set. Make sure we didn't need to
// unreserve one.
if (needToUnreserve) {
if (LOG.isDebugEnabled()) {
LOG.debug("we needed to unreserve to be able to allocate");
}
return ContainerAllocation.QUEUE_SKIPPED;
}
}
ContainerAllocation result =
new ContainerAllocation(null, request.getCapability(),
AllocationState.RESERVED);
result.containerNodeType = type;
return result;
}
return ContainerAllocation.QUEUE_SKIPPED;
}
}
boolean shouldAllocOrReserveNewContainer(Priority priority, Resource required) {
int requiredContainers = application.getTotalRequiredResources(priority);
int reservedContainers = application.getNumReservedContainers(priority);
int starvation = 0;
if (reservedContainers > 0) {
float nodeFactor =
Resources
.ratio(rc, required, application.getCSLeafQueue().getMaximumAllocation());
// Use percentage of node required to bias against large containers...
// Protect against corner case where you need the whole node with
// Math.min(nodeFactor, minimumAllocationFactor)
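// Worked example (assumed numbers): 6 re-reservations over 2 reserved
// containers with nodeFactor=0.2 and minimumAllocationFactor=0.5 give
// starvation = (6 / 2) * (1 - 0.2) = 2.4, truncated to 2 extra containers.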
starvation =
(int) ((application.getReReservations(priority) /
(float) reservedContainers) * (1.0f - (Math.min(
nodeFactor, application.getCSLeafQueue()
.getMinimumAllocationFactor()))));
if (LOG.isDebugEnabled()) {
LOG.debug("needsContainers:" + " app.#re-reserve="
+ application.getReReservations(priority) + " reserved="
+ reservedContainers + " nodeFactor=" + nodeFactor
+ " minAllocFactor="
+ application.getCSLeafQueue().getMinimumAllocationFactor()
+ " starvation=" + starvation);
}
}
return (((starvation + requiredContainers) - reservedContainers) > 0);
}
private Container getContainer(RMContainer rmContainer,
FiCaSchedulerNode node, Resource capability, Priority priority) {
return (rmContainer != null) ? rmContainer.getContainer()
: createContainer(node, capability, priority);
}
private Container createContainer(FiCaSchedulerNode node, Resource capability,
Priority priority) {
NodeId nodeId = node.getRMNode().getNodeID();
ContainerId containerId =
BuilderUtils.newContainerId(application.getApplicationAttemptId(),
application.getNewContainerId());
// Create the container
return BuilderUtils.newContainer(containerId, nodeId, node.getRMNode()
.getHttpAddress(), capability, priority, null);
}
private ContainerAllocation handleNewContainerAllocation(
ContainerAllocation allocationResult, FiCaSchedulerNode node,
Priority priority, RMContainer reservedContainer, Container container) {
// Handling container allocation
// Did we previously reserve containers at this 'priority'?
if (reservedContainer != null) {
application.unreserve(priority, node, reservedContainer);
}
// Inform the application
RMContainer allocatedContainer =
application.allocate(allocationResult.containerNodeType, node,
priority, lastResourceRequest, container);
// Does the application need this resource?
if (allocatedContainer == null) {
// Skip this app if we failed to allocate.
ContainerAllocation ret =
new ContainerAllocation(allocationResult.containerToBeUnreserved,
null, AllocationState.QUEUE_SKIPPED);
ret.state = AllocationState.APP_SKIPPED;
return ret;
}
// Inform the node
node.allocateContainer(allocatedContainer);
// update locality statistics
application.incNumAllocatedContainers(allocationResult.containerNodeType,
allocationResult.requestNodeType);
return allocationResult;
}
@Override
ContainerAllocation doAllocation(ContainerAllocation allocationResult,
Resource clusterResource, FiCaSchedulerNode node,
SchedulingMode schedulingMode, Priority priority,
RMContainer reservedContainer) {
// Create the container if necessary
Container container =
getContainer(reservedContainer, node,
allocationResult.getResourceToBeAllocated(), priority);
// something went wrong getting/creating the container
if (container == null) {
LOG.warn("Couldn't get container for allocation!");
return ContainerAllocation.QUEUE_SKIPPED;
}
if (allocationResult.getAllocationState() == AllocationState.ALLOCATED) {
// When allocating container
allocationResult =
handleNewContainerAllocation(allocationResult, node, priority,
reservedContainer, container);
} else {
// When reserving container
application.reserve(priority, node, reservedContainer, container);
}
allocationResult.updatedContainer = container;
// Only reset opportunities when we FIRST allocate the container. (In other
// words, when reservedContainer != null, it's not the first time.)
if (reservedContainer == null) {
// Don't reset scheduling opportunities for off-switch assignments
// otherwise the app will be delayed for each non-local assignment.
// This helps apps with many off-cluster requests schedule faster.
if (allocationResult.containerNodeType != NodeType.OFF_SWITCH) {
if (LOG.isDebugEnabled()) {
LOG.debug("Resetting scheduling opportunities");
}
application.resetSchedulingOpportunities(priority);
}
// Non-exclusive scheduling opportunity is different: we need to reset
// it every time to make sure non-labeled resource requests are most
// likely allocated on non-labeled nodes first.
application.resetMissedNonPartitionedRequestSchedulingOpportunity(priority);
}
return allocationResult;
}
}
| 25,821 | 39.987302 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* Given an application, its resource limits and resource requests, decides
* how to allocate containers. This makes the application resource-allocation
* logic extensible.
*/
public abstract class ContainerAllocator {
FiCaSchedulerApp application;
final ResourceCalculator rc;
final RMContext rmContext;
public ContainerAllocator(FiCaSchedulerApp application,
ResourceCalculator rc, RMContext rmContext) {
this.application = application;
this.rc = rc;
this.rmContext = rmContext;
}
/**
* preAllocation performs checks, etc. to see whether we can allocate a
* container. It puts the necessary information into the returned
* {@link ContainerAllocation}.
*/
abstract ContainerAllocation preAllocation(
Resource clusterResource, FiCaSchedulerNode node,
SchedulingMode schedulingMode, ResourceLimits resourceLimits,
Priority priority, RMContainer reservedContainer);
/**
* doAllocation updates application metrics, creates containers, etc.,
* according to the allocation decision made by preAllocation.
*/
abstract ContainerAllocation doAllocation(
ContainerAllocation allocationResult, Resource clusterResource,
FiCaSchedulerNode node, SchedulingMode schedulingMode, Priority priority,
RMContainer reservedContainer);
boolean checkHeadroom(Resource clusterResource,
ResourceLimits currentResourceLimits, Resource required,
FiCaSchedulerNode node) {
// If headroom + currentReservation < required, we cannot allocate this
// request
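// e.g. (assumed numbers, with reservation continue-looking enabled on the
// default partition): headroom=2GB and currentReservation=3GB satisfy a
// 4GB request because 2GB + 3GB >= 4GB.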
Resource resourceCouldBeUnReserved = application.getCurrentReservation();
if (!application.getCSLeafQueue().getReservationContinueLooking()
|| !node.getPartition().equals(RMNodeLabelsManager.NO_LABEL)) {
// If we don't allow reservation continue-looking, OR we're looking at a
// non-default node partition, we won't allow unreserving before
// allocation.
resourceCouldBeUnReserved = Resources.none();
}
return Resources.greaterThanOrEqual(rc, clusterResource, Resources.add(
currentResourceLimits.getHeadroom(), resourceCouldBeUnReserved),
required);
}
/**
* allocate needs to handle the following:
*
* <ul>
* <li>Select request: Select a request to allocate. E.g. select a resource
* request based on requirement/priority/locality.</li>
* <li>Check if a given resource can be allocated based on resource
* availability</li>
* <li>Do allocation: this will decide/create allocated/reserved
* container, this will also update metrics</li>
* </ul>
*/
public ContainerAllocation allocate(Resource clusterResource,
FiCaSchedulerNode node, SchedulingMode schedulingMode,
ResourceLimits resourceLimits, Priority priority,
RMContainer reservedContainer) {
ContainerAllocation result =
preAllocation(clusterResource, node, schedulingMode,
resourceLimits, priority, reservedContainer);
if (AllocationState.ALLOCATED == result.state
|| AllocationState.RESERVED == result.state) {
result = doAllocation(result, clusterResource, node,
schedulingMode, priority, reservedContainer);
}
return result;
}
}
| 4,950 | 42.052174 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeRemovedSchedulerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
public class NodeRemovedSchedulerEvent extends SchedulerEvent {
private final RMNode rmNode;
public NodeRemovedSchedulerEvent(RMNode rmNode) {
super(SchedulerEventType.NODE_REMOVED);
this.rmNode = rmNode;
}
public RMNode getRemovedRMNode() {
return rmNode;
}
}
| 1,236 | 32.432432 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ContainerExpiredSchedulerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
/**
* The {@link SchedulerEvent} which notifies that a {@link ContainerId}
* has expired, sent by {@link ContainerAllocationExpirer}
*
*/
public class ContainerExpiredSchedulerEvent extends SchedulerEvent {
private final ContainerId containerId;
public ContainerExpiredSchedulerEvent(ContainerId containerId) {
super(SchedulerEventType.CONTAINER_EXPIRED);
this.containerId = containerId;
}
public ContainerId getContainerId() {
return containerId;
}
}
| 1,514 | 34.232558 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeResourceUpdateSchedulerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
public class NodeResourceUpdateSchedulerEvent extends SchedulerEvent {
private final RMNode rmNode;
private final ResourceOption resourceOption;
public NodeResourceUpdateSchedulerEvent(RMNode rmNode,
ResourceOption resourceOption) {
super(SchedulerEventType.NODE_RESOURCE_UPDATE);
this.rmNode = rmNode;
this.resourceOption = resourceOption;
}
public RMNode getRMNode() {
return rmNode;
}
public ResourceOption getResourceOption() {
return resourceOption;
}
}
| 1,514 | 33.431818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ReservationId;
public class AppAddedSchedulerEvent extends SchedulerEvent {
private final ApplicationId applicationId;
private final String queue;
private final String user;
private final ReservationId reservationID;
private final boolean isAppRecovering;
private final Priority appPriority;
public AppAddedSchedulerEvent(ApplicationId applicationId, String queue,
String user) {
this(applicationId, queue, user, false, null, Priority.newInstance(0));
}
public AppAddedSchedulerEvent(ApplicationId applicationId, String queue,
String user, ReservationId reservationID, Priority appPriority) {
this(applicationId, queue, user, false, reservationID, appPriority);
}
public AppAddedSchedulerEvent(String user,
ApplicationSubmissionContext submissionContext, boolean isAppRecovering) {
this(submissionContext.getApplicationId(), submissionContext.getQueue(),
user, isAppRecovering, submissionContext.getReservationID(),
submissionContext.getPriority());
}
public AppAddedSchedulerEvent(ApplicationId applicationId, String queue,
String user, boolean isAppRecovering, ReservationId reservationID,
Priority appPriority) {
super(SchedulerEventType.APP_ADDED);
this.applicationId = applicationId;
this.queue = queue;
this.user = user;
this.reservationID = reservationID;
this.isAppRecovering = isAppRecovering;
this.appPriority = appPriority;
}
public ApplicationId getApplicationId() {
return applicationId;
}
public String getQueue() {
return queue;
}
public String getUser() {
return user;
}
public boolean getIsAppRecovering() {
return isAppRecovering;
}
public ReservationId getReservationID() {
return reservationID;
}
public Priority getApplicatonPriority() {
return appPriority;
}
}
| 2,960 | 32.647727 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
public class NodeUpdateSchedulerEvent extends SchedulerEvent {
private final RMNode rmNode;
public NodeUpdateSchedulerEvent(RMNode rmNode) {
super(SchedulerEventType.NODE_UPDATE);
this.rmNode = rmNode;
}
public RMNode getRMNode() {
return rmNode;
}
}
| 1,225 | 33.055556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeAddedSchedulerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
import java.util.List;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
public class NodeAddedSchedulerEvent extends SchedulerEvent {
private final RMNode rmNode;
private final List<NMContainerStatus> containerReports;
public NodeAddedSchedulerEvent(RMNode rmNode) {
super(SchedulerEventType.NODE_ADDED);
this.rmNode = rmNode;
this.containerReports = null;
}
public NodeAddedSchedulerEvent(RMNode rmNode,
List<NMContainerStatus> containerReports) {
super(SchedulerEventType.NODE_ADDED);
this.rmNode = rmNode;
this.containerReports = containerReports;
}
public RMNode getAddedRMNode() {
return rmNode;
}
public List<NMContainerStatus> getContainerReports() {
return containerReports;
}
}
| 1,727 | 32.230769 | 75 |
java
|