Dataset schema (per-row columns):
  repo             string   (length 1 to 191)
  file             string   (length 23 to 351)
  code             string   (length 0 to 5.32M)
  file_length      int64    (0 to 5.32M)
  avg_line_length  float64  (0 to 2.9k)
  max_line_length  int64    (0 to 288k)
  extension_type   string   (1 distinct value)
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
code:

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.recovery;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;

// The state store to use when state isn't being stored
public class NMNullStateStoreService extends NMStateStoreService {

  public NMNullStateStoreService() {
    super(NMNullStateStoreService.class.getName());
  }

  @Override
  public boolean canRecover() {
    return false;
  }

  @Override
  public RecoveredApplicationsState loadApplicationsState()
      throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void storeApplication(ApplicationId appId,
      ContainerManagerApplicationProto p) throws IOException {
  }

  @Override
  public void storeFinishedApplication(ApplicationId appId) {
  }

  @Override
  public void removeApplication(ApplicationId appId) throws IOException {
  }

  @Override
  public List<RecoveredContainerState> loadContainersState()
      throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void storeContainer(ContainerId containerId,
      StartContainerRequest startRequest) throws IOException {
  }

  @Override
  public void storeContainerDiagnostics(ContainerId containerId,
      StringBuilder diagnostics) throws IOException {
  }

  @Override
  public void storeContainerLaunched(ContainerId containerId)
      throws IOException {
  }

  @Override
  public void storeContainerKilled(ContainerId containerId)
      throws IOException {
  }

  @Override
  public void storeContainerCompleted(ContainerId containerId, int exitCode)
      throws IOException {
  }

  @Override
  public void removeContainer(ContainerId containerId) throws IOException {
  }

  @Override
  public RecoveredLocalizationState loadLocalizationState()
      throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void startResourceLocalization(String user, ApplicationId appId,
      LocalResourceProto proto, Path localPath) throws IOException {
  }

  @Override
  public void finishResourceLocalization(String user, ApplicationId appId,
      LocalizedResourceProto proto) throws IOException {
  }

  @Override
  public void removeLocalizedResource(String user, ApplicationId appId,
      Path localPath) throws IOException {
  }

  @Override
  public RecoveredDeletionServiceState loadDeletionServiceState()
      throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void storeDeletionTask(int taskId,
      DeletionServiceDeleteTaskProto taskProto) throws IOException {
  }

  @Override
  public void removeDeletionTask(int taskId) throws IOException {
  }

  @Override
  public RecoveredNMTokensState loadNMTokensState() throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void storeNMTokenCurrentMasterKey(MasterKey key) throws IOException {
  }

  @Override
  public void storeNMTokenPreviousMasterKey(MasterKey key)
      throws IOException {
  }

  @Override
  public void storeNMTokenApplicationMasterKey(ApplicationAttemptId attempt,
      MasterKey key) throws IOException {
  }

  @Override
  public void removeNMTokenApplicationMasterKey(ApplicationAttemptId attempt)
      throws IOException {
  }

  @Override
  public RecoveredContainerTokensState loadContainerTokensState()
      throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void storeContainerTokenCurrentMasterKey(MasterKey key)
      throws IOException {
  }

  @Override
  public void storeContainerTokenPreviousMasterKey(MasterKey key)
      throws IOException {
  }

  @Override
  public void storeContainerToken(ContainerId containerId,
      Long expirationTime) throws IOException {
  }

  @Override
  public void removeContainerToken(ContainerId containerId)
      throws IOException {
  }

  @Override
  public RecoveredLogDeleterState loadLogDeleterState() throws IOException {
    throw new UnsupportedOperationException(
        "Recovery not supported by this state store");
  }

  @Override
  public void storeLogDeleter(ApplicationId appId, LogDeleterProto proto)
      throws IOException {
  }

  @Override
  public void removeLogDeleter(ApplicationId appId) throws IOException {
  }

  @Override
  protected void initStorage(Configuration conf) throws IOException {
  }

  @Override
  protected void startStorage() throws IOException {
  }

  @Override
  protected void closeStorage() throws IOException {
  }
}

file_length: 6464
avg_line_length: 28.121622
max_line_length: 105
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
code:

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.recovery;

import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.fusesource.leveldbjni.JniDBFactory;
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Logger;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;

import com.google.common.annotations.VisibleForTesting;

public class NMLeveldbStateStoreService extends NMStateStoreService {

  public static final Log LOG =
      LogFactory.getLog(NMLeveldbStateStoreService.class);

  private static final String DB_NAME = "yarn-nm-state";
  private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version";

  private static final Version CURRENT_VERSION_INFO = Version
      .newInstance(1, 0);

  private static final String DELETION_TASK_KEY_PREFIX =
      "DeletionService/deltask_";

  private static final String APPLICATIONS_KEY_PREFIX =
      "ContainerManager/applications/";
  private static final String FINISHED_APPS_KEY_PREFIX =
      "ContainerManager/finishedApps/";

  private static final String LOCALIZATION_KEY_PREFIX = "Localization/";
  private static final String LOCALIZATION_PUBLIC_KEY_PREFIX =
      LOCALIZATION_KEY_PREFIX + "public/";
  private static final String LOCALIZATION_PRIVATE_KEY_PREFIX =
      LOCALIZATION_KEY_PREFIX + "private/";
  private static final String LOCALIZATION_STARTED_SUFFIX = "started/";
  private static final String LOCALIZATION_COMPLETED_SUFFIX = "completed/";
  private static final String LOCALIZATION_FILECACHE_SUFFIX = "filecache/";
  private static final String LOCALIZATION_APPCACHE_SUFFIX = "appcache/";

  private static final String CONTAINERS_KEY_PREFIX =
      "ContainerManager/containers/";
  private static final String CONTAINER_REQUEST_KEY_SUFFIX = "/request";
  private static final String CONTAINER_DIAGS_KEY_SUFFIX = "/diagnostics";
  private static final String CONTAINER_LAUNCHED_KEY_SUFFIX = "/launched";
  private static final String CONTAINER_KILLED_KEY_SUFFIX = "/killed";
  private static final String CONTAINER_EXIT_CODE_KEY_SUFFIX = "/exitcode";

  private static final String CURRENT_MASTER_KEY_SUFFIX = "CurrentMasterKey";
  private static final String PREV_MASTER_KEY_SUFFIX = "PreviousMasterKey";
  private static final String NM_TOKENS_KEY_PREFIX = "NMTokens/";
  private static final String NM_TOKENS_CURRENT_MASTER_KEY =
      NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
  private static final String NM_TOKENS_PREV_MASTER_KEY =
      NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
  private static final String CONTAINER_TOKENS_KEY_PREFIX =
      "ContainerTokens/";
  private static final String CONTAINER_TOKENS_CURRENT_MASTER_KEY =
      CONTAINER_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
  private static final String CONTAINER_TOKENS_PREV_MASTER_KEY =
      CONTAINER_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;

  private static final String LOG_DELETER_KEY_PREFIX = "LogDeleters/";

  private static final byte[] EMPTY_VALUE = new byte[0];

  private DB db;
  private boolean isNewlyCreated;

  public NMLeveldbStateStoreService() {
    super(NMLeveldbStateStoreService.class.getName());
  }

  @Override
  protected void startStorage() throws IOException {
  }

  @Override
  protected void closeStorage() throws IOException {
    if (db != null) {
      db.close();
    }
  }

  @Override
  public boolean isNewlyCreated() {
    return isNewlyCreated;
  }

  @Override
  public List<RecoveredContainerState> loadContainersState()
      throws IOException {
    ArrayList<RecoveredContainerState> containers =
        new ArrayList<RecoveredContainerState>();
    ArrayList<ContainerId> containersToRemove =
        new ArrayList<ContainerId>();
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(CONTAINERS_KEY_PREFIX));
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.peekNext();
        String key = asString(entry.getKey());
        if (!key.startsWith(CONTAINERS_KEY_PREFIX)) {
          break;
        }

        int idEndPos = key.indexOf('/', CONTAINERS_KEY_PREFIX.length());
        if (idEndPos < 0) {
          throw new IOException("Unable to determine container in key: "
              + key);
        }
        ContainerId containerId = ConverterUtils.toContainerId(
            key.substring(CONTAINERS_KEY_PREFIX.length(), idEndPos));
        String keyPrefix = key.substring(0, idEndPos + 1);
        RecoveredContainerState rcs = loadContainerState(containerId,
            iter, keyPrefix);
        // Don't load container without StartContainerRequest
        if (rcs.startRequest != null) {
          containers.add(rcs);
        } else {
          containersToRemove.add(containerId);
        }
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }

    // remove container without StartContainerRequest
    for (ContainerId containerId : containersToRemove) {
      LOG.warn("Remove container " + containerId +
          " with incomplete records");
      try {
        removeContainer(containerId);
        // TODO: kill and cleanup the leaked container
      } catch (IOException e) {
        LOG.error("Unable to remove container " + containerId +
            " in store", e);
      }
    }

    return containers;
  }

  private RecoveredContainerState loadContainerState(ContainerId containerId,
      LeveldbIterator iter, String keyPrefix) throws IOException {
    RecoveredContainerState rcs = new RecoveredContainerState();
    rcs.status = RecoveredContainerStatus.REQUESTED;
    while (iter.hasNext()) {
      Entry<byte[], byte[]> entry = iter.peekNext();
      String key = asString(entry.getKey());
      if (!key.startsWith(keyPrefix)) {
        break;
      }
      iter.next();

      String suffix = key.substring(keyPrefix.length() - 1);  // start with '/'
      if (suffix.equals(CONTAINER_REQUEST_KEY_SUFFIX)) {
        rcs.startRequest = new StartContainerRequestPBImpl(
            StartContainerRequestProto.parseFrom(entry.getValue()));
      } else if (suffix.equals(CONTAINER_DIAGS_KEY_SUFFIX)) {
        rcs.diagnostics = asString(entry.getValue());
      } else if (suffix.equals(CONTAINER_LAUNCHED_KEY_SUFFIX)) {
        if (rcs.status == RecoveredContainerStatus.REQUESTED) {
          rcs.status = RecoveredContainerStatus.LAUNCHED;
        }
      } else if (suffix.equals(CONTAINER_KILLED_KEY_SUFFIX)) {
        rcs.killed = true;
      } else if (suffix.equals(CONTAINER_EXIT_CODE_KEY_SUFFIX)) {
        rcs.status = RecoveredContainerStatus.COMPLETED;
        rcs.exitCode = Integer.parseInt(asString(entry.getValue()));
      } else {
        throw new IOException("Unexpected container state key: " + key);
      }
    }
    return rcs;
  }

  @Override
  public void storeContainer(ContainerId containerId,
      StartContainerRequest startRequest) throws IOException {
    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
        + CONTAINER_REQUEST_KEY_SUFFIX;
    try {
      db.put(bytes(key),
          ((StartContainerRequestPBImpl) startRequest).getProto()
              .toByteArray());
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void storeContainerDiagnostics(ContainerId containerId,
      StringBuilder diagnostics) throws IOException {
    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
        + CONTAINER_DIAGS_KEY_SUFFIX;
    try {
      db.put(bytes(key), bytes(diagnostics.toString()));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void storeContainerLaunched(ContainerId containerId)
      throws IOException {
    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
        + CONTAINER_LAUNCHED_KEY_SUFFIX;
    try {
      db.put(bytes(key), EMPTY_VALUE);
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void storeContainerKilled(ContainerId containerId)
      throws IOException {
    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
        + CONTAINER_KILLED_KEY_SUFFIX;
    try {
      db.put(bytes(key), EMPTY_VALUE);
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void storeContainerCompleted(ContainerId containerId,
      int exitCode) throws IOException {
    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
        + CONTAINER_EXIT_CODE_KEY_SUFFIX;
    try {
      db.put(bytes(key), bytes(Integer.toString(exitCode)));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void removeContainer(ContainerId containerId)
      throws IOException {
    String keyPrefix = CONTAINERS_KEY_PREFIX + containerId.toString();
    try {
      WriteBatch batch = db.createWriteBatch();
      try {
        batch.delete(bytes(keyPrefix + CONTAINER_REQUEST_KEY_SUFFIX));
        batch.delete(bytes(keyPrefix + CONTAINER_DIAGS_KEY_SUFFIX));
        batch.delete(bytes(keyPrefix + CONTAINER_LAUNCHED_KEY_SUFFIX));
        batch.delete(bytes(keyPrefix + CONTAINER_KILLED_KEY_SUFFIX));
        batch.delete(bytes(keyPrefix + CONTAINER_EXIT_CODE_KEY_SUFFIX));
        db.write(batch);
      } finally {
        batch.close();
      }
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public RecoveredApplicationsState loadApplicationsState()
      throws IOException {
    RecoveredApplicationsState state = new RecoveredApplicationsState();
    state.applications = new ArrayList<ContainerManagerApplicationProto>();
    String keyPrefix = APPLICATIONS_KEY_PREFIX;
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(keyPrefix));
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.next();
        String key = asString(entry.getKey());
        if (!key.startsWith(keyPrefix)) {
          break;
        }
        state.applications.add(
            ContainerManagerApplicationProto.parseFrom(entry.getValue()));
      }

      state.finishedApplications = new ArrayList<ApplicationId>();
      keyPrefix = FINISHED_APPS_KEY_PREFIX;
      iter.seek(bytes(keyPrefix));
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.next();
        String key = asString(entry.getKey());
        if (!key.startsWith(keyPrefix)) {
          break;
        }
        ApplicationId appId =
            ConverterUtils.toApplicationId(key.substring(keyPrefix.length()));
        state.finishedApplications.add(appId);
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }

    return state;
  }

  @Override
  public void storeApplication(ApplicationId appId,
      ContainerManagerApplicationProto p) throws IOException {
    String key = APPLICATIONS_KEY_PREFIX + appId;
    try {
      db.put(bytes(key), p.toByteArray());
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void storeFinishedApplication(ApplicationId appId)
      throws IOException {
    String key = FINISHED_APPS_KEY_PREFIX + appId;
    try {
      db.put(bytes(key), new byte[0]);
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void removeApplication(ApplicationId appId)
      throws IOException {
    try {
      WriteBatch batch = db.createWriteBatch();
      try {
        String key = APPLICATIONS_KEY_PREFIX + appId;
        batch.delete(bytes(key));
        key = FINISHED_APPS_KEY_PREFIX + appId;
        batch.delete(bytes(key));
        db.write(batch);
      } finally {
        batch.close();
      }
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public RecoveredLocalizationState loadLocalizationState()
      throws IOException {
    RecoveredLocalizationState state = new RecoveredLocalizationState();
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(LOCALIZATION_PUBLIC_KEY_PREFIX));
      state.publicTrackerState = loadResourceTrackerState(iter,
          LOCALIZATION_PUBLIC_KEY_PREFIX);

      iter.seek(bytes(LOCALIZATION_PRIVATE_KEY_PREFIX));
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.peekNext();
        String key = asString(entry.getKey());
        if (!key.startsWith(LOCALIZATION_PRIVATE_KEY_PREFIX)) {
          break;
        }

        int userEndPos = key.indexOf('/',
            LOCALIZATION_PRIVATE_KEY_PREFIX.length());
        if (userEndPos < 0) {
          throw new IOException("Unable to determine user in resource key: "
              + key);
        }
        String user = key.substring(
            LOCALIZATION_PRIVATE_KEY_PREFIX.length(), userEndPos);
        state.userResources.put(user, loadUserLocalizedResources(iter,
            key.substring(0, userEndPos + 1)));
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }

    return state;
  }

  private LocalResourceTrackerState loadResourceTrackerState(
      LeveldbIterator iter, String keyPrefix) throws IOException {
    final String completedPrefix = keyPrefix + LOCALIZATION_COMPLETED_SUFFIX;
    final String startedPrefix = keyPrefix + LOCALIZATION_STARTED_SUFFIX;
    LocalResourceTrackerState state = new LocalResourceTrackerState();
    while (iter.hasNext()) {
      Entry<byte[], byte[]> entry = iter.peekNext();
      String key = asString(entry.getKey());
      if (!key.startsWith(keyPrefix)) {
        break;
      }

      if (key.startsWith(completedPrefix)) {
        state.localizedResources = loadCompletedResources(iter,
            completedPrefix);
      } else if (key.startsWith(startedPrefix)) {
        state.inProgressResources = loadStartedResources(iter,
            startedPrefix);
      } else {
        throw new IOException("Unexpected key in resource tracker state: "
            + key);
      }
    }
    return state;
  }

  private List<LocalizedResourceProto> loadCompletedResources(
      LeveldbIterator iter, String keyPrefix) throws IOException {
    List<LocalizedResourceProto> rsrcs =
        new ArrayList<LocalizedResourceProto>();
    while (iter.hasNext()) {
      Entry<byte[], byte[]> entry = iter.peekNext();
      String key = asString(entry.getKey());
      if (!key.startsWith(keyPrefix)) {
        break;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("Loading completed resource from " + key);
      }
      rsrcs.add(LocalizedResourceProto.parseFrom(entry.getValue()));
      iter.next();
    }

    return rsrcs;
  }

  private Map<LocalResourceProto, Path> loadStartedResources(
      LeveldbIterator iter, String keyPrefix) throws IOException {
    Map<LocalResourceProto, Path> rsrcs =
        new HashMap<LocalResourceProto, Path>();
    while (iter.hasNext()) {
      Entry<byte[], byte[]> entry = iter.peekNext();
      String key = asString(entry.getKey());
      if (!key.startsWith(keyPrefix)) {
        break;
      }

      Path localPath = new Path(key.substring(keyPrefix.length()));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Loading in-progress resource at " + localPath);
      }
      rsrcs.put(LocalResourceProto.parseFrom(entry.getValue()), localPath);
      iter.next();
    }

    return rsrcs;
  }

  private RecoveredUserResources loadUserLocalizedResources(
      LeveldbIterator iter, String keyPrefix) throws IOException {
    RecoveredUserResources userResources = new RecoveredUserResources();
    while (iter.hasNext()) {
      Entry<byte[], byte[]> entry = iter.peekNext();
      String key = asString(entry.getKey());
      if (!key.startsWith(keyPrefix)) {
        break;
      }

      if (key.startsWith(LOCALIZATION_FILECACHE_SUFFIX, keyPrefix.length())) {
        userResources.privateTrackerState = loadResourceTrackerState(iter,
            keyPrefix + LOCALIZATION_FILECACHE_SUFFIX);
      } else if (key.startsWith(LOCALIZATION_APPCACHE_SUFFIX,
          keyPrefix.length())) {
        int appIdStartPos = keyPrefix.length() +
            LOCALIZATION_APPCACHE_SUFFIX.length();
        int appIdEndPos = key.indexOf('/', appIdStartPos);
        if (appIdEndPos < 0) {
          throw new IOException("Unable to determine appID in resource key: "
              + key);
        }
        ApplicationId appId = ConverterUtils.toApplicationId(
            key.substring(appIdStartPos, appIdEndPos));
        userResources.appTrackerStates.put(appId,
            loadResourceTrackerState(iter,
                key.substring(0, appIdEndPos + 1)));
      } else {
        throw new IOException("Unexpected user resource key " + key);
      }
    }
    return userResources;
  }

  @Override
  public void startResourceLocalization(String user, ApplicationId appId,
      LocalResourceProto proto, Path localPath) throws IOException {
    String key = getResourceStartedKey(user, appId, localPath.toString());
    try {
      db.put(bytes(key), proto.toByteArray());
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void finishResourceLocalization(String user, ApplicationId appId,
      LocalizedResourceProto proto) throws IOException {
    String localPath = proto.getLocalPath();
    String startedKey = getResourceStartedKey(user, appId, localPath);
    String completedKey = getResourceCompletedKey(user, appId, localPath);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Storing localized resource to " + completedKey);
    }
    try {
      WriteBatch batch = db.createWriteBatch();
      try {
        batch.delete(bytes(startedKey));
        batch.put(bytes(completedKey), proto.toByteArray());
        db.write(batch);
      } finally {
        batch.close();
      }
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void removeLocalizedResource(String user, ApplicationId appId,
      Path localPath) throws IOException {
    String localPathStr = localPath.toString();
    String startedKey = getResourceStartedKey(user, appId, localPathStr);
    String completedKey = getResourceCompletedKey(user, appId, localPathStr);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing local resource at " + localPathStr);
    }
    try {
      WriteBatch batch = db.createWriteBatch();
      try {
        batch.delete(bytes(startedKey));
        batch.delete(bytes(completedKey));
        db.write(batch);
      } finally {
        batch.close();
      }
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  private String getResourceStartedKey(String user, ApplicationId appId,
      String localPath) {
    return getResourceTrackerKeyPrefix(user, appId)
        + LOCALIZATION_STARTED_SUFFIX + localPath;
  }

  private String getResourceCompletedKey(String user, ApplicationId appId,
      String localPath) {
    return getResourceTrackerKeyPrefix(user, appId)
        + LOCALIZATION_COMPLETED_SUFFIX + localPath;
  }

  private String getResourceTrackerKeyPrefix(String user,
      ApplicationId appId) {
    if (user == null) {
      return LOCALIZATION_PUBLIC_KEY_PREFIX;
    }
    if (appId == null) {
      return LOCALIZATION_PRIVATE_KEY_PREFIX + user + "/"
          + LOCALIZATION_FILECACHE_SUFFIX;
    }
    return LOCALIZATION_PRIVATE_KEY_PREFIX + user + "/"
        + LOCALIZATION_APPCACHE_SUFFIX + appId + "/";
  }

  @Override
  public RecoveredDeletionServiceState loadDeletionServiceState()
      throws IOException {
    RecoveredDeletionServiceState state = new RecoveredDeletionServiceState();
    state.tasks = new ArrayList<DeletionServiceDeleteTaskProto>();
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(DELETION_TASK_KEY_PREFIX));
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.next();
        String key = asString(entry.getKey());
        if (!key.startsWith(DELETION_TASK_KEY_PREFIX)) {
          break;
        }
        state.tasks.add(
            DeletionServiceDeleteTaskProto.parseFrom(entry.getValue()));
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }
    return state;
  }

  @Override
  public void storeDeletionTask(int taskId,
      DeletionServiceDeleteTaskProto taskProto) throws IOException {
    String key = DELETION_TASK_KEY_PREFIX + taskId;
    try {
      db.put(bytes(key), taskProto.toByteArray());
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void removeDeletionTask(int taskId) throws IOException {
    String key = DELETION_TASK_KEY_PREFIX + taskId;
    try {
      db.delete(bytes(key));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public RecoveredNMTokensState loadNMTokensState() throws IOException {
    RecoveredNMTokensState state = new RecoveredNMTokensState();
    state.applicationMasterKeys =
        new HashMap<ApplicationAttemptId, MasterKey>();
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(NM_TOKENS_KEY_PREFIX));
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.next();
        String fullKey = asString(entry.getKey());
        if (!fullKey.startsWith(NM_TOKENS_KEY_PREFIX)) {
          break;
        }
        String key = fullKey.substring(NM_TOKENS_KEY_PREFIX.length());
        if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) {
          state.currentMasterKey = parseMasterKey(entry.getValue());
        } else if (key.equals(PREV_MASTER_KEY_SUFFIX)) {
          state.previousMasterKey = parseMasterKey(entry.getValue());
        } else if (key.startsWith(
            ApplicationAttemptId.appAttemptIdStrPrefix)) {
          ApplicationAttemptId attempt;
          try {
            attempt = ConverterUtils.toApplicationAttemptId(key);
          } catch (IllegalArgumentException e) {
            throw new IOException("Bad application master key state for "
                + fullKey, e);
          }
          state.applicationMasterKeys.put(attempt,
              parseMasterKey(entry.getValue()));
        }
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }
    return state;
  }

  @Override
  public void storeNMTokenCurrentMasterKey(MasterKey key)
      throws IOException {
    storeMasterKey(NM_TOKENS_CURRENT_MASTER_KEY, key);
  }

  @Override
  public void storeNMTokenPreviousMasterKey(MasterKey key)
      throws IOException {
    storeMasterKey(NM_TOKENS_PREV_MASTER_KEY, key);
  }

  @Override
  public void storeNMTokenApplicationMasterKey(
      ApplicationAttemptId attempt, MasterKey key) throws IOException {
    storeMasterKey(NM_TOKENS_KEY_PREFIX + attempt, key);
  }

  @Override
  public void removeNMTokenApplicationMasterKey(
      ApplicationAttemptId attempt) throws IOException {
    String key = NM_TOKENS_KEY_PREFIX + attempt;
    try {
      db.delete(bytes(key));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  private MasterKey parseMasterKey(byte[] keyData) throws IOException {
    return new MasterKeyPBImpl(MasterKeyProto.parseFrom(keyData));
  }

  private void storeMasterKey(String dbKey, MasterKey key)
      throws IOException {
    MasterKeyPBImpl pb = (MasterKeyPBImpl) key;
    try {
      db.put(bytes(dbKey), pb.getProto().toByteArray());
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public RecoveredContainerTokensState loadContainerTokensState()
      throws IOException {
    RecoveredContainerTokensState state =
        new RecoveredContainerTokensState();
    state.activeTokens = new HashMap<ContainerId, Long>();
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(CONTAINER_TOKENS_KEY_PREFIX));
      final int containerTokensKeyPrefixLength =
          CONTAINER_TOKENS_KEY_PREFIX.length();
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.next();
        String fullKey = asString(entry.getKey());
        if (!fullKey.startsWith(CONTAINER_TOKENS_KEY_PREFIX)) {
          break;
        }
        String key = fullKey.substring(containerTokensKeyPrefixLength);
        if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) {
          state.currentMasterKey = parseMasterKey(entry.getValue());
        } else if (key.equals(PREV_MASTER_KEY_SUFFIX)) {
          state.previousMasterKey = parseMasterKey(entry.getValue());
        } else if (key.startsWith(ConverterUtils.CONTAINER_PREFIX)) {
          loadContainerToken(state, fullKey, key, entry.getValue());
        }
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }
    return state;
  }

  private static void loadContainerToken(RecoveredContainerTokensState state,
      String key, String containerIdStr, byte[] value) throws IOException {
    ContainerId containerId;
    Long expTime;
    try {
      containerId = ConverterUtils.toContainerId(containerIdStr);
      expTime = Long.parseLong(asString(value));
    } catch (IllegalArgumentException e) {
      throw new IOException("Bad container token state for " + key, e);
    }
    state.activeTokens.put(containerId, expTime);
  }

  @Override
  public void storeContainerTokenCurrentMasterKey(MasterKey key)
      throws IOException {
    storeMasterKey(CONTAINER_TOKENS_CURRENT_MASTER_KEY, key);
  }

  @Override
  public void storeContainerTokenPreviousMasterKey(MasterKey key)
      throws IOException {
    storeMasterKey(CONTAINER_TOKENS_PREV_MASTER_KEY, key);
  }

  @Override
  public void storeContainerToken(ContainerId containerId, Long expTime)
      throws IOException {
    String key = CONTAINER_TOKENS_KEY_PREFIX + containerId;
    try {
      db.put(bytes(key), bytes(expTime.toString()));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void removeContainerToken(ContainerId containerId)
      throws IOException {
    String key = CONTAINER_TOKENS_KEY_PREFIX + containerId;
    try {
      db.delete(bytes(key));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public RecoveredLogDeleterState loadLogDeleterState() throws IOException {
    RecoveredLogDeleterState state = new RecoveredLogDeleterState();
    state.logDeleterMap = new HashMap<ApplicationId, LogDeleterProto>();
    LeveldbIterator iter = null;
    try {
      iter = new LeveldbIterator(db);
      iter.seek(bytes(LOG_DELETER_KEY_PREFIX));
      final int logDeleterKeyPrefixLength = LOG_DELETER_KEY_PREFIX.length();
      while (iter.hasNext()) {
        Entry<byte[], byte[]> entry = iter.next();
        String fullKey = asString(entry.getKey());
        if (!fullKey.startsWith(LOG_DELETER_KEY_PREFIX)) {
          break;
        }
        String appIdStr = fullKey.substring(logDeleterKeyPrefixLength);
        ApplicationId appId = null;
        try {
          appId = ConverterUtils.toApplicationId(appIdStr);
        } catch (IllegalArgumentException e) {
          LOG.warn("Skipping unknown log deleter key " + fullKey);
          continue;
        }
        LogDeleterProto proto = LogDeleterProto.parseFrom(entry.getValue());
        state.logDeleterMap.put(appId, proto);
      }
    } catch (DBException e) {
      throw new IOException(e);
    } finally {
      if (iter != null) {
        iter.close();
      }
    }
    return state;
  }

  @Override
  public void storeLogDeleter(ApplicationId appId, LogDeleterProto proto)
      throws IOException {
    String key = getLogDeleterKey(appId);
    try {
      db.put(bytes(key), proto.toByteArray());
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  @Override
  public void removeLogDeleter(ApplicationId appId) throws IOException {
    String key = getLogDeleterKey(appId);
    try {
      db.delete(bytes(key));
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  private String getLogDeleterKey(ApplicationId appId) {
    return LOG_DELETER_KEY_PREFIX + appId;
  }

  @Override
  protected void initStorage(Configuration conf)
      throws IOException {
    Path storeRoot = createStorageDir(conf);
    Options options = new Options();
    options.createIfMissing(false);
    options.logger(new LeveldbLogger());
    LOG.info("Using state database at " + storeRoot + " for recovery");
    File dbfile = new File(storeRoot.toString());
    try {
      db = JniDBFactory.factory.open(dbfile, options);
    } catch (NativeDB.DBException e) {
      if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
        LOG.info("Creating state database at " + dbfile);
        isNewlyCreated = true;
        options.createIfMissing(true);
        try {
          db = JniDBFactory.factory.open(dbfile, options);
          // store version
          storeVersion();
        } catch (DBException dbErr) {
          throw new IOException(dbErr.getMessage(), dbErr);
        }
      } else {
        throw e;
      }
    }
    checkVersion();
  }

  private Path createStorageDir(Configuration conf) throws IOException {
    final String storeUri = conf.get(YarnConfiguration.NM_RECOVERY_DIR);
    if (storeUri == null) {
      throw new IOException("No store location directory configured in " +
          YarnConfiguration.NM_RECOVERY_DIR);
    }

    Path root = new Path(storeUri, DB_NAME);
    FileSystem fs = FileSystem.getLocal(conf);
    fs.mkdirs(root, new FsPermission((short) 0700));
    return root;
  }

  private static class LeveldbLogger implements Logger {
    private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);

    @Override
    public void log(String message) {
      LOG.info(message);
    }
  }

  Version loadVersion() throws IOException {
    byte[] data = db.get(bytes(DB_SCHEMA_VERSION_KEY));
    // if version is not stored previously, treat it as CURRENT_VERSION_INFO.
    if (data == null || data.length == 0) {
      return getCurrentVersion();
    }
    Version version = new VersionPBImpl(VersionProto.parseFrom(data));
    return version;
  }

  private void storeVersion() throws IOException {
    dbStoreVersion(CURRENT_VERSION_INFO);
  }

  // Only used for test
  @VisibleForTesting
  void storeVersion(Version state) throws IOException {
    dbStoreVersion(state);
  }

  private void dbStoreVersion(Version state) throws IOException {
    String key = DB_SCHEMA_VERSION_KEY;
    byte[] data = ((VersionPBImpl) state).getProto().toByteArray();
    try {
      db.put(bytes(key), data);
    } catch (DBException e) {
      throw new IOException(e);
    }
  }

  Version getCurrentVersion() {
    return CURRENT_VERSION_INFO;
  }

  /**
   * 1) Versioning scheme: major.minor. For e.g. 1.0, 1.1, 1.2...1.25, 2.0 etc.
   * 2) Any incompatible change of state-store is a major upgrade, and any
   *    compatible change of state-store is a minor upgrade.
   * 3) Within a minor upgrade, say 1.1 to 1.2:
   *    overwrite the version info and proceed as normal.
   * 4) Within a major upgrade, say 1.2 to 2.0:
   *    throw exception and indicate user to use a separate upgrade tool to
   *    upgrade NM state or remove incompatible old state.
   */
  private void checkVersion() throws IOException {
    Version loadedVersion = loadVersion();
    LOG.info("Loaded NM state version info " + loadedVersion);
    if (loadedVersion.equals(getCurrentVersion())) {
      return;
    }
    if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
      LOG.info("Storing NM state version info " + getCurrentVersion());
      storeVersion();
    } else {
      throw new IOException(
          "Incompatible version for NM state: expecting NM state version "
              + getCurrentVersion() + ", but loading version "
              + loadedVersion);
    }
  }
}

file_length: 34955
avg_line_length: 32.741313
max_line_length: 105
extension_type: java
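Aside (not part of the dataset row above): the store's container key space composes as prefix + containerId + per-event suffix, which is what lets loadContainersState() recover one container by scanning contiguous leveldb keys until the prefix stops matching. A minimal, self-contained sketch of that layout; the key strings mirror the constants in the row above, and the container id is a hypothetical example:

// NMStateKeyLayoutExample.java -- illustrative only; plain Java, no
// leveldb dependency. Prints the five keys one container's record
// occupies in the NM state database.
public class NMStateKeyLayoutExample {
  public static void main(String[] args) {
    final String containersPrefix = "ContainerManager/containers/";
    String containerId = "container_1400000000000_0001_01_000001"; // hypothetical id
    String keyPrefix = containersPrefix + containerId;
    // One entry per recorded event, all sharing the container prefix:
    for (String suffix : new String[] {
        "/request", "/diagnostics", "/launched", "/killed", "/exitcode"}) {
      System.out.println(keyPrefix + suffix);
    }
  }
}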
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
code:

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.executor;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Encapsulates information required for starting/launching containers.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerStartContext {
  private final Container container;
  private final Map<Path, List<String>> localizedResources;
  private final Path nmPrivateContainerScriptPath;
  private final Path nmPrivateTokensPath;
  private final String user;
  private final String appId;
  private final Path containerWorkDir;
  private final List<String> localDirs;
  private final List<String> logDirs;

  public static final class Builder {
    private Container container;
    private Map<Path, List<String>> localizedResources;
    private Path nmPrivateContainerScriptPath;
    private Path nmPrivateTokensPath;
    private String user;
    private String appId;
    private Path containerWorkDir;
    private List<String> localDirs;
    private List<String> logDirs;

    public Builder() {
    }

    public Builder setContainer(Container container) {
      this.container = container;
      return this;
    }

    public Builder setLocalizedResources(
        Map<Path, List<String>> localizedResources) {
      this.localizedResources = localizedResources;
      return this;
    }

    public Builder setNmPrivateContainerScriptPath(
        Path nmPrivateContainerScriptPath) {
      this.nmPrivateContainerScriptPath = nmPrivateContainerScriptPath;
      return this;
    }

    public Builder setNmPrivateTokensPath(Path nmPrivateTokensPath) {
      this.nmPrivateTokensPath = nmPrivateTokensPath;
      return this;
    }

    public Builder setUser(String user) {
      this.user = user;
      return this;
    }

    public Builder setAppId(String appId) {
      this.appId = appId;
      return this;
    }

    public Builder setContainerWorkDir(Path containerWorkDir) {
      this.containerWorkDir = containerWorkDir;
      return this;
    }

    public Builder setLocalDirs(List<String> localDirs) {
      this.localDirs = localDirs;
      return this;
    }

    public Builder setLogDirs(List<String> logDirs) {
      this.logDirs = logDirs;
      return this;
    }

    public ContainerStartContext build() {
      return new ContainerStartContext(this);
    }
  }

  private ContainerStartContext(Builder builder) {
    this.container = builder.container;
    this.localizedResources = builder.localizedResources;
    this.nmPrivateContainerScriptPath = builder.nmPrivateContainerScriptPath;
    this.nmPrivateTokensPath = builder.nmPrivateTokensPath;
    this.user = builder.user;
    this.appId = builder.appId;
    this.containerWorkDir = builder.containerWorkDir;
    this.localDirs = builder.localDirs;
    this.logDirs = builder.logDirs;
  }

  public Container getContainer() {
    return this.container;
  }

  public Map<Path, List<String>> getLocalizedResources() {
    if (this.localizedResources != null) {
      return Collections.unmodifiableMap(this.localizedResources);
    } else {
      return null;
    }
  }

  public Path getNmPrivateContainerScriptPath() {
    return this.nmPrivateContainerScriptPath;
  }

  public Path getNmPrivateTokensPath() {
    return this.nmPrivateTokensPath;
  }

  public String getUser() {
    return this.user;
  }

  public String getAppId() {
    return this.appId;
  }

  public Path getContainerWorkDir() {
    return this.containerWorkDir;
  }

  public List<String> getLocalDirs() {
    return Collections.unmodifiableList(this.localDirs);
  }

  public List<String> getLogDirs() {
    return Collections.unmodifiableList(this.logDirs);
  }
}

file_length: 4776
avg_line_length: 27.777108
max_line_length: 86
extension_type: java
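Aside (not part of the dataset row above): ContainerStartContext is assembled through its fluent Builder. A minimal usage sketch, assuming the yarn-server-nodemanager jar on the classpath; every value below is a hypothetical placeholder, and in the NodeManager these come from the container-launch code path rather than literals:

// ContainerStartContextExample.java -- illustrative only.
import java.util.Arrays;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;

public class ContainerStartContextExample {
  public static void main(String[] args) {
    // Chain the setters and build an immutable context object.
    ContainerStartContext ctx = new ContainerStartContext.Builder()
        .setUser("alice")                                 // hypothetical user
        .setAppId("application_1400000000000_0001")       // hypothetical app id
        .setNmPrivateContainerScriptPath(
            new Path("/nm-private/launch_container.sh"))  // hypothetical path
        .setNmPrivateTokensPath(new Path("/nm-private/container_tokens"))
        .setContainerWorkDir(new Path("/local/usercache/alice/appcache"))
        .setLocalDirs(Arrays.asList("/local/0", "/local/1"))
        .setLogDirs(Arrays.asList("/logs/0"))
        .build();
    System.out.println(ctx.getUser() + " -> " + ctx.getContainerWorkDir());
  }
}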
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerReacquisitionContext.java
code:

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.executor;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

/**
 * Encapsulates information required for container reacquisition.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerReacquisitionContext {
  private final Container container;
  private final String user;
  private final ContainerId containerId;

  public static final class Builder {
    private Container container;
    private String user;
    private ContainerId containerId;

    public Builder() {
    }

    public Builder setContainer(Container container) {
      this.container = container;
      return this;
    }

    public Builder setUser(String user) {
      this.user = user;
      return this;
    }

    public Builder setContainerId(ContainerId containerId) {
      this.containerId = containerId;
      return this;
    }

    public ContainerReacquisitionContext build() {
      return new ContainerReacquisitionContext(this);
    }
  }

  private ContainerReacquisitionContext(Builder builder) {
    this.container = builder.container;
    this.user = builder.user;
    this.containerId = builder.containerId;
  }

  public Container getContainer() {
    return this.container;
  }

  public String getUser() {
    return this.user;
  }

  public ContainerId getContainerId() {
    return this.containerId;
  }
}

file_length: 2430
avg_line_length: 27.940476
max_line_length: 86
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerLivenessContext.java
code:

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.executor;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

/**
 * Encapsulates information required for container liveness checks.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerLivenessContext {
  private final Container container;
  private final String user;
  private final String pid;

  public static final class Builder {
    private Container container;
    private String user;
    private String pid;

    public Builder() {
    }

    public Builder setContainer(Container container) {
      this.container = container;
      return this;
    }

    public Builder setUser(String user) {
      this.user = user;
      return this;
    }

    public Builder setPid(String pid) {
      this.pid = pid;
      return this;
    }

    public ContainerLivenessContext build() {
      return new ContainerLivenessContext(this);
    }
  }

  private ContainerLivenessContext(Builder builder) {
    this.container = builder.container;
    this.user = builder.user;
    this.pid = builder.pid;
  }

  public Container getContainer() {
    return this.container;
  }

  public String getUser() {
    return this.user;
  }

  public String getPid() {
    return this.pid;
  }
}

file_length: 2257
avg_line_length: 26.204819
max_line_length: 86
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/LocalizerStartContext.java
code:

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.executor;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;

import java.net.InetSocketAddress;

/**
 * Encapsulates information required for starting a localizer.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class LocalizerStartContext {
  private final Path nmPrivateContainerTokens;
  private final InetSocketAddress nmAddr;
  private final String user;
  private final String appId;
  private final String locId;
  private final LocalDirsHandlerService dirsHandler;

  public static final class Builder {
    private Path nmPrivateContainerTokens;
    private InetSocketAddress nmAddr;
    private String user;
    private String appId;
    private String locId;
    private LocalDirsHandlerService dirsHandler;

    public Builder() {
    }

    public Builder setNmPrivateContainerTokens(Path nmPrivateContainerTokens) {
      this.nmPrivateContainerTokens = nmPrivateContainerTokens;
      return this;
    }

    public Builder setNmAddr(InetSocketAddress nmAddr) {
      this.nmAddr = nmAddr;
      return this;
    }

    public Builder setUser(String user) {
      this.user = user;
      return this;
    }

    public Builder setAppId(String appId) {
      this.appId = appId;
      return this;
    }

    public Builder setLocId(String locId) {
      this.locId = locId;
      return this;
    }

    public Builder setDirsHandler(LocalDirsHandlerService dirsHandler) {
      this.dirsHandler = dirsHandler;
      return this;
    }

    public LocalizerStartContext build() {
      return new LocalizerStartContext(this);
    }
  }

  private LocalizerStartContext(Builder builder) {
    this.nmPrivateContainerTokens = builder.nmPrivateContainerTokens;
    this.nmAddr = builder.nmAddr;
    this.user = builder.user;
    this.appId = builder.appId;
    this.locId = builder.locId;
    this.dirsHandler = builder.dirsHandler;
  }

  public Path getNmPrivateContainerTokens() {
    return this.nmPrivateContainerTokens;
  }

  public InetSocketAddress getNmAddr() {
    return this.nmAddr;
  }

  public String getUser() {
    return this.user;
  }

  public String getAppId() {
    return this.appId;
  }

  public String getLocId() {
    return this.locId;
  }

  public LocalDirsHandlerService getDirsHandler() {
    return this.dirsHandler;
  }
}

file_length: 3352
avg_line_length: 26.483607
max_line_length: 79
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/DeletionAsUserContext.java
code:

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.executor;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/**
 * Encapsulates information required for deletions as a given user.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class DeletionAsUserContext {
  private final String user;
  private final Path subDir;
  private final List<Path> basedirs;

  public static final class Builder {
    private String user;
    private Path subDir;
    private List<Path> basedirs;

    public Builder() {
    }

    public Builder setUser(String user) {
      this.user = user;
      return this;
    }

    public Builder setSubDir(Path subDir) {
      this.subDir = subDir;
      return this;
    }

    public Builder setBasedirs(Path... basedirs) {
      this.basedirs = Arrays.asList(basedirs);
      return this;
    }

    public DeletionAsUserContext build() {
      return new DeletionAsUserContext(this);
    }
  }

  private DeletionAsUserContext(Builder builder) {
    this.user = builder.user;
    this.subDir = builder.subDir;
    this.basedirs = builder.basedirs;
  }

  public String getUser() {
    return this.user;
  }

  public Path getSubDir() {
    return this.subDir;
  }

  public List<Path> getBasedirs() {
    if (this.basedirs != null) {
      return Collections.unmodifiableList(this.basedirs);
    } else {
      return null;
    }
  }
}

file_length: 2402
avg_line_length: 25.406593
max_line_length: 76
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
code:

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.executor;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

/**
 * Encapsulates information required for container signaling.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ContainerSignalContext {
  private final Container container;
  private final String user;
  private final String pid;
  private final Signal signal;

  public static final class Builder {
    private Container container;
    private String user;
    private String pid;
    private Signal signal;

    public Builder() {
    }

    public Builder setContainer(Container container) {
      this.container = container;
      return this;
    }

    public Builder setUser(String user) {
      this.user = user;
      return this;
    }

    public Builder setPid(String pid) {
      this.pid = pid;
      return this;
    }

    public Builder setSignal(Signal signal) {
      this.signal = signal;
      return this;
    }

    public ContainerSignalContext build() {
      return new ContainerSignalContext(this);
    }
  }

  private ContainerSignalContext(Builder builder) {
    this.container = builder.container;
    this.user = builder.user;
    this.pid = builder.pid;
    this.signal = builder.signal;
  }

  public Container getContainer() {
    return this.container;
  }

  public String getUser() {
    return this.user;
  }

  public String getPid() {
    return this.pid;
  }

  public Signal getSignal() {
    return this.signal;
  }
}

file_length: 2569
avg_line_length: 25.770833
max_line_length: 86
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalizationImpl.java
code:

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager;

import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;

public class ContainerLocalizationImpl implements ContainerLocalization {

  public ContainerLocalizationImpl(Dispatcher dispatcher, Application app,
      LocalizationProtocol localization) {
  }
}

file_length: 1274
avg_line_length: 41.5
max_line_length: 90
extension_type: java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager; public enum AuxServicesEventType { APPLICATION_INIT, APPLICATION_STOP, CONTAINER_INIT, CONTAINER_STOP }
973
35.074074
74
java
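For orientation, a sketch of what each event type above means to an auxiliary service, mirroring the switch in AuxServices.handle later in this dump; the class, method, and descriptions are illustrative only.

import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;

public class AuxEventTypeDemo {
  // Illustrative: what each event type means to an auxiliary service.
  static String describe(AuxServicesEventType type) {
    switch (type) {
      case APPLICATION_INIT:
        return "deliver service data to the one named aux service";
      case APPLICATION_STOP:
        return "notify every aux service that the application finished";
      case CONTAINER_INIT:
        return "per-container start hook for every aux service";
      case CONTAINER_STOP:
        return "per-container stop hook for every aux service";
      default:
        return "unknown";
    }
  }

  public static void main(String[] args) {
    for (AuxServicesEventType t : AuxServicesEventType.values()) {
      System.out.println(t + ": " + describe(t));
    }
  }
}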
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager;

import java.nio.ByteBuffer;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

public class AuxServicesEvent extends AbstractEvent<AuxServicesEventType> {

  private final String user;
  private final String serviceId;
  private final ByteBuffer serviceData;
  private final ApplicationId appId;
  private final Container container;

  public AuxServicesEvent(AuxServicesEventType eventType, ApplicationId appId) {
    this(eventType, null, appId, null, null);
  }

  public AuxServicesEvent(AuxServicesEventType eventType, Container container) {
    this(eventType, null, container.getContainerId().getApplicationAttemptId()
        .getApplicationId(), null, null, container);
  }

  public AuxServicesEvent(AuxServicesEventType eventType, String user,
      ApplicationId appId, String serviceId, ByteBuffer serviceData) {
    this(eventType, user, appId, serviceId, serviceData, null);
  }

  public AuxServicesEvent(AuxServicesEventType eventType, String user,
      ApplicationId appId, String serviceId, ByteBuffer serviceData,
      Container container) {
    super(eventType);
    this.user = user;
    this.appId = appId;
    this.serviceId = serviceId;
    this.serviceData = serviceData;
    this.container = container;
  }

  public String getServiceID() {
    return serviceId;
  }

  public ByteBuffer getServiceData() {
    return serviceData;
  }

  public String getUser() {
    return user;
  }

  public ApplicationId getApplicationID() {
    return appId;
  }

  public Container getContainer() {
    return container;
  }

}
2,617
30.926829
80
java
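A hedged construction example for the event class above. The application id and payload are placeholders, and "mapreduce_shuffle" is simply a familiar aux-service name, not implied by this record.

import java.nio.ByteBuffer;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;

public class AuxEventDemo {
  public static void main(String[] args) {
    // Placeholder id and payload for illustration.
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ByteBuffer payload = ByteBuffer.wrap(new byte[] {0, 1, 2});
    AuxServicesEvent event = new AuxServicesEvent(
        AuxServicesEventType.APPLICATION_INIT, "appuser", appId,
        "mapreduce_shuffle", payload);
    System.out.println(event.getType() + " for " + event.getApplicationID());
  }
}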
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager;

import static org.apache.hadoop.service.Service.STATE.STARTED;

import java.io.DataInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.InvalidAuxServiceException;
import org.apache.hadoop.yarn.exceptions.InvalidContainerException;
import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedContainersEvent;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger;
import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerInitEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationFinishEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;

public class ContainerManagerImpl extends CompositeService implements
    ServiceStateChangeListener, ContainerManagementProtocol,
    EventHandler<ContainerManagerEvent> {

  /**
   * Extra duration to wait for applications to be killed on shutdown.
   */
  private static final int SHUTDOWN_CLEANUP_SLOP_MS = 1000;

  private static final Log LOG = LogFactory.getLog(ContainerManagerImpl.class);

  static final String INVALID_NMTOKEN_MSG = "Invalid NMToken";
  static final String INVALID_CONTAINERTOKEN_MSG = "Invalid ContainerToken";

  final Context context;
  private final ContainersMonitor containersMonitor;
  private Server server;
  private final ResourceLocalizationService rsrcLocalizationSrvc;
  private final ContainersLauncher containersLauncher;
  private final AuxServices auxiliaryServices;
  private final NodeManagerMetrics metrics;

  private final NodeStatusUpdater nodeStatusUpdater;

  protected LocalDirsHandlerService dirsHandler;
  protected final AsyncDispatcher dispatcher;
  private final ApplicationACLsManager aclsManager;

  private final DeletionService deletionService;
  private AtomicBoolean blockNewContainerRequests = new AtomicBoolean(false);
  private boolean serviceStopped = false;
  private final ReadLock readLock;
  private final WriteLock writeLock;

  private long waitForContainersOnShutdownMillis;

  public ContainerManagerImpl(Context context, ContainerExecutor exec,
      DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
      NodeManagerMetrics metrics, ApplicationACLsManager aclsManager,
      LocalDirsHandlerService dirsHandler) {
    super(ContainerManagerImpl.class.getName());
    this.context = context;
    this.dirsHandler = dirsHandler;

    // ContainerManager level dispatcher.
    dispatcher = new AsyncDispatcher();
    this.deletionService = deletionContext;
    this.metrics = metrics;

    rsrcLocalizationSrvc =
        createResourceLocalizationService(exec, deletionContext, context);
    addService(rsrcLocalizationSrvc);

    containersLauncher = createContainersLauncher(context, exec);
    addService(containersLauncher);

    this.nodeStatusUpdater = nodeStatusUpdater;
    this.aclsManager = aclsManager;

    // Start configurable services
    auxiliaryServices = new AuxServices();
    auxiliaryServices.registerServiceListener(this);
    addService(auxiliaryServices);

    this.containersMonitor =
        new ContainersMonitorImpl(exec, dispatcher, this.context);
    addService(this.containersMonitor);

    dispatcher.register(ContainerEventType.class,
        new ContainerEventDispatcher());
    dispatcher.register(ApplicationEventType.class,
        new ApplicationEventDispatcher());
    dispatcher.register(LocalizationEventType.class, rsrcLocalizationSrvc);
    dispatcher.register(AuxServicesEventType.class, auxiliaryServices);
    dispatcher.register(ContainersMonitorEventType.class, containersMonitor);
    dispatcher.register(ContainersLauncherEventType.class, containersLauncher);

    addService(dispatcher);

    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
  }

  @Override
  public void serviceInit(Configuration conf) throws Exception {
    LogHandler logHandler =
        createLogHandler(conf, this.context, this.deletionService);
    addIfService(logHandler);
    dispatcher.register(LogHandlerEventType.class, logHandler);

    // add the shared cache upload service (it will do nothing if the shared
    // cache is disabled)
    SharedCacheUploadService sharedCacheUploader =
        createSharedCacheUploaderService();
    addService(sharedCacheUploader);
    dispatcher.register(SharedCacheUploadEventType.class, sharedCacheUploader);

    waitForContainersOnShutdownMillis =
        conf.getLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS,
            YarnConfiguration.DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS) +
        conf.getLong(YarnConfiguration.NM_PROCESS_KILL_WAIT_MS,
            YarnConfiguration.DEFAULT_NM_PROCESS_KILL_WAIT_MS) +
        SHUTDOWN_CLEANUP_SLOP_MS;

    super.serviceInit(conf);
    recover();
  }

  @SuppressWarnings("unchecked")
  private void recover() throws IOException, URISyntaxException {
    NMStateStoreService stateStore = context.getNMStateStore();
    if (stateStore.canRecover()) {
      rsrcLocalizationSrvc.recoverLocalizedResources(
          stateStore.loadLocalizationState());

      RecoveredApplicationsState appsState = stateStore.loadApplicationsState();
      for (ContainerManagerApplicationProto proto :
           appsState.getApplications()) {
        recoverApplication(proto);
      }

      for (RecoveredContainerState rcs : stateStore.loadContainersState()) {
        recoverContainer(rcs);
      }

      String diagnostic = "Application marked finished during recovery";
      for (ApplicationId appId : appsState.getFinishedApplications()) {
        dispatcher.getEventHandler().handle(
            new ApplicationFinishEvent(appId, diagnostic));
      }
    }
  }

  private void recoverApplication(ContainerManagerApplicationProto p)
      throws IOException {
    ApplicationId appId = new ApplicationIdPBImpl(p.getId());
    Credentials creds = new Credentials();
    creds.readTokenStorageStream(
        new DataInputStream(p.getCredentials().newInput()));

    List<ApplicationACLMapProto> aclProtoList = p.getAclsList();
    Map<ApplicationAccessType, String> acls =
        new HashMap<ApplicationAccessType, String>(aclProtoList.size());
    for (ApplicationACLMapProto aclProto : aclProtoList) {
      acls.put(ProtoUtils.convertFromProtoFormat(aclProto.getAccessType()),
          aclProto.getAcl());
    }

    LogAggregationContext logAggregationContext = null;
    if (p.getLogAggregationContext() != null) {
      logAggregationContext =
          new LogAggregationContextPBImpl(p.getLogAggregationContext());
    }

    LOG.info("Recovering application " + appId);
    ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
        creds, context);
    context.getApplications().put(appId, app);
    app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
  }

  @SuppressWarnings("unchecked")
  private void recoverContainer(RecoveredContainerState rcs)
      throws IOException {
    StartContainerRequest req = rcs.getStartRequest();
    ContainerLaunchContext launchContext = req.getContainerLaunchContext();
    ContainerTokenIdentifier token =
        BuilderUtils.newContainerTokenIdentifier(req.getContainerToken());
    ContainerId containerId = token.getContainerID();
    ApplicationId appId =
        containerId.getApplicationAttemptId().getApplicationId();

    LOG.info("Recovering " + containerId + " in state " + rcs.getStatus()
        + " with exit code " + rcs.getExitCode());

    if (context.getApplications().containsKey(appId)) {
      Credentials credentials = parseCredentials(launchContext);
      Container container = new ContainerImpl(getConfig(), dispatcher,
          context.getNMStateStore(), req.getContainerLaunchContext(),
          credentials, metrics, token, rcs.getStatus(), rcs.getExitCode(),
          rcs.getDiagnostics(), rcs.getKilled());
      context.getContainers().put(containerId, container);
      dispatcher.getEventHandler().handle(
          new ApplicationContainerInitEvent(container));
    } else {
      if (rcs.getStatus() != RecoveredContainerStatus.COMPLETED) {
        LOG.warn(containerId + " has no corresponding application!");
      }
      LOG.info("Adding " + containerId + " to recently stopped containers");
      nodeStatusUpdater.addCompletedContainer(containerId);
    }
  }

  private void waitForRecoveredContainers() throws InterruptedException {
    final int sleepMsec = 100;
    int waitIterations = 100;
    List<ContainerId> newContainers = new ArrayList<ContainerId>();
    while (--waitIterations >= 0) {
      newContainers.clear();
      for (Container container : context.getContainers().values()) {
        if (container.getContainerState() == org.apache.hadoop.yarn.server
            .nodemanager.containermanager.container.ContainerState.NEW) {
          newContainers.add(container.getContainerId());
        }
      }
      if (newContainers.isEmpty()) {
        break;
      }
      LOG.info("Waiting for containers: " + newContainers);
      Thread.sleep(sleepMsec);
    }
    if (waitIterations < 0) {
      LOG.warn("Timeout waiting for recovered containers");
    }
  }

  protected LogHandler createLogHandler(Configuration conf, Context context,
      DeletionService deletionService) {
    if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
        YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
      return new LogAggregationService(this.dispatcher, context,
          deletionService, dirsHandler);
    } else {
      return new NonAggregatingLogHandler(this.dispatcher, deletionService,
          dirsHandler, context.getNMStateStore());
    }
  }

  public ContainersMonitor getContainersMonitor() {
    return this.containersMonitor;
  }

  protected ResourceLocalizationService createResourceLocalizationService(
      ContainerExecutor exec, DeletionService deletionContext,
      Context context) {
    return new ResourceLocalizationService(this.dispatcher, exec,
        deletionContext, dirsHandler, context);
  }

  protected SharedCacheUploadService createSharedCacheUploaderService() {
    return new SharedCacheUploadService();
  }

  protected ContainersLauncher createContainersLauncher(Context context,
      ContainerExecutor exec) {
    return new ContainersLauncher(context, this.dispatcher, exec, dirsHandler,
        this);
  }

  @Override
  protected void serviceStart() throws Exception {

    // Enqueue user dirs in deletion context
    Configuration conf = getConfig();

    final InetSocketAddress initialAddress = conf.getSocketAddr(
        YarnConfiguration.NM_BIND_HOST,
        YarnConfiguration.NM_ADDRESS,
        YarnConfiguration.DEFAULT_NM_ADDRESS,
        YarnConfiguration.DEFAULT_NM_PORT);
    boolean usingEphemeralPort = (initialAddress.getPort() == 0);
    if (context.getNMStateStore().canRecover() && usingEphemeralPort) {
      throw new IllegalArgumentException("Cannot support recovery with an "
          + "ephemeral server port. Check the setting of "
          + YarnConfiguration.NM_ADDRESS);
    }
    // If recovering then delay opening the RPC service until the recovery
    // of resources and containers have completed, otherwise requests from
    // clients during recovery can interfere with the recovery process.
    final boolean delayedRpcServerStart =
        context.getNMStateStore().canRecover();

    Configuration serverConf = new Configuration(conf);

    // always enforce it to be token-based.
    serverConf.set(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        SaslRpcServer.AuthMethod.TOKEN.toString());

    YarnRPC rpc = YarnRPC.create(conf);

    server = rpc.getServer(ContainerManagementProtocol.class, this,
        initialAddress, serverConf, this.context.getNMTokenSecretManager(),
        conf.getInt(YarnConfiguration.NM_CONTAINER_MGR_THREAD_COUNT,
            YarnConfiguration.DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT));

    // Enable service authorization?
    if (conf.getBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        false)) {
      refreshServiceAcls(conf, new NMPolicyProvider());
    }

    LOG.info("Blocking new container-requests as container manager rpc"
        + " server is still starting.");
    this.setBlockNewContainerRequests(true);

    String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
    String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
    String hostOverride = null;
    if (bindHost != null && !bindHost.isEmpty()
        && nmAddress != null && !nmAddress.isEmpty()) {
      //a bind-host case with an address, to support overriding the first
      //hostname found when querying for our hostname with the specified
      //address, combine the specified address with the actual port listened
      //on by the server
      hostOverride = nmAddress.split(":")[0];
    }

    // setup node ID
    InetSocketAddress connectAddress;
    if (delayedRpcServerStart) {
      connectAddress = NetUtils.getConnectAddress(initialAddress);
    } else {
      server.start();
      connectAddress = NetUtils.getConnectAddress(server);
    }
    NodeId nodeId = buildNodeId(connectAddress, hostOverride);
    ((NodeManager.NMContext)context).setNodeId(nodeId);
    this.context.getNMTokenSecretManager().setNodeId(nodeId);
    this.context.getContainerTokenSecretManager().setNodeId(nodeId);

    // start remaining services
    super.serviceStart();

    if (delayedRpcServerStart) {
      waitForRecoveredContainers();
      server.start();

      // check that the node ID is as previously advertised
      connectAddress = NetUtils.getConnectAddress(server);
      NodeId serverNode = buildNodeId(connectAddress, hostOverride);
      if (!serverNode.equals(nodeId)) {
        throw new IOException("Node mismatch after server started, expected '"
            + nodeId + "' but found '" + serverNode + "'");
      }
    }

    LOG.info("ContainerManager started at " + connectAddress);
    LOG.info("ContainerManager bound to " + initialAddress);
  }

  private NodeId buildNodeId(InetSocketAddress connectAddress,
      String hostOverride) {
    if (hostOverride != null) {
      connectAddress = NetUtils.getConnectAddress(
          new InetSocketAddress(hostOverride, connectAddress.getPort()));
    }
    return NodeId.newInstance(
        connectAddress.getAddress().getCanonicalHostName(),
        connectAddress.getPort());
  }
  void refreshServiceAcls(Configuration configuration,
      PolicyProvider policyProvider) {
    this.server.refreshServiceAcl(configuration, policyProvider);
  }

  @Override
  public void serviceStop() throws Exception {
    setBlockNewContainerRequests(true);
    this.writeLock.lock();
    try {
      serviceStopped = true;
      if (context != null) {
        cleanUpApplicationsOnNMShutDown();
      }
    } finally {
      this.writeLock.unlock();
    }
    if (auxiliaryServices.getServiceState() == STARTED) {
      auxiliaryServices.unregisterServiceListener(this);
    }
    if (server != null) {
      server.stop();
    }
    super.serviceStop();
  }

  public void cleanUpApplicationsOnNMShutDown() {
    Map<ApplicationId, Application> applications =
        this.context.getApplications();
    if (applications.isEmpty()) {
      return;
    }
    LOG.info("Applications still running : " + applications.keySet());

    if (this.context.getNMStateStore().canRecover()
        && !this.context.getDecommissioned()) {
      if (getConfig().getBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED,
          YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED)) {
        // do not cleanup apps as they can be recovered on restart
        return;
      }
    }

    List<ApplicationId> appIds =
        new ArrayList<ApplicationId>(applications.keySet());
    this.handle(
        new CMgrCompletedAppsEvent(appIds,
            CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));

    LOG.info("Waiting for Applications to be Finished");

    long waitStartTime = System.currentTimeMillis();
    while (!applications.isEmpty()
        && System.currentTimeMillis() - waitStartTime
            < waitForContainersOnShutdownMillis) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ex) {
        LOG.warn(
            "Interrupted while sleeping on applications finish on shutdown",
            ex);
      }
    }

    // All applications Finished
    if (applications.isEmpty()) {
      LOG.info("All applications in FINISHED state");
    } else {
      LOG.info("Done waiting for Applications to be Finished. Still alive: "
          + applications.keySet());
    }
  }

  public void cleanupContainersOnNMResync() {
    Map<ContainerId, Container> containers = context.getContainers();
    if (containers.isEmpty()) {
      return;
    }
    LOG.info("Containers still running on "
        + CMgrCompletedContainersEvent.Reason.ON_NODEMANAGER_RESYNC + " : "
        + containers.keySet());

    List<ContainerId> containerIds =
        new ArrayList<ContainerId>(containers.keySet());

    LOG.info("Waiting for containers to be killed");

    this.handle(new CMgrCompletedContainersEvent(containerIds,
        CMgrCompletedContainersEvent.Reason.ON_NODEMANAGER_RESYNC));

    /*
     * We will wait till all the containers change their state to COMPLETE. We
     * will not remove the container statuses from nm context because these
     * are used while re-registering node manager with resource manager.
     */
    boolean allContainersCompleted = false;
    while (!containers.isEmpty() && !allContainersCompleted) {
      allContainersCompleted = true;
      for (Entry<ContainerId, Container> container : containers.entrySet()) {
        if (((ContainerImpl) container.getValue()).getCurrentState()
            != ContainerState.COMPLETE) {
          allContainersCompleted = false;
          try {
            Thread.sleep(1000);
          } catch (InterruptedException ex) {
            LOG.warn("Interrupted while sleeping on container kill on resync",
                ex);
          }
          break;
        }
      }
    }
    // All containers killed
    if (allContainersCompleted) {
      LOG.info("All containers in DONE state");
    } else {
      LOG.info("Done waiting for containers to be killed. Still alive: "
          + containers.keySet());
    }
  }

  // Get the remoteUGI corresponding to the api call.
  protected UserGroupInformation getRemoteUgi() throws YarnException {
    UserGroupInformation remoteUgi;
    try {
      remoteUgi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
      String msg = "Cannot obtain the user-name. Got exception: "
          + StringUtils.stringifyException(e);
      LOG.warn(msg);
      throw RPCUtil.getRemoteException(msg);
    }
    return remoteUgi;
  }

  // Obtain the needed ContainerTokenIdentifier from the remote-UGI. RPC layer
  // currently sets only the required id, but iterate through anyways just to
  // be sure.
  @Private
  @VisibleForTesting
  protected NMTokenIdentifier selectNMTokenIdentifier(
      UserGroupInformation remoteUgi) {
    Set<TokenIdentifier> tokenIdentifiers = remoteUgi.getTokenIdentifiers();
    NMTokenIdentifier resultId = null;
    for (TokenIdentifier id : tokenIdentifiers) {
      if (id instanceof NMTokenIdentifier) {
        resultId = (NMTokenIdentifier) id;
        break;
      }
    }
    return resultId;
  }

  protected void authorizeUser(UserGroupInformation remoteUgi,
      NMTokenIdentifier nmTokenIdentifier) throws YarnException {
    if (nmTokenIdentifier == null) {
      throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
    }
    if (!remoteUgi.getUserName().equals(
        nmTokenIdentifier.getApplicationAttemptId().toString())) {
      throw RPCUtil.getRemoteException("Expected applicationAttemptId: "
          + remoteUgi.getUserName() + " Found: "
          + nmTokenIdentifier.getApplicationAttemptId());
    }
  }

  /**
   * @param containerTokenIdentifier
   *          of the container to be started
   * @throws YarnException
   */
  @Private
  @VisibleForTesting
  protected void authorizeStartRequest(NMTokenIdentifier nmTokenIdentifier,
      ContainerTokenIdentifier containerTokenIdentifier) throws YarnException {

    if (nmTokenIdentifier == null) {
      throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
    }
    if (containerTokenIdentifier == null) {
      throw RPCUtil.getRemoteException(INVALID_CONTAINERTOKEN_MSG);
    }
    ContainerId containerId = containerTokenIdentifier.getContainerID();
    String containerIDStr = containerId.toString();
    boolean unauthorized = false;
    StringBuilder messageBuilder =
        new StringBuilder("Unauthorized request to start container. ");
    if (!nmTokenIdentifier.getApplicationAttemptId().getApplicationId()
        .equals(containerId.getApplicationAttemptId().getApplicationId())) {
      unauthorized = true;
      messageBuilder.append("\nNMToken for application attempt : ")
          .append(nmTokenIdentifier.getApplicationAttemptId())
          .append(" was used for starting container with container token")
          .append(" issued for application attempt : ")
          .append(containerId.getApplicationAttemptId());
    } else if (!this.context.getContainerTokenSecretManager()
        .isValidStartContainerRequest(containerTokenIdentifier)) {
      // Is the container being relaunched? Or RPC layer let startCall with
      // tokens generated off old-secret through?
      unauthorized = true;
      messageBuilder.append("\n Attempt to relaunch the same ")
          .append("container with id ").append(containerIDStr).append(".");
    } else if (containerTokenIdentifier.getExpiryTimeStamp() < System
        .currentTimeMillis()) {
      // Ensure the token is not expired.
      unauthorized = true;
      messageBuilder.append("\nThis token is expired. current time is ")
          .append(System.currentTimeMillis()).append(" found ")
          .append(containerTokenIdentifier.getExpiryTimeStamp());
      messageBuilder
          .append("\nNote: System times on machines may be out of sync.")
          .append(" Check system time and time zones.");
    }
    if (unauthorized) {
      String msg = messageBuilder.toString();
      LOG.error(msg);
      throw RPCUtil.getRemoteException(msg);
    }
  }

  /**
   * Start a list of containers on this NodeManager.
   */
  @Override
  public StartContainersResponse startContainers(
      StartContainersRequest requests) throws YarnException, IOException {
    if (blockNewContainerRequests.get()) {
      throw new NMNotYetReadyException(
          "Rejecting new containers as NodeManager has not"
              + " yet connected with ResourceManager");
    }
    UserGroupInformation remoteUgi = getRemoteUgi();
    NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
    authorizeUser(remoteUgi, nmTokenIdentifier);
    List<ContainerId> succeededContainers = new ArrayList<ContainerId>();
    Map<ContainerId, SerializedException> failedContainers =
        new HashMap<ContainerId, SerializedException>();
    for (StartContainerRequest request :
        requests.getStartContainerRequests()) {
      ContainerId containerId = null;
      try {
        if (request.getContainerToken() == null
            || request.getContainerToken().getIdentifier() == null) {
          throw new IOException(INVALID_CONTAINERTOKEN_MSG);
        }
        ContainerTokenIdentifier containerTokenIdentifier =
            BuilderUtils.newContainerTokenIdentifier(
                request.getContainerToken());
        verifyAndGetContainerTokenIdentifier(request.getContainerToken(),
            containerTokenIdentifier);
        containerId = containerTokenIdentifier.getContainerID();
        startContainerInternal(nmTokenIdentifier, containerTokenIdentifier,
            request);
        succeededContainers.add(containerId);
      } catch (YarnException e) {
        failedContainers.put(containerId, SerializedException.newInstance(e));
      } catch (InvalidToken ie) {
        failedContainers.put(containerId, SerializedException.newInstance(ie));
        throw ie;
      } catch (IOException e) {
        throw RPCUtil.getRemoteException(e);
      }
    }

    return StartContainersResponse.newInstance(getAuxServiceMetaData(),
        succeededContainers, failedContainers);
  }

  private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
      String user, Credentials credentials,
      Map<ApplicationAccessType, String> appAcls,
      LogAggregationContext logAggregationContext) {

    ContainerManagerApplicationProto.Builder builder =
        ContainerManagerApplicationProto.newBuilder();
    builder.setId(((ApplicationIdPBImpl) appId).getProto());
    builder.setUser(user);

    if (logAggregationContext != null) {
      builder.setLogAggregationContext(
          ((LogAggregationContextPBImpl) logAggregationContext).getProto());
    }

    builder.clearCredentials();
    if (credentials != null) {
      DataOutputBuffer dob = new DataOutputBuffer();
      try {
        credentials.writeTokenStorageToStream(dob);
        builder.setCredentials(ByteString.copyFrom(dob.getData()));
      } catch (IOException e) {
        // should not occur
        LOG.error("Cannot serialize credentials", e);
      }
    }

    builder.clearAcls();
    if (appAcls != null) {
      for (Map.Entry<ApplicationAccessType, String> acl : appAcls.entrySet()) {
        ApplicationACLMapProto p = ApplicationACLMapProto.newBuilder()
            .setAccessType(ProtoUtils.convertToProtoFormat(acl.getKey()))
            .setAcl(acl.getValue())
            .build();
        builder.addAcls(p);
      }
    }

    return builder.build();
  }

  @SuppressWarnings("unchecked")
  private void startContainerInternal(NMTokenIdentifier nmTokenIdentifier,
      ContainerTokenIdentifier containerTokenIdentifier,
      StartContainerRequest request) throws YarnException, IOException {

    /*
     * 1) It should save the NMToken into NMTokenSecretManager. This is done
     * here instead of RPC layer because at the time of opening/authenticating
     * the connection it doesn't know what all RPC calls user will make on it.
     * Also new NMToken is issued only at startContainer (once it gets
     * renewed).
     *
     * 2) It should validate containerToken. Need to check below things.
     * a) It is signed by correct master key (part of retrieve password).
     * b) It belongs to correct Node Manager (part of retrieve password).
     * c) It has correct RMIdentifier.
     * d) It is not expired.
     */
    authorizeStartRequest(nmTokenIdentifier, containerTokenIdentifier);

    if (containerTokenIdentifier.getRMIdentifier() != nodeStatusUpdater
        .getRMIdentifier()) {
      // Is the container coming from unknown RM
      StringBuilder sb = new StringBuilder("\nContainer ");
      sb.append(containerTokenIdentifier.getContainerID().toString())
          .append(" rejected as it is allocated by a previous RM");
      throw new InvalidContainerException(sb.toString());
    }
    // update NMToken
    updateNMTokenIdentifier(nmTokenIdentifier);

    ContainerId containerId = containerTokenIdentifier.getContainerID();
    String containerIdStr = containerId.toString();
    String user = containerTokenIdentifier.getApplicationSubmitter();

    LOG.info("Start request for " + containerIdStr + " by user " + user);

    ContainerLaunchContext launchContext = request.getContainerLaunchContext();

    Map<String, ByteBuffer> serviceData = getAuxServiceMetaData();
    if (launchContext.getServiceData() != null
        && !launchContext.getServiceData().isEmpty()) {
      for (Map.Entry<String, ByteBuffer> meta : launchContext.getServiceData()
          .entrySet()) {
        if (null == serviceData.get(meta.getKey())) {
          throw new InvalidAuxServiceException("The auxService:"
              + meta.getKey() + " does not exist");
        }
      }
    }

    Credentials credentials = parseCredentials(launchContext);

    Container container =
        new ContainerImpl(getConfig(), this.dispatcher,
            context.getNMStateStore(), launchContext,
            credentials, metrics, containerTokenIdentifier);
    ApplicationId applicationID =
        containerId.getApplicationAttemptId().getApplicationId();
    if (context.getContainers().putIfAbsent(containerId, container) != null) {
      NMAuditLogger.logFailure(user, AuditConstants.START_CONTAINER,
          "ContainerManagerImpl", "Container already running on this node!",
          applicationID, containerId);
      throw RPCUtil.getRemoteException("Container " + containerIdStr
          + " already is running on this node!!");
    }

    this.readLock.lock();
    try {
      if (!serviceStopped) {
        // Create the application
        Application application =
            new ApplicationImpl(dispatcher, user, applicationID, credentials,
                context);
        if (null == context.getApplications().putIfAbsent(applicationID,
            application)) {
          LOG.info("Creating a new application reference for app "
              + applicationID);
          LogAggregationContext logAggregationContext =
              containerTokenIdentifier.getLogAggregationContext();
          Map<ApplicationAccessType, String> appAcls =
              container.getLaunchContext().getApplicationACLs();
          context.getNMStateStore().storeApplication(applicationID,
              buildAppProto(applicationID, user, credentials, appAcls,
                  logAggregationContext));
          dispatcher.getEventHandler().handle(
              new ApplicationInitEvent(applicationID, appAcls,
                  logAggregationContext));
        }

        this.context.getNMStateStore().storeContainer(containerId, request);
        dispatcher.getEventHandler().handle(
            new ApplicationContainerInitEvent(container));

        this.context.getContainerTokenSecretManager().startContainerSuccessful(
            containerTokenIdentifier);
        NMAuditLogger.logSuccess(user, AuditConstants.START_CONTAINER,
            "ContainerManagerImpl", applicationID, containerId);
        // TODO launchedContainer misplaced -> doesn't necessarily mean a
        // container launch. A finished Application will not launch containers.
        metrics.launchedContainer();
        metrics.allocateContainer(containerTokenIdentifier.getResource());
      } else {
        throw new YarnException(
            "Container start failed as the NodeManager is "
                + "in the process of shutting down");
      }
    } finally {
      this.readLock.unlock();
    }
  }

  protected ContainerTokenIdentifier verifyAndGetContainerTokenIdentifier(
      org.apache.hadoop.yarn.api.records.Token token,
      ContainerTokenIdentifier containerTokenIdentifier) throws YarnException,
      InvalidToken {
    byte[] password =
        context.getContainerTokenSecretManager().retrievePassword(
            containerTokenIdentifier);
    byte[] tokenPass = token.getPassword().array();
    if (password == null || tokenPass == null
        || !Arrays.equals(password, tokenPass)) {
      throw new InvalidToken(
          "Invalid container token used for starting container on : "
              + context.getNodeId().toString());
    }
    return containerTokenIdentifier;
  }

  @Private
  @VisibleForTesting
  protected void updateNMTokenIdentifier(NMTokenIdentifier nmTokenIdentifier)
      throws InvalidToken {
    context.getNMTokenSecretManager().appAttemptStartContainer(
        nmTokenIdentifier);
  }

  private Credentials parseCredentials(ContainerLaunchContext launchContext)
      throws IOException {
    Credentials credentials = new Credentials();
    // //////////// Parse credentials
    ByteBuffer tokens = launchContext.getTokens();

    if (tokens != null) {
      DataInputByteBuffer buf = new DataInputByteBuffer();
      tokens.rewind();
      buf.reset(tokens);
      credentials.readTokenStorageStream(buf);
      if (LOG.isDebugEnabled()) {
        for (Token<? extends TokenIdentifier> tk :
            credentials.getAllTokens()) {
          LOG.debug(tk.getService() + " = " + tk.toString());
        }
      }
    }
    // //////////// End of parsing credentials
    return credentials;
  }

  /**
   * Stop a list of containers running on this NodeManager.
   */
  @Override
  public StopContainersResponse stopContainers(StopContainersRequest requests)
      throws YarnException, IOException {

    List<ContainerId> succeededRequests = new ArrayList<ContainerId>();
    Map<ContainerId, SerializedException> failedRequests =
        new HashMap<ContainerId, SerializedException>();
    UserGroupInformation remoteUgi = getRemoteUgi();
    NMTokenIdentifier identifier = selectNMTokenIdentifier(remoteUgi);
    if (identifier == null) {
      throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
    }
    for (ContainerId id : requests.getContainerIds()) {
      try {
        stopContainerInternal(identifier, id);
        succeededRequests.add(id);
      } catch (YarnException e) {
        failedRequests.put(id, SerializedException.newInstance(e));
      }
    }
    return StopContainersResponse
        .newInstance(succeededRequests, failedRequests);
  }

  @SuppressWarnings("unchecked")
  private void stopContainerInternal(NMTokenIdentifier nmTokenIdentifier,
      ContainerId containerID) throws YarnException, IOException {
    String containerIDStr = containerID.toString();
    Container container = this.context.getContainers().get(containerID);
    LOG.info("Stopping container with container Id: " + containerIDStr);
    authorizeGetAndStopContainerRequest(containerID, container, true,
        nmTokenIdentifier);

    if (container == null) {
      if (!nodeStatusUpdater.isContainerRecentlyStopped(containerID)) {
        throw RPCUtil.getRemoteException("Container " + containerIDStr
            + " is not handled by this NodeManager");
      }
    } else {
      context.getNMStateStore().storeContainerKilled(containerID);
      dispatcher.getEventHandler().handle(
          new ContainerKillEvent(containerID,
              ContainerExitStatus.KILLED_BY_APPMASTER,
              "Container killed by the ApplicationMaster."));

      NMAuditLogger.logSuccess(container.getUser(),
          AuditConstants.STOP_CONTAINER, "ContainerManagerImpl",
          containerID.getApplicationAttemptId().getApplicationId(),
          containerID);

      // TODO: Move this code to appropriate place once kill_container is
      // implemented.
      nodeStatusUpdater.sendOutofBandHeartBeat();
    }
  }

  /**
   * Get a list of container statuses running on this NodeManager.
   */
  @Override
  public GetContainerStatusesResponse getContainerStatuses(
      GetContainerStatusesRequest request) throws YarnException, IOException {

    List<ContainerStatus> succeededRequests = new ArrayList<ContainerStatus>();
    Map<ContainerId, SerializedException> failedRequests =
        new HashMap<ContainerId, SerializedException>();
    UserGroupInformation remoteUgi = getRemoteUgi();
    NMTokenIdentifier identifier = selectNMTokenIdentifier(remoteUgi);
    if (identifier == null) {
      throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
    }
    for (ContainerId id : request.getContainerIds()) {
      try {
        ContainerStatus status = getContainerStatusInternal(id, identifier);
        succeededRequests.add(status);
      } catch (YarnException e) {
        failedRequests.put(id, SerializedException.newInstance(e));
      }
    }
    return GetContainerStatusesResponse.newInstance(succeededRequests,
        failedRequests);
  }

  private ContainerStatus getContainerStatusInternal(ContainerId containerID,
      NMTokenIdentifier nmTokenIdentifier) throws YarnException {
    String containerIDStr = containerID.toString();
    Container container = this.context.getContainers().get(containerID);

    LOG.info("Getting container-status for " + containerIDStr);
    authorizeGetAndStopContainerRequest(containerID, container, false,
        nmTokenIdentifier);

    if (container == null) {
      if (nodeStatusUpdater.isContainerRecentlyStopped(containerID)) {
        throw RPCUtil.getRemoteException("Container " + containerIDStr
            + " was recently stopped on node manager.");
      } else {
        throw RPCUtil.getRemoteException("Container " + containerIDStr
            + " is not handled by this NodeManager");
      }
    }
    ContainerStatus containerStatus = container.cloneAndGetContainerStatus();
    LOG.info("Returning " + containerStatus);
    return containerStatus;
  }

  @Private
  @VisibleForTesting
  protected void authorizeGetAndStopContainerRequest(ContainerId containerId,
      Container container, boolean stopRequest, NMTokenIdentifier identifier)
      throws YarnException {
    if (identifier == null) {
      throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
    }
    /*
     * For get/stop container status we need to verify that 1) the user
     * (NMToken) application attempt only has started containers, and 2) the
     * requested containerId belongs to the same application attempt (NMToken)
     * that was used. (Note: this prevents a user from learning about another
     * application's containers.)
     */
    ApplicationId nmTokenAppId =
        identifier.getApplicationAttemptId().getApplicationId();

    if ((!nmTokenAppId.equals(containerId.getApplicationAttemptId()
        .getApplicationId()))
        || (container != null && !nmTokenAppId.equals(container
            .getContainerId().getApplicationAttemptId().getApplicationId()))) {
      if (stopRequest) {
        LOG.warn(identifier.getApplicationAttemptId()
            + " attempted to stop non-application container : "
            + container.getContainerId());
        NMAuditLogger.logFailure("UnknownUser", AuditConstants.STOP_CONTAINER,
            "ContainerManagerImpl", "Trying to stop unknown container!",
            nmTokenAppId, container.getContainerId());
      } else {
        LOG.warn(identifier.getApplicationAttemptId()
            + " attempted to get status for non-application container : "
            + container.getContainerId());
      }
    }
  }

  class ContainerEventDispatcher implements EventHandler<ContainerEvent> {
    @Override
    public void handle(ContainerEvent event) {
      Map<ContainerId, Container> containers =
          ContainerManagerImpl.this.context.getContainers();
      Container c = containers.get(event.getContainerID());
      if (c != null) {
        c.handle(event);
      } else {
        LOG.warn("Event " + event + " sent to absent container "
            + event.getContainerID());
      }
    }
  }

  class ApplicationEventDispatcher implements EventHandler<ApplicationEvent> {
    @Override
    public void handle(ApplicationEvent event) {
      Application app =
          ContainerManagerImpl.this.context.getApplications().get(
              event.getApplicationID());
      if (app != null) {
        app.handle(event);
      } else {
        LOG.warn("Event " + event + " sent to absent application "
            + event.getApplicationID());
      }
    }
  }

  @SuppressWarnings("unchecked")
  @Override
  public void handle(ContainerManagerEvent event) {
    switch (event.getType()) {
    case FINISH_APPS:
      CMgrCompletedAppsEvent appsFinishedEvent =
          (CMgrCompletedAppsEvent) event;
      for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
        String diagnostic = "";
        if (appsFinishedEvent.getReason()
            == CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN) {
          diagnostic = "Application killed on shutdown";
        } else if (appsFinishedEvent.getReason()
            == CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER) {
          diagnostic = "Application killed by ResourceManager";
        }
        try {
          this.context.getNMStateStore().storeFinishedApplication(appID);
        } catch (IOException e) {
          LOG.error("Unable to update application state in store", e);
        }
        this.dispatcher.getEventHandler().handle(
            new ApplicationFinishEvent(appID, diagnostic));
      }
      break;
    case FINISH_CONTAINERS:
      CMgrCompletedContainersEvent containersFinishedEvent =
          (CMgrCompletedContainersEvent) event;
      for (ContainerId container : containersFinishedEvent
          .getContainersToCleanup()) {
        this.dispatcher.getEventHandler().handle(
            new ContainerKillEvent(container,
                ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
                "Container Killed by ResourceManager"));
      }
      break;
    default:
      throw new YarnRuntimeException(
          "Got an unknown ContainerManagerEvent type: " + event.getType());
    }
  }

  public void setBlockNewContainerRequests(boolean blockNewContainerRequests) {
    this.blockNewContainerRequests.set(blockNewContainerRequests);
  }

  @Private
  @VisibleForTesting
  public boolean getBlockNewContainerRequestsStatus() {
    return this.blockNewContainerRequests.get();
  }

  @Override
  public void stateChanged(Service service) {
    // TODO Auto-generated method stub
  }

  public Context getContext() {
    return this.context;
  }

  public Map<String, ByteBuffer> getAuxServiceMetaData() {
    return this.auxiliaryServices.getMetaData();
  }
}
49,575
40.520938
135
java
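Both waitForRecoveredContainers() and the resync path above rely on a bounded polling loop: sleep a fixed interval, re-check shared state, and give up after a fixed number of iterations. A self-contained sketch of that pattern in plain Java; none of these class or variable names come from the NodeManager source.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class BoundedWaitDemo {
  public static void main(String[] args) throws InterruptedException {
    ConcurrentMap<String, String> states = new ConcurrentHashMap<>();
    states.put("container_1", "NEW");
    // Simulate another thread finishing recovery a little later.
    new Thread(() -> {
      try {
        Thread.sleep(300);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      states.put("container_1", "RUNNING");
    }).start();

    final int sleepMsec = 100;  // same poll interval the NM uses
    int waitIterations = 100;   // same iteration bound the NM uses
    while (--waitIterations >= 0) {
      if (!states.containsValue("NEW")) {
        break; // everything of interest has left the NEW state
      }
      Thread.sleep(sleepMsec);
    }
    System.out.println(waitIterations < 0
        ? "Timed out waiting" : "Recovered: " + states);
  }
}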
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerLocalization.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager;

public interface ContainerLocalization {

}
903
40.090909
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
import org.apache.hadoop.yarn.server.api.ContainerTerminationContext;

import com.google.common.base.Preconditions;

public class AuxServices extends AbstractService
    implements ServiceStateChangeListener, EventHandler<AuxServicesEvent> {

  static final String STATE_STORE_ROOT_NAME = "nm-aux-services";

  private static final Log LOG = LogFactory.getLog(AuxServices.class);

  protected final Map<String, AuxiliaryService> serviceMap;
  protected final Map<String, ByteBuffer> serviceMetaData;

  private final Pattern p = Pattern.compile("^[A-Za-z_]+[A-Za-z0-9_]*$");

  public AuxServices() {
    super(AuxServices.class.getName());
    serviceMap =
        Collections.synchronizedMap(new HashMap<String, AuxiliaryService>());
    serviceMetaData =
        Collections.synchronizedMap(new HashMap<String, ByteBuffer>());
    // Obtain services from configuration in init()
  }

  protected final synchronized void addService(String name,
      AuxiliaryService service) {
    LOG.info("Adding auxiliary service " + service.getName()
        + ", \"" + name + "\"");
    serviceMap.put(name, service);
  }

  Collection<AuxiliaryService> getServices() {
    return Collections.unmodifiableCollection(serviceMap.values());
  }

  /**
   * @return the meta data for all registered services, that have been started.
   * If a service has not been started no metadata will be available. The key
   * is the name of the service as defined in the configuration.
   */
  public Map<String, ByteBuffer> getMetaData() {
    Map<String, ByteBuffer> metaClone = new HashMap<String, ByteBuffer>(
        serviceMetaData.size());
    synchronized (serviceMetaData) {
      for (Entry<String, ByteBuffer> entry : serviceMetaData.entrySet()) {
        metaClone.put(entry.getKey(), entry.getValue().duplicate());
      }
    }
    return metaClone;
  }

  @Override
  public void serviceInit(Configuration conf) throws Exception {
    final FsPermission storeDirPerms = new FsPermission((short)0700);
    Path stateStoreRoot = null;
    FileSystem stateStoreFs = null;
    boolean recoveryEnabled = conf.getBoolean(
        YarnConfiguration.NM_RECOVERY_ENABLED,
        YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED);
    if (recoveryEnabled) {
      stateStoreRoot = new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR),
          STATE_STORE_ROOT_NAME);
      stateStoreFs = FileSystem.getLocal(conf);
    }
    Collection<String> auxNames = conf.getStringCollection(
        YarnConfiguration.NM_AUX_SERVICES);
    for (final String sName : auxNames) {
      try {
        Preconditions.checkArgument(
            validateAuxServiceName(sName),
            "The ServiceName: " + sName + " set in "
                + YarnConfiguration.NM_AUX_SERVICES + " is invalid. "
                + "A valid service name may only contain a-zA-Z0-9_ "
                + "and cannot start with a digit");
        Class<? extends AuxiliaryService> sClass = conf.getClass(
            String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, sName), null,
            AuxiliaryService.class);

        if (null == sClass) {
          throw new RuntimeException("No class defined for " + sName);
        }
        AuxiliaryService s = ReflectionUtils.newInstance(sClass, conf);
        // TODO better use s.getName()?
        if (!sName.equals(s.getName())) {
          LOG.warn("The auxiliary service named '" + sName + "' in the "
              + "configuration is for " + sClass + " which has "
              + "a name of '" + s.getName() + "'. Because these are "
              + "not the same, tools trying to send ServiceData and read "
              + "Service Meta Data may have issues unless they refer to "
              + "the name in the config.");
        }
        addService(sName, s);
        if (recoveryEnabled) {
          Path storePath = new Path(stateStoreRoot, sName);
          stateStoreFs.mkdirs(storePath, storeDirPerms);
          s.setRecoveryPath(storePath);
        }
        s.init(conf);
      } catch (RuntimeException e) {
        LOG.fatal("Failed to initialize " + sName, e);
        throw e;
      }
    }
    super.serviceInit(conf);
  }

  @Override
  public void serviceStart() throws Exception {
    // TODO fork(?) services running as configured user
    //      monitor for health, shutdown/restart(?) if any should die
    for (Map.Entry<String, AuxiliaryService> entry : serviceMap.entrySet()) {
      AuxiliaryService service = entry.getValue();
      String name = entry.getKey();
      service.start();
      service.registerServiceListener(this);
      ByteBuffer meta = service.getMetaData();
      if (meta != null) {
        serviceMetaData.put(name, meta);
      }
    }
    super.serviceStart();
  }

  @Override
  public void serviceStop() throws Exception {
    try {
      synchronized (serviceMap) {
        for (Service service : serviceMap.values()) {
          if (service.getServiceState() == Service.STATE.STARTED) {
            service.unregisterServiceListener(this);
            service.stop();
          }
        }
        serviceMap.clear();
        serviceMetaData.clear();
      }
    } finally {
      super.serviceStop();
    }
  }

  @Override
  public void stateChanged(Service service) {
    LOG.fatal("Service " + service.getName() + " changed state: "
        + service.getServiceState());
    stop();
  }

  @Override
  public void handle(AuxServicesEvent event) {
    LOG.info("Got event " + event.getType() + " for appId "
        + event.getApplicationID());
    switch (event.getType()) {
      case APPLICATION_INIT:
        LOG.info("Got APPLICATION_INIT for service " + event.getServiceID());
        AuxiliaryService service = null;
        try {
          service = serviceMap.get(event.getServiceID());
          service.initializeApplication(
              new ApplicationInitializationContext(event.getUser(),
                  event.getApplicationID(), event.getServiceData()));
        } catch (Throwable th) {
          logWarningWhenAuxServiceThrowExceptions(service,
              AuxServicesEventType.APPLICATION_INIT, th);
        }
        break;
      case APPLICATION_STOP:
        for (AuxiliaryService serv : serviceMap.values()) {
          try {
            serv.stopApplication(
                new ApplicationTerminationContext(event.getApplicationID()));
          } catch (Throwable th) {
            logWarningWhenAuxServiceThrowExceptions(serv,
                AuxServicesEventType.APPLICATION_STOP, th);
          }
        }
        break;
      case CONTAINER_INIT:
        for (AuxiliaryService serv : serviceMap.values()) {
          try {
            serv.initializeContainer(new ContainerInitializationContext(
                event.getUser(), event.getContainer().getContainerId(),
                event.getContainer().getResource(), event.getContainer()
                    .getContainerTokenIdentifier().getContainerType()));
          } catch (Throwable th) {
            logWarningWhenAuxServiceThrowExceptions(serv,
                AuxServicesEventType.CONTAINER_INIT, th);
          }
        }
        break;
      case CONTAINER_STOP:
        for (AuxiliaryService serv : serviceMap.values()) {
          try {
            serv.stopContainer(new ContainerTerminationContext(
                event.getUser(), event.getContainer().getContainerId(),
                event.getContainer().getResource(), event.getContainer()
                    .getContainerTokenIdentifier().getContainerType()));
          } catch (Throwable th) {
            logWarningWhenAuxServiceThrowExceptions(serv,
                AuxServicesEventType.CONTAINER_STOP, th);
          }
        }
        break;
      default:
        throw new RuntimeException("Unknown type: " + event.getType());
    }
  }

  private boolean validateAuxServiceName(String name) {
    if (name == null || name.trim().isEmpty()) {
      return false;
    }
    return p.matcher(name).matches();
  }

  private void logWarningWhenAuxServiceThrowExceptions(
      AuxiliaryService service, AuxServicesEventType eventType, Throwable th) {
    LOG.warn((null == service ? "The auxService is null"
        : "The auxService name is " + service.getName())
        + " and it got an error at event: " + eventType, th);
  }
}
10,182
36.996269
81
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationState.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; public enum ApplicationState { NEW, INITING, RUNNING, FINISHING_CONTAINERS_WAIT, APPLICATION_RESOURCES_CLEANINGUP, FINISHED }
1,002
40.791667
95
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerInitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogAggregationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.state.InvalidStateTransitionException; import org.apache.hadoop.yarn.state.MultipleArcTransition; import org.apache.hadoop.yarn.state.SingleArcTransition; import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; import com.google.common.annotations.VisibleForTesting; /** * The state machine for the representation of an Application * within the NodeManager. 
*/ public class ApplicationImpl implements Application { final Dispatcher dispatcher; final String user; final ApplicationId appId; final Credentials credentials; Map<ApplicationAccessType, String> applicationACLs; final ApplicationACLsManager aclsManager; private final ReadLock readLock; private final WriteLock writeLock; private final Context context; private static final Log LOG = LogFactory.getLog(ApplicationImpl.class); private LogAggregationContext logAggregationContext; Map<ContainerId, Container> containers = new HashMap<ContainerId, Container>(); public ApplicationImpl(Dispatcher dispatcher, String user, ApplicationId appId, Credentials credentials, Context context) { this.dispatcher = dispatcher; this.user = user; this.appId = appId; this.credentials = credentials; this.aclsManager = context.getApplicationACLsManager(); this.context = context; ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); readLock = lock.readLock(); writeLock = lock.writeLock(); stateMachine = stateMachineFactory.make(this); } @Override public String getUser() { return user.toString(); } @Override public ApplicationId getAppId() { return appId; } @Override public ApplicationState getApplicationState() { this.readLock.lock(); try { return this.stateMachine.getCurrentState(); } finally { this.readLock.unlock(); } } @Override public Map<ContainerId, Container> getContainers() { this.readLock.lock(); try { return this.containers; } finally { this.readLock.unlock(); } } private static final ContainerDoneTransition CONTAINER_DONE_TRANSITION = new ContainerDoneTransition(); private static StateMachineFactory<ApplicationImpl, ApplicationState, ApplicationEventType, ApplicationEvent> stateMachineFactory = new StateMachineFactory<ApplicationImpl, ApplicationState, ApplicationEventType, ApplicationEvent>(ApplicationState.NEW) // Transitions from NEW state .addTransition(ApplicationState.NEW, ApplicationState.INITING, ApplicationEventType.INIT_APPLICATION, new AppInitTransition()) .addTransition(ApplicationState.NEW, ApplicationState.NEW, ApplicationEventType.INIT_CONTAINER, new InitContainerTransition()) // Transitions from INITING state .addTransition(ApplicationState.INITING, ApplicationState.INITING, ApplicationEventType.INIT_CONTAINER, new InitContainerTransition()) .addTransition(ApplicationState.INITING, EnumSet.of(ApplicationState.FINISHING_CONTAINERS_WAIT, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP), ApplicationEventType.FINISH_APPLICATION, new AppFinishTriggeredTransition()) .addTransition(ApplicationState.INITING, ApplicationState.INITING, ApplicationEventType.APPLICATION_CONTAINER_FINISHED, CONTAINER_DONE_TRANSITION) .addTransition(ApplicationState.INITING, ApplicationState.INITING, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED, new AppLogInitDoneTransition()) .addTransition(ApplicationState.INITING, ApplicationState.INITING, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED, new AppLogInitFailTransition()) .addTransition(ApplicationState.INITING, ApplicationState.RUNNING, ApplicationEventType.APPLICATION_INITED, new AppInitDoneTransition()) // Transitions from RUNNING state .addTransition(ApplicationState.RUNNING, ApplicationState.RUNNING, ApplicationEventType.INIT_CONTAINER, new InitContainerTransition()) .addTransition(ApplicationState.RUNNING, ApplicationState.RUNNING, ApplicationEventType.APPLICATION_CONTAINER_FINISHED, CONTAINER_DONE_TRANSITION) .addTransition( ApplicationState.RUNNING, EnumSet.of(ApplicationState.FINISHING_CONTAINERS_WAIT, 
ApplicationState.APPLICATION_RESOURCES_CLEANINGUP), ApplicationEventType.FINISH_APPLICATION, new AppFinishTriggeredTransition()) // Transitions from FINISHING_CONTAINERS_WAIT state. .addTransition( ApplicationState.FINISHING_CONTAINERS_WAIT, EnumSet.of(ApplicationState.FINISHING_CONTAINERS_WAIT, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP), ApplicationEventType.APPLICATION_CONTAINER_FINISHED, new AppFinishTransition()) .addTransition(ApplicationState.FINISHING_CONTAINERS_WAIT, ApplicationState.FINISHING_CONTAINERS_WAIT, EnumSet.of( ApplicationEventType.APPLICATION_LOG_HANDLING_INITED, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED, ApplicationEventType.APPLICATION_INITED, ApplicationEventType.FINISH_APPLICATION)) // Transitions from APPLICATION_RESOURCES_CLEANINGUP state .addTransition(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, ApplicationEventType.APPLICATION_CONTAINER_FINISHED) .addTransition(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, ApplicationState.FINISHED, ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP, new AppCompletelyDoneTransition()) .addTransition(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, EnumSet.of( ApplicationEventType.APPLICATION_LOG_HANDLING_INITED, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED, ApplicationEventType.APPLICATION_INITED, ApplicationEventType.FINISH_APPLICATION)) // Transitions from FINISHED state .addTransition(ApplicationState.FINISHED, ApplicationState.FINISHED, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED, new AppLogsAggregatedTransition()) .addTransition(ApplicationState.FINISHED, ApplicationState.FINISHED, EnumSet.of( ApplicationEventType.APPLICATION_LOG_HANDLING_INITED, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED, ApplicationEventType.FINISH_APPLICATION)) // create the topology tables .installTopology(); private final StateMachine<ApplicationState, ApplicationEventType, ApplicationEvent> stateMachine; /** * Notify services of new application. * * In particular, this initializes the {@link LogAggregationService} */ @SuppressWarnings("unchecked") static class AppInitTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { ApplicationInitEvent initEvent = (ApplicationInitEvent)event; app.applicationACLs = initEvent.getApplicationACLs(); app.aclsManager.addApplication(app.getAppId(), app.applicationACLs); // Inform the logAggregator app.logAggregationContext = initEvent.getLogAggregationContext(); app.dispatcher.getEventHandler().handle( new LogHandlerAppStartedEvent(app.appId, app.user, app.credentials, ContainerLogsRetentionPolicy.ALL_CONTAINERS, app.applicationACLs, app.logAggregationContext)); } } /** * Handles the APPLICATION_LOG_HANDLING_INITED event that occurs after * {@link LogAggregationService} has created the directories for the app * and started the aggregation thread for the app. * * In particular, this requests that the {@link ResourceLocalizationService} * localize the application-scoped resources. 
*/ @SuppressWarnings("unchecked") static class AppLogInitDoneTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { app.dispatcher.getEventHandler().handle( new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); } } /** * Handles the APPLICATION_LOG_HANDLING_FAILED event that occurs after * {@link LogAggregationService} has failed to initialize the log * aggregation service * * In particular, this requests that the {@link ResourceLocalizationService} * localize the application-scoped resources. */ @SuppressWarnings("unchecked") static class AppLogInitFailTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { LOG.warn("Log Aggregation service failed to initialize, there will " + "be no logs for this application"); app.dispatcher.getEventHandler().handle( new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); } } /** * Handles INIT_CONTAINER events which request that we launch a new * container. When we're still in the INITTING state, we simply * queue these up. When we're in the RUNNING state, we pass along * an ContainerInitEvent to the appropriate ContainerImpl. */ @SuppressWarnings("unchecked") static class InitContainerTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { ApplicationContainerInitEvent initEvent = (ApplicationContainerInitEvent) event; Container container = initEvent.getContainer(); app.containers.put(container.getContainerId(), container); LOG.info("Adding " + container.getContainerId() + " to application " + app.toString()); switch (app.getApplicationState()) { case RUNNING: app.dispatcher.getEventHandler().handle(new ContainerInitEvent( container.getContainerId())); break; case INITING: case NEW: // these get queued up and sent out in AppInitDoneTransition break; default: assert false : "Invalid state for InitContainerTransition: " + app.getApplicationState(); } } } @SuppressWarnings("unchecked") static class AppInitDoneTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { // Start all the containers waiting for ApplicationInit for (Container container : app.containers.values()) { app.dispatcher.getEventHandler().handle(new ContainerInitEvent( container.getContainerId())); } } } static final class ContainerDoneTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { ApplicationContainerFinishedEvent containerEvent = (ApplicationContainerFinishedEvent) event; if (null == app.containers.remove(containerEvent.getContainerID())) { LOG.warn("Removing unknown " + containerEvent.getContainerID() + " from application " + app.toString()); } else { LOG.info("Removing " + containerEvent.getContainerID() + " from application " + app.toString()); } } } @SuppressWarnings("unchecked") void handleAppFinishWithContainersCleanedup() { // Delete Application level resources this.dispatcher.getEventHandler().handle( new ApplicationLocalizationEvent( LocalizationEventType.DESTROY_APPLICATION_RESOURCES, this)); // tell any auxiliary services that the app is done this.dispatcher.getEventHandler().handle( new 
AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP, appId)); // TODO: Trigger the LogsManager } @SuppressWarnings("unchecked") static class AppFinishTriggeredTransition implements MultipleArcTransition<ApplicationImpl, ApplicationEvent, ApplicationState> { @Override public ApplicationState transition(ApplicationImpl app, ApplicationEvent event) { ApplicationFinishEvent appEvent = (ApplicationFinishEvent)event; if (app.containers.isEmpty()) { // No container to cleanup. Cleanup app level resources. app.handleAppFinishWithContainersCleanedup(); return ApplicationState.APPLICATION_RESOURCES_CLEANINGUP; } // Send event to ContainersLauncher to finish all the containers of this // application. for (ContainerId containerID : app.containers.keySet()) { app.dispatcher.getEventHandler().handle( new ContainerKillEvent(containerID, ContainerExitStatus.KILLED_AFTER_APP_COMPLETION, "Container killed on application-finish event: " + appEvent.getDiagnostic())); } return ApplicationState.FINISHING_CONTAINERS_WAIT; } } static class AppFinishTransition implements MultipleArcTransition<ApplicationImpl, ApplicationEvent, ApplicationState> { @Override public ApplicationState transition(ApplicationImpl app, ApplicationEvent event) { ApplicationContainerFinishedEvent containerFinishEvent = (ApplicationContainerFinishedEvent) event; LOG.info("Removing " + containerFinishEvent.getContainerID() + " from application " + app.toString()); app.containers.remove(containerFinishEvent.getContainerID()); if (app.containers.isEmpty()) { // All containers are cleanedup. app.handleAppFinishWithContainersCleanedup(); return ApplicationState.APPLICATION_RESOURCES_CLEANINGUP; } return ApplicationState.FINISHING_CONTAINERS_WAIT; } } @SuppressWarnings("unchecked") static class AppCompletelyDoneTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { // Inform the logService app.dispatcher.getEventHandler().handle( new LogHandlerAppFinishedEvent(app.appId)); app.context.getNMTokenSecretManager().appFinished(app.getAppId()); } } static class AppLogsAggregatedTransition implements SingleArcTransition<ApplicationImpl, ApplicationEvent> { @Override public void transition(ApplicationImpl app, ApplicationEvent event) { ApplicationId appId = event.getApplicationID(); app.context.getApplications().remove(appId); app.aclsManager.removeApplication(appId); try { app.context.getNMStateStore().removeApplication(appId); } catch (IOException e) { LOG.error("Unable to remove application from state store", e); } } } @Override public void handle(ApplicationEvent event) { this.writeLock.lock(); try { ApplicationId applicationID = event.getApplicationID(); LOG.debug("Processing " + applicationID + " of type " + event.getType()); ApplicationState oldState = stateMachine.getCurrentState(); ApplicationState newState = null; try { // queue event requesting init of the same app newState = stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { LOG.warn("Can't handle this event at current state", e); } if (oldState != newState) { LOG.info("Application " + applicationID + " transitioned from " + oldState + " to " + newState); } } finally { this.writeLock.unlock(); } } @Override public String toString() { return appId.toString(); } @VisibleForTesting public LogAggregationContext getLogAggregationContext() { try { this.readLock.lock(); return this.logAggregationContext; } finally { this.readLock.unlock(); } } }
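As a reading aid (not code from the Hadoop tree), the sketch below drives the state machine above through its happy path. It assumes an already-constructed ApplicationImpl whose Dispatcher/Context wiring is stubbed out by test scaffolding, and that the class lives in the containermanager.application package; it demonstrates event ordering rather than a standalone NodeManager flow.
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

public class ApplicationLifecycleSketch {
  // Hypothetical driver; `app`, `appId`, `container` and `acls` are assumed
  // to be supplied by the caller.
  static void driveHappyPath(ApplicationImpl app, ApplicationId appId,
      Container container, Map<ApplicationAccessType, String> acls) {
    // NEW -> INITING: AppInitTransition registers ACLs, notifies log handling
    app.handle(new ApplicationInitEvent(appId, acls));
    // Container requests while INITING are queued by InitContainerTransition
    app.handle(new ApplicationContainerInitEvent(container));
    // Log handler ready: app stays INITING, requests app-level localization
    app.handle(new ApplicationEvent(appId,
        ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
    // INITING -> RUNNING: AppInitDoneTransition releases queued containers
    app.handle(new ApplicationInitedEvent(appId));
    // RUNNING -> FINISHING_CONTAINERS_WAIT (containers are killed first),
    // or straight to APPLICATION_RESOURCES_CLEANINGUP when none remain
    app.handle(new ApplicationFinishEvent(appId, "application finished"));
  }
}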
19,991
40.051335
111
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import org.apache.hadoop.yarn.api.records.ContainerId; public class ApplicationContainerFinishedEvent extends ApplicationEvent { private ContainerId containerID; public ApplicationContainerFinishedEvent( ContainerId containerID) { super(containerID.getApplicationAttemptId().getApplicationId(), ApplicationEventType.APPLICATION_CONTAINER_FINISHED); this.containerID = containerID; } public ContainerId getContainerID() { return this.containerID; } }
1,362
35.837838
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import java.util.Map; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.LogAggregationContext; public class ApplicationInitEvent extends ApplicationEvent { private final Map<ApplicationAccessType, String> applicationACLs; private final LogAggregationContext logAggregationContext; public ApplicationInitEvent(ApplicationId appId, Map<ApplicationAccessType, String> acls) { this(appId, acls, null); } public ApplicationInitEvent(ApplicationId appId, Map<ApplicationAccessType, String> acls, LogAggregationContext logAggregationContext) { super(appId, ApplicationEventType.INIT_APPLICATION); this.applicationACLs = acls; this.logAggregationContext = logAggregationContext; } public Map<ApplicationAccessType, String> getApplicationACLs() { return this.applicationACLs; } public LogAggregationContext getLogAggregationContext() { return this.logAggregationContext; } }
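A small hypothetical example of building this event: the ACL strings follow the usual "users groups" form, and using the two-argument constructor delegates with a null LogAggregationContext, meaning no app-specific aggregation policy. The user and group names are placeholders.
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;

// Sketch only; appId comes from the caller.
static ApplicationInitEvent newInitEvent(ApplicationId appId) {
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>();
  acls.put(ApplicationAccessType.VIEW_APP, "alice analysts"); // user(s) group(s)
  acls.put(ApplicationAccessType.MODIFY_APP, "alice");
  return new ApplicationInitEvent(appId, acls); // null LogAggregationContext
}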
1,924
35.320755
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.event.AbstractEvent; public class ApplicationEvent extends AbstractEvent<ApplicationEventType> { private final ApplicationId applicationID; public ApplicationEvent(ApplicationId appID, ApplicationEventType appEventType) { super(appEventType); this.applicationID = appID; } public ApplicationId getApplicationID() { return this.applicationID; } }
1,339
33.358974
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/Application.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import java.util.Map; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; public interface Application extends EventHandler<ApplicationEvent> { String getUser(); Map<ContainerId, Container> getContainers(); ApplicationId getAppId(); ApplicationState getApplicationState(); }
1,360
33.897436
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitedEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import org.apache.hadoop.yarn.api.records.ApplicationId; public class ApplicationInitedEvent extends ApplicationEvent { public ApplicationInitedEvent(ApplicationId appID) { super(appID, ApplicationEventType.APPLICATION_INITED); } }
1,115
37.482759
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; public enum ApplicationEventType { // Source: ContainerManager INIT_APPLICATION, INIT_CONTAINER, FINISH_APPLICATION, // Source: LogAggregationService if init fails // Source: ResourceLocalizationService APPLICATION_INITED, APPLICATION_RESOURCES_CLEANEDUP, // Source: Container APPLICATION_CONTAINER_FINISHED, // Source: Log Handler APPLICATION_LOG_HANDLING_INITED, APPLICATION_LOG_HANDLING_FINISHED, APPLICATION_LOG_HANDLING_FAILED }
1,337
32.45
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationFinishEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import org.apache.hadoop.yarn.api.records.ApplicationId; /** * Finish/abort event */ public class ApplicationFinishEvent extends ApplicationEvent { private final String diagnostic; /** * Application event to abort all containers associated with the app * @param appId to abort containers * @param diagnostic reason for the abort */ public ApplicationFinishEvent(ApplicationId appId, String diagnostic) { super(appId, ApplicationEventType.FINISH_APPLICATION); this.diagnostic = diagnostic; } /** * Why the app was aborted * @return diagnostic message */ public String getDiagnostic() { return diagnostic; } }
1,549
31.978723
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerInitEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerInitEvent; /** * Event sent from {@link ContainerManagerImpl} to {@link ApplicationImpl} to * request the initialization of a container. This is funneled through * the Application so that the application life-cycle can be checked, and container * launches can be delayed until the application is fully initialized. * * Once the application is initialized, * {@link ApplicationImpl.InitContainerTransition} simply passes this event on as a * {@link ContainerInitEvent}. * */ public class ApplicationContainerInitEvent extends ApplicationEvent { final Container container; public ApplicationContainerInitEvent(Container container) { super(container.getContainerId().getApplicationAttemptId() .getApplicationId(), ApplicationEventType.INIT_CONTAINER); this.container = container; } Container getContainer() { return container; } }
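A hypothetical dispatch site mirroring what the javadoc describes: the event carries the Container, and the ApplicationId derived from the container id in the constructor lets the dispatcher route it to the owning ApplicationImpl.
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

// Sketch only; `dispatcher` and `container` are assumed to exist.
static void requestContainerInit(Dispatcher dispatcher, Container container) {
  // Routed to the ApplicationImpl keyed by
  // container.getContainerId().getApplicationAttemptId().getApplicationId()
  dispatcher.getEventHandler().handle(
      new ApplicationContainerInitEvent(container));
}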
2,024
41.1875
95
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerExitEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; public class ContainerExitEvent extends ContainerEvent { private int exitCode; private final String diagnosticInfo; public ContainerExitEvent(ContainerId cID, ContainerEventType eventType, int exitCode, String diagnosticInfo) { super(cID, eventType); this.exitCode = exitCode; this.diagnosticInfo = diagnosticInfo; } public int getExitCode() { return this.exitCode; } public String getDiagnosticInfo() { return diagnosticInfo; } }
1,403
32.428571
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerState.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; public enum ContainerState { NEW, LOCALIZING, LOCALIZATION_FAILED, LOCALIZED, RUNNING, EXITED_WITH_SUCCESS, EXITED_WITH_FAILURE, KILLING, CONTAINER_CLEANEDUP_AFTER_KILL, CONTAINER_RESOURCES_CLEANINGUP, DONE }
1,086
40.807692
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerInitEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; public class ContainerInitEvent extends ContainerEvent { public ContainerInitEvent(ContainerId c) { super(c, ContainerEventType.INIT_CONTAINER); } }
1,085
36.448276
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; public class ContainerResourceEvent extends ContainerEvent { private final LocalResourceRequest rsrc; public ContainerResourceEvent(ContainerId container, ContainerEventType type, LocalResourceRequest rsrc) { super(container, type); this.rsrc = rsrc; } public LocalResourceRequest getResource() { return rsrc; } }
1,370
35.078947
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import java.io.IOException; import java.net.URISyntaxException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; import java.util.EnumSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEvent; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache.SharedCacheUploadEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStartMonitoringEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStopMonitoringEvent; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.state.InvalidStateTransitionException; import org.apache.hadoop.yarn.state.MultipleArcTransition; import org.apache.hadoop.yarn.state.SingleArcTransition; import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.SystemClock; public class ContainerImpl implements Container { private final Lock readLock; private final Lock writeLock; private final Dispatcher dispatcher; private final NMStateStoreService stateStore; private final Credentials credentials; private final NodeManagerMetrics metrics; private final ContainerLaunchContext launchContext; private final ContainerTokenIdentifier containerTokenIdentifier; private final ContainerId containerId; private final Resource resource; private final String user; private int exitCode = ContainerExitStatus.INVALID; private final StringBuilder diagnostics; private boolean wasLaunched; private long containerLocalizationStartTime; private long containerLaunchStartTime; private static Clock clock = new SystemClock(); /** The NM-wide configuration - not specific to this container */ private final Configuration daemonConf; private static final Log LOG = LogFactory.getLog(ContainerImpl.class); private final Map<LocalResourceRequest,List<String>> pendingResources = new HashMap<LocalResourceRequest,List<String>>(); private final Map<Path,List<String>> localizedResources = new HashMap<Path,List<String>>(); private final List<LocalResourceRequest> publicRsrcs = new ArrayList<LocalResourceRequest>(); private final List<LocalResourceRequest> privateRsrcs = new ArrayList<LocalResourceRequest>(); private final List<LocalResourceRequest> appRsrcs = new ArrayList<LocalResourceRequest>(); private final Map<LocalResourceRequest, Path> resourcesToBeUploaded = new ConcurrentHashMap<LocalResourceRequest, Path>(); private final Map<LocalResourceRequest, Boolean> resourcesUploadPolicies = new ConcurrentHashMap<LocalResourceRequest, Boolean>(); // whether container has been recovered after a restart private RecoveredContainerStatus recoveredStatus = RecoveredContainerStatus.REQUESTED; // whether container was marked as killed after recovery private boolean recoveredAsKilled = false; public ContainerImpl(Configuration conf, Dispatcher dispatcher, NMStateStoreService stateStore, ContainerLaunchContext launchContext, Credentials creds, NodeManagerMetrics metrics, ContainerTokenIdentifier containerTokenIdentifier) { this.daemonConf = conf; this.dispatcher = dispatcher; this.stateStore = stateStore; this.launchContext = launchContext; this.containerTokenIdentifier = containerTokenIdentifier; this.containerId = 
containerTokenIdentifier.getContainerID(); this.resource = containerTokenIdentifier.getResource(); this.diagnostics = new StringBuilder(); this.credentials = creds; this.metrics = metrics; user = containerTokenIdentifier.getApplicationSubmitter(); ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); this.readLock = readWriteLock.readLock(); this.writeLock = readWriteLock.writeLock(); stateMachine = stateMachineFactory.make(this); } // constructor for a recovered container public ContainerImpl(Configuration conf, Dispatcher dispatcher, NMStateStoreService stateStore, ContainerLaunchContext launchContext, Credentials creds, NodeManagerMetrics metrics, ContainerTokenIdentifier containerTokenIdentifier, RecoveredContainerStatus recoveredStatus, int exitCode, String diagnostics, boolean wasKilled) { this(conf, dispatcher, stateStore, launchContext, creds, metrics, containerTokenIdentifier); this.recoveredStatus = recoveredStatus; this.exitCode = exitCode; this.recoveredAsKilled = wasKilled; this.diagnostics.append(diagnostics); } private static final ContainerDiagnosticsUpdateTransition UPDATE_DIAGNOSTICS_TRANSITION = new ContainerDiagnosticsUpdateTransition(); // State Machine for each container. private static StateMachineFactory <ContainerImpl, ContainerState, ContainerEventType, ContainerEvent> stateMachineFactory = new StateMachineFactory<ContainerImpl, ContainerState, ContainerEventType, ContainerEvent>(ContainerState.NEW) // From NEW State .addTransition(ContainerState.NEW, EnumSet.of(ContainerState.LOCALIZING, ContainerState.LOCALIZED, ContainerState.LOCALIZATION_FAILED, ContainerState.DONE), ContainerEventType.INIT_CONTAINER, new RequestResourcesTransition()) .addTransition(ContainerState.NEW, ContainerState.NEW, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.NEW, ContainerState.DONE, ContainerEventType.KILL_CONTAINER, new KillOnNewTransition()) // From LOCALIZING State .addTransition(ContainerState.LOCALIZING, EnumSet.of(ContainerState.LOCALIZING, ContainerState.LOCALIZED), ContainerEventType.RESOURCE_LOCALIZED, new LocalizedTransition()) .addTransition(ContainerState.LOCALIZING, ContainerState.LOCALIZATION_FAILED, ContainerEventType.RESOURCE_FAILED, new ResourceFailedTransition()) .addTransition(ContainerState.LOCALIZING, ContainerState.LOCALIZING, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.LOCALIZING, ContainerState.KILLING, ContainerEventType.KILL_CONTAINER, new KillDuringLocalizationTransition()) // From LOCALIZATION_FAILED State .addTransition(ContainerState.LOCALIZATION_FAILED, ContainerState.DONE, ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP, new LocalizationFailedToDoneTransition()) .addTransition(ContainerState.LOCALIZATION_FAILED, ContainerState.LOCALIZATION_FAILED, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) // container not launched so kill is a no-op .addTransition(ContainerState.LOCALIZATION_FAILED, ContainerState.LOCALIZATION_FAILED, ContainerEventType.KILL_CONTAINER) // container cleanup triggers a release of all resources // regardless of whether they were localized or not // LocalizedResource handles release event in all states .addTransition(ContainerState.LOCALIZATION_FAILED, ContainerState.LOCALIZATION_FAILED, ContainerEventType.RESOURCE_LOCALIZED) .addTransition(ContainerState.LOCALIZATION_FAILED, ContainerState.LOCALIZATION_FAILED, ContainerEventType.RESOURCE_FAILED) // From LOCALIZED State 
.addTransition(ContainerState.LOCALIZED, ContainerState.RUNNING, ContainerEventType.CONTAINER_LAUNCHED, new LaunchTransition()) .addTransition(ContainerState.LOCALIZED, ContainerState.EXITED_WITH_FAILURE, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, new ExitedWithFailureTransition(true)) .addTransition(ContainerState.LOCALIZED, ContainerState.LOCALIZED, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.LOCALIZED, ContainerState.KILLING, ContainerEventType.KILL_CONTAINER, new KillTransition()) // From RUNNING State .addTransition(ContainerState.RUNNING, ContainerState.EXITED_WITH_SUCCESS, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS, new ExitedWithSuccessTransition(true)) .addTransition(ContainerState.RUNNING, ContainerState.EXITED_WITH_FAILURE, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, new ExitedWithFailureTransition(true)) .addTransition(ContainerState.RUNNING, ContainerState.RUNNING, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.RUNNING, ContainerState.KILLING, ContainerEventType.KILL_CONTAINER, new KillTransition()) .addTransition(ContainerState.RUNNING, ContainerState.EXITED_WITH_FAILURE, ContainerEventType.CONTAINER_KILLED_ON_REQUEST, new KilledExternallyTransition()) // From CONTAINER_EXITED_WITH_SUCCESS State .addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.DONE, ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP, new ExitedWithSuccessToDoneTransition()) .addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.EXITED_WITH_SUCCESS, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.EXITED_WITH_SUCCESS, ContainerEventType.KILL_CONTAINER) // From EXITED_WITH_FAILURE State .addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.DONE, ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP, new ExitedWithFailureToDoneTransition()) .addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.EXITED_WITH_FAILURE, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.EXITED_WITH_FAILURE, ContainerState.EXITED_WITH_FAILURE, ContainerEventType.KILL_CONTAINER) // From KILLING State. 
.addTransition(ContainerState.KILLING, ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, ContainerEventType.CONTAINER_KILLED_ON_REQUEST, new ContainerKilledTransition()) .addTransition(ContainerState.KILLING, ContainerState.KILLING, ContainerEventType.RESOURCE_LOCALIZED, new LocalizedResourceDuringKillTransition()) .addTransition(ContainerState.KILLING, ContainerState.KILLING, ContainerEventType.RESOURCE_FAILED) .addTransition(ContainerState.KILLING, ContainerState.KILLING, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.KILLING, ContainerState.KILLING, ContainerEventType.KILL_CONTAINER) .addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_SUCCESS, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS, new ExitedWithSuccessTransition(false)) .addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_FAILURE, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, new ExitedWithFailureTransition(false)) .addTransition(ContainerState.KILLING, ContainerState.DONE, ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP, new KillingToDoneTransition()) // Handling a launched container during the killing stage is a no-op, // as container cleanup is always handled after the container-launch event // in the containers launcher .addTransition(ContainerState.KILLING, ContainerState.KILLING, ContainerEventType.CONTAINER_LAUNCHED) // From CONTAINER_CLEANEDUP_AFTER_KILL State. .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, ContainerState.DONE, ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP, new ContainerCleanedupAfterKillToDoneTransition()) .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, EnumSet.of(ContainerEventType.KILL_CONTAINER, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE)) // From DONE .addTransition(ContainerState.DONE, ContainerState.DONE, ContainerEventType.KILL_CONTAINER) .addTransition(ContainerState.DONE, ContainerState.DONE, ContainerEventType.INIT_CONTAINER) .addTransition(ContainerState.DONE, ContainerState.DONE, ContainerEventType.UPDATE_DIAGNOSTICS_MSG, UPDATE_DIAGNOSTICS_TRANSITION) // This transition may result when we notify the container of a failed // localization, e.g. if the localizer thread for that container fails // for some reason .addTransition(ContainerState.DONE, ContainerState.DONE, EnumSet.of(ContainerEventType.RESOURCE_FAILED, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE)) // create the topology tables .installTopology(); private final StateMachine<ContainerState, ContainerEventType, ContainerEvent> stateMachine; public org.apache.hadoop.yarn.api.records.ContainerState getCurrentState() { switch (stateMachine.getCurrentState()) { case NEW: case LOCALIZING: case LOCALIZATION_FAILED: case LOCALIZED: case RUNNING: case EXITED_WITH_SUCCESS: case EXITED_WITH_FAILURE: case KILLING: case CONTAINER_CLEANEDUP_AFTER_KILL: case CONTAINER_RESOURCES_CLEANINGUP: return org.apache.hadoop.yarn.api.records.ContainerState.RUNNING; case DONE: default: return org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE; } } @Override public String getUser() { this.readLock.lock(); try { return this.user; } finally { this.readLock.unlock(); } } @Override public Map<Path,List<String>>
getLocalizedResources() { this.readLock.lock(); try { if (ContainerState.LOCALIZED == getContainerState()) { return localizedResources; } else { return null; } } finally { this.readLock.unlock(); } } @Override public Credentials getCredentials() { this.readLock.lock(); try { return credentials; } finally { this.readLock.unlock(); } } @Override public ContainerState getContainerState() { this.readLock.lock(); try { return stateMachine.getCurrentState(); } finally { this.readLock.unlock(); } } @Override public ContainerLaunchContext getLaunchContext() { this.readLock.lock(); try { return launchContext; } finally { this.readLock.unlock(); } } @Override public ContainerStatus cloneAndGetContainerStatus() { this.readLock.lock(); try { return BuilderUtils.newContainerStatus(this.containerId, getCurrentState(), diagnostics.toString(), exitCode); } finally { this.readLock.unlock(); } } @Override public NMContainerStatus getNMContainerStatus() { this.readLock.lock(); try { return NMContainerStatus.newInstance(this.containerId, getCurrentState(), getResource(), diagnostics.toString(), exitCode, containerTokenIdentifier.getPriority(), containerTokenIdentifier.getCreationTime(), containerTokenIdentifier.getNodeLabelExpression()); } finally { this.readLock.unlock(); } } @Override public ContainerId getContainerId() { return this.containerId; } @Override public Resource getResource() { return this.resource; } @Override public ContainerTokenIdentifier getContainerTokenIdentifier() { this.readLock.lock(); try { return this.containerTokenIdentifier; } finally { this.readLock.unlock(); } } @SuppressWarnings("unchecked") private void sendFinishedEvents() { // Inform the application @SuppressWarnings("rawtypes") EventHandler eventHandler = dispatcher.getEventHandler(); eventHandler.handle(new ApplicationContainerFinishedEvent(containerId)); // Remove the container from the resource-monitor eventHandler.handle(new ContainerStopMonitoringEvent(containerId)); // Tell the logService too eventHandler.handle(new LogHandlerContainerFinishedEvent( containerId, exitCode)); } @SuppressWarnings("unchecked") // dispatcher not typed private void sendLaunchEvent() { ContainersLauncherEventType launcherEvent = ContainersLauncherEventType.LAUNCH_CONTAINER; if (recoveredStatus == RecoveredContainerStatus.LAUNCHED) { // try to recover a container that was previously launched launcherEvent = ContainersLauncherEventType.RECOVER_CONTAINER; } containerLaunchStartTime = clock.getTime(); dispatcher.getEventHandler().handle( new ContainersLauncherEvent(this, launcherEvent)); } // Inform the ContainersMonitor to start monitoring the container's // resource usage. @SuppressWarnings("unchecked") // dispatcher not typed private void sendContainerMonitorStartEvent() { long launchDuration = clock.getTime() - containerLaunchStartTime; metrics.addContainerLaunchDuration(launchDuration); long pmemBytes = getResource().getMemory() * 1024 * 1024L; float pmemRatio = daemonConf.getFloat( YarnConfiguration.NM_VMEM_PMEM_RATIO, YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO); long vmemBytes = (long) (pmemRatio * pmemBytes); int cpuVcores = getResource().getVirtualCores(); long localizationDuration = containerLaunchStartTime - containerLocalizationStartTime; dispatcher.getEventHandler().handle( new ContainerStartMonitoringEvent(containerId, vmemBytes, pmemBytes, cpuVcores, launchDuration, localizationDuration)); } private void addDiagnostics(String... 
diags) { for (String s : diags) { this.diagnostics.append(s); } try { stateStore.storeContainerDiagnostics(containerId, diagnostics); } catch (IOException e) { LOG.warn("Unable to update diagnostics in state store for " + containerId, e); } } @SuppressWarnings("unchecked") // dispatcher not typed public void cleanup() { Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrc = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); if (!publicRsrcs.isEmpty()) { rsrc.put(LocalResourceVisibility.PUBLIC, publicRsrcs); } if (!privateRsrcs.isEmpty()) { rsrc.put(LocalResourceVisibility.PRIVATE, privateRsrcs); } if (!appRsrcs.isEmpty()) { rsrc.put(LocalResourceVisibility.APPLICATION, appRsrcs); } dispatcher.getEventHandler().handle( new ContainerLocalizationCleanupEvent(this, rsrc)); } static class ContainerTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { // Just drain the event and change the state. } } /** * State transition when a NEW container receives the INIT_CONTAINER * message. * * If there are resources to localize, sends a * ContainerLocalizationRequest (INIT_CONTAINER_RESOURCES) * to the ResourceLocalizationManager and enters LOCALIZING state. * * If there are no resources to localize, sends LAUNCH_CONTAINER event * and enters LOCALIZED state directly. * * If there are any invalid resources specified, enters LOCALIZATION_FAILED * directly. */ @SuppressWarnings("unchecked") // dispatcher not typed static class RequestResourcesTransition implements MultipleArcTransition<ContainerImpl,ContainerEvent,ContainerState> { @Override public ContainerState transition(ContainerImpl container, ContainerEvent event) { if (container.recoveredStatus == RecoveredContainerStatus.COMPLETED) { container.sendFinishedEvents(); return ContainerState.DONE; } else if (container.recoveredAsKilled && container.recoveredStatus == RecoveredContainerStatus.REQUESTED) { // container was killed but never launched container.metrics.killedContainer(); NMAuditLogger.logSuccess(container.user, AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); container.metrics.releaseContainer(container.resource); container.sendFinishedEvents(); return ContainerState.DONE; } final ContainerLaunchContext ctxt = container.launchContext; container.metrics.initingContainer(); container.dispatcher.getEventHandler().handle(new AuxServicesEvent (AuxServicesEventType.CONTAINER_INIT, container)); // Inform the AuxServices about the opaque serviceData Map<String,ByteBuffer> csd = ctxt.getServiceData(); if (csd != null) { // This can happen more than once per Application as each container may // have distinct service data for (Map.Entry<String,ByteBuffer> service : csd.entrySet()) { container.dispatcher.getEventHandler().handle( new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT, container.user, container.containerId .getApplicationAttemptId().getApplicationId(), service.getKey().toString(), service.getValue())); } } container.containerLocalizationStartTime = clock.getTime(); // Send requests for public, private resources Map<String,LocalResource> cntrRsrc = ctxt.getLocalResources(); if (!cntrRsrc.isEmpty()) { try { for (Map.Entry<String,LocalResource> rsrc : cntrRsrc.entrySet()) { try { LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue()); List<String> links = 
container.pendingResources.get(req); if (links == null) { links = new ArrayList<String>(); container.pendingResources.put(req, links); } links.add(rsrc.getKey()); storeSharedCacheUploadPolicy(container, req, rsrc.getValue() .getShouldBeUploadedToSharedCache()); switch (rsrc.getValue().getVisibility()) { case PUBLIC: container.publicRsrcs.add(req); break; case PRIVATE: container.privateRsrcs.add(req); break; case APPLICATION: container.appRsrcs.add(req); break; } } catch (URISyntaxException e) { LOG.info("Got exception parsing " + rsrc.getKey() + " and value " + rsrc.getValue()); throw e; } } } catch (URISyntaxException e) { // malformed resource; abort container launch LOG.warn("Failed to parse resource-request", e); container.cleanup(); container.metrics.endInitingContainer(); return ContainerState.LOCALIZATION_FAILED; } Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new LinkedHashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); if (!container.publicRsrcs.isEmpty()) { req.put(LocalResourceVisibility.PUBLIC, container.publicRsrcs); } if (!container.privateRsrcs.isEmpty()) { req.put(LocalResourceVisibility.PRIVATE, container.privateRsrcs); } if (!container.appRsrcs.isEmpty()) { req.put(LocalResourceVisibility.APPLICATION, container.appRsrcs); } container.dispatcher.getEventHandler().handle( new ContainerLocalizationRequestEvent(container, req)); return ContainerState.LOCALIZING; } else { container.sendLaunchEvent(); container.metrics.endInitingContainer(); return ContainerState.LOCALIZED; } } } /** * Store the resource's shared cache upload policies * Given LocalResourceRequest can be shared across containers in * LocalResourcesTrackerImpl, we preserve the upload policies here. * In addition, it is possible for the application to create several * "identical" LocalResources as part of * ContainerLaunchContext.setLocalResources with different symlinks. * There is a corner case where these "identical" local resources have * different upload policies. For that scenario, upload policy will be set to * true as long as there is at least one LocalResource entry with * upload policy set to true. */ private static void storeSharedCacheUploadPolicy(ContainerImpl container, LocalResourceRequest resourceRequest, Boolean uploadPolicy) { Boolean storedUploadPolicy = container.resourcesUploadPolicies.get(resourceRequest); if (storedUploadPolicy == null || (!storedUploadPolicy && uploadPolicy)) { container.resourcesUploadPolicies.put(resourceRequest, uploadPolicy); } } /** * Transition when one of the requested resources for this container * has been successfully localized. */ static class LocalizedTransition implements MultipleArcTransition<ContainerImpl,ContainerEvent,ContainerState> { @SuppressWarnings("unchecked") @Override public ContainerState transition(ContainerImpl container, ContainerEvent event) { ContainerResourceLocalizedEvent rsrcEvent = (ContainerResourceLocalizedEvent) event; LocalResourceRequest resourceRequest = rsrcEvent.getResource(); Path location = rsrcEvent.getLocation(); List<String> syms = container.pendingResources.remove(resourceRequest); if (null == syms) { LOG.warn("Localized unknown resource " + resourceRequest + " for container " + container.containerId); assert false; // fail container? 
return ContainerState.LOCALIZING; } container.localizedResources.put(location, syms); // check to see if this resource should be uploaded to the shared cache // as well if (shouldBeUploadedToSharedCache(container, resourceRequest)) { container.resourcesToBeUploaded.put(resourceRequest, location); } if (!container.pendingResources.isEmpty()) { return ContainerState.LOCALIZING; } container.dispatcher.getEventHandler().handle( new ContainerLocalizationEvent(LocalizationEventType. CONTAINER_RESOURCES_LOCALIZED, container)); container.sendLaunchEvent(); container.metrics.endInitingContainer(); // If this is a recovered container that has already launched, skip // uploading resources to the shared cache. We do this to avoid uploading // the same resources multiple times. The tradeoff is that in the case of // a recovered container, there is a chance that resources don't get // uploaded into the shared cache. This is OK because resources are not // acknowledged by the SCM until they have been uploaded by the node // manager. if (container.recoveredStatus != RecoveredContainerStatus.LAUNCHED && container.recoveredStatus != RecoveredContainerStatus.COMPLETED) { // kick off uploads to the shared cache container.dispatcher.getEventHandler().handle( new SharedCacheUploadEvent(container.resourcesToBeUploaded, container .getLaunchContext(), container.getUser(), SharedCacheUploadEventType.UPLOAD)); } return ContainerState.LOCALIZED; } } /** * Transition from LOCALIZED state to RUNNING state upon receiving * a CONTAINER_LAUNCHED event */ static class LaunchTransition extends ContainerTransition { @SuppressWarnings("unchecked") @Override public void transition(ContainerImpl container, ContainerEvent event) { container.sendContainerMonitorStartEvent(); container.metrics.runningContainer(); container.wasLaunched = true; if (container.recoveredAsKilled) { LOG.info("Killing " + container.containerId + " due to recovered as killed"); container.addDiagnostics("Container recovered as killed.\n"); container.dispatcher.getEventHandler().handle( new ContainersLauncherEvent(container, ContainersLauncherEventType.CLEANUP_CONTAINER)); } } } /** * Transition from RUNNING or KILLING state to EXITED_WITH_SUCCESS state * upon EXITED_WITH_SUCCESS message. */ @SuppressWarnings("unchecked") // dispatcher not typed static class ExitedWithSuccessTransition extends ContainerTransition { boolean clCleanupRequired; public ExitedWithSuccessTransition(boolean clCleanupRequired) { this.clCleanupRequired = clCleanupRequired; } @Override public void transition(ContainerImpl container, ContainerEvent event) { // Set exit code to 0 on success container.exitCode = 0; // TODO: Add containerWorkDir to the deletion service. if (clCleanupRequired) { container.dispatcher.getEventHandler().handle( new ContainersLauncherEvent(container, ContainersLauncherEventType.CLEANUP_CONTAINER)); } container.cleanup(); } } /** * Transition to EXITED_WITH_FAILURE state upon * CONTAINER_EXITED_WITH_FAILURE state. 
**/ @SuppressWarnings("unchecked") // dispatcher not typed static class ExitedWithFailureTransition extends ContainerTransition { boolean clCleanupRequired; public ExitedWithFailureTransition(boolean clCleanupRequired) { this.clCleanupRequired = clCleanupRequired; } @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerExitEvent exitEvent = (ContainerExitEvent) event; container.exitCode = exitEvent.getExitCode(); if (exitEvent.getDiagnosticInfo() != null) { container.addDiagnostics(exitEvent.getDiagnosticInfo(), "\n"); } // TODO: Add containerWorkDir to the deletion service. // TODO: Add containerOuputDir to the deletion service. if (clCleanupRequired) { container.dispatcher.getEventHandler().handle( new ContainersLauncherEvent(container, ContainersLauncherEventType.CLEANUP_CONTAINER)); } container.cleanup(); } } /** * Transition to EXITED_WITH_FAILURE upon receiving KILLED_ON_REQUEST */ static class KilledExternallyTransition extends ExitedWithFailureTransition { KilledExternallyTransition() { super(true); } @Override public void transition(ContainerImpl container, ContainerEvent event) { super.transition(container, event); container.addDiagnostics("Killed by external signal\n"); } } /** * Transition from LOCALIZING to LOCALIZATION_FAILED upon receiving * RESOURCE_FAILED event. */ static class ResourceFailedTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerResourceFailedEvent rsrcFailedEvent = (ContainerResourceFailedEvent) event; container.addDiagnostics(rsrcFailedEvent.getDiagnosticMessage(), "\n"); // Inform the localizer to decrement reference counts and cleanup // resources. container.cleanup(); container.metrics.endInitingContainer(); } } /** * Transition from LOCALIZING to KILLING upon receiving * KILL_CONTAINER event. */ static class KillDuringLocalizationTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { // Inform the localizer to decrement reference counts and cleanup // resources. container.cleanup(); container.metrics.endInitingContainer(); ContainerKillEvent killEvent = (ContainerKillEvent) event; container.exitCode = killEvent.getContainerExitStatus(); container.addDiagnostics(killEvent.getDiagnostic(), "\n"); container.addDiagnostics("Container is killed before being launched.\n"); } } /** * Remain in KILLING state when receiving a RESOURCE_LOCALIZED request * while in the process of killing. */ static class LocalizedResourceDuringKillTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerResourceLocalizedEvent rsrcEvent = (ContainerResourceLocalizedEvent) event; List<String> syms = container.pendingResources.remove(rsrcEvent.getResource()); if (null == syms) { LOG.warn("Localized unknown resource " + rsrcEvent.getResource() + " for container " + container.containerId); assert false; // fail container? 
return; } container.localizedResources.put(rsrcEvent.getLocation(), syms); } } /** * Transitions upon receiving KILL_CONTAINER: * - LOCALIZED -> KILLING * - RUNNING -> KILLING */ @SuppressWarnings("unchecked") // dispatcher not typed static class KillTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { // Kill the process/process-grp container.dispatcher.getEventHandler().handle( new ContainersLauncherEvent(container, ContainersLauncherEventType.CLEANUP_CONTAINER)); ContainerKillEvent killEvent = (ContainerKillEvent) event; container.addDiagnostics(killEvent.getDiagnostic(), "\n"); container.exitCode = killEvent.getContainerExitStatus(); } } /** * Transition from KILLING to CONTAINER_CLEANEDUP_AFTER_KILL * upon receiving CONTAINER_KILLED_ON_REQUEST. */ static class ContainerKilledTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerExitEvent exitEvent = (ContainerExitEvent) event; if (container.hasDefaultExitCode()) { container.exitCode = exitEvent.getExitCode(); } if (exitEvent.getDiagnosticInfo() != null) { container.addDiagnostics(exitEvent.getDiagnosticInfo(), "\n"); } // The process/process-grp is killed. Decrement reference counts and // cleanup resources container.cleanup(); } } /** * Handle the following transitions: * - {LOCALIZATION_FAILED, EXITED_WITH_SUCCESS, EXITED_WITH_FAILURE, * KILLING, CONTAINER_CLEANEDUP_AFTER_KILL} * -> DONE upon CONTAINER_RESOURCES_CLEANEDUP */ static class ContainerDoneTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override @SuppressWarnings("unchecked") public void transition(ContainerImpl container, ContainerEvent event) { container.metrics.releaseContainer(container.resource); container.sendFinishedEvents(); //if the current state is NEW it means the CONTAINER_INIT was never // sent for the event, thus no need to send the CONTAINER_STOP if (container.getCurrentState() != org.apache.hadoop.yarn.api.records.ContainerState.NEW) { container.dispatcher.getEventHandler().handle(new AuxServicesEvent (AuxServicesEventType.CONTAINER_STOP, container)); } } } /** * Handle the following transition: * - NEW -> DONE upon KILL_CONTAINER */ static class KillOnNewTransition extends ContainerDoneTransition { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerKillEvent killEvent = (ContainerKillEvent) event; container.exitCode = killEvent.getContainerExitStatus(); container.addDiagnostics(killEvent.getDiagnostic(), "\n"); container.addDiagnostics("Container is killed before being launched.\n"); container.metrics.killedContainer(); NMAuditLogger.logSuccess(container.user, AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); super.transition(container, event); } } /** * Handle the following transition: * - LOCALIZATION_FAILED -> DONE upon CONTAINER_RESOURCES_CLEANEDUP */ static class LocalizationFailedToDoneTransition extends ContainerDoneTransition { @Override public void transition(ContainerImpl container, ContainerEvent event) { container.metrics.failedContainer(); NMAuditLogger.logFailure(container.user, AuditConstants.FINISH_FAILED_CONTAINER, "ContainerImpl", "Container failed with state: " + container.getContainerState(), container.containerId.getApplicationAttemptId().getApplicationId(), 
container.containerId); super.transition(container, event); } } /** * Handle the following transition: * - EXITED_WITH_SUCCESS -> DONE upon CONTAINER_RESOURCES_CLEANEDUP */ static class ExitedWithSuccessToDoneTransition extends ContainerDoneTransition { @Override public void transition(ContainerImpl container, ContainerEvent event) { container.metrics.endRunningContainer(); container.metrics.completedContainer(); NMAuditLogger.logSuccess(container.user, AuditConstants.FINISH_SUCCESS_CONTAINER, "ContainerImpl", container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); super.transition(container, event); } } /** * Handle the following transition: * - EXITED_WITH_FAILURE -> DONE upon CONTAINER_RESOURCES_CLEANEDUP */ static class ExitedWithFailureToDoneTransition extends ContainerDoneTransition { @Override public void transition(ContainerImpl container, ContainerEvent event) { if (container.wasLaunched) { container.metrics.endRunningContainer(); } container.metrics.failedContainer(); NMAuditLogger.logFailure(container.user, AuditConstants.FINISH_FAILED_CONTAINER, "ContainerImpl", "Container failed with state: " + container.getContainerState(), container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); super.transition(container, event); } } /** * Handle the following transition: * - KILLING -> DONE upon CONTAINER_RESOURCES_CLEANEDUP */ static class KillingToDoneTransition extends ContainerDoneTransition { @Override public void transition(ContainerImpl container, ContainerEvent event) { container.metrics.killedContainer(); NMAuditLogger.logSuccess(container.user, AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); super.transition(container, event); } } /** * Handle the following transition: * CONTAINER_CLEANEDUP_AFTER_KILL -> DONE upon CONTAINER_RESOURCES_CLEANEDUP */ static class ContainerCleanedupAfterKillToDoneTransition extends ContainerDoneTransition { @Override public void transition(ContainerImpl container, ContainerEvent event) { if (container.wasLaunched) { container.metrics.endRunningContainer(); } container.metrics.killedContainer(); NMAuditLogger.logSuccess(container.user, AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); super.transition(container, event); } } /** * Update diagnostics, staying in the same state. 
*/ static class ContainerDiagnosticsUpdateTransition implements SingleArcTransition<ContainerImpl, ContainerEvent> { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerDiagnosticsUpdateEvent updateEvent = (ContainerDiagnosticsUpdateEvent) event; container.addDiagnostics(updateEvent.getDiagnosticsUpdate(), "\n"); } } @Override public void handle(ContainerEvent event) { try { this.writeLock.lock(); ContainerId containerID = event.getContainerID(); LOG.debug("Processing " + containerID + " of type " + event.getType()); ContainerState oldState = stateMachine.getCurrentState(); ContainerState newState = null; try { newState = stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { LOG.warn("Can't handle this event at current state: Current: [" + oldState + "], eventType: [" + event.getType() + "]", e); } if (oldState != newState) { LOG.info("Container " + containerID + " transitioned from " + oldState + " to " + newState); } } finally { this.writeLock.unlock(); } } @Override public String toString() { this.readLock.lock(); try { return ConverterUtils.toString(this.containerId); } finally { this.readLock.unlock(); } } private boolean hasDefaultExitCode() { return (this.exitCode == ContainerExitStatus.INVALID); } /** * Returns whether the specific resource should be uploaded to the shared * cache. */ private static boolean shouldBeUploadedToSharedCache(ContainerImpl container, LocalResourceRequest resource) { return container.resourcesUploadPolicies.get(resource); } }
46,231
38.820844
116
java
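Editor's note: the transition table in ContainerImpl above is built once, statically, with Hadoop's org.apache.hadoop.yarn.state.StateMachineFactory, then instantiated per container via make(). Below is a minimal sketch of that same pattern under invented names (DemoStateMachine, DemoState, DemoEventType, DemoEvent are hypothetical, not part of the Hadoop source); it illustrates only the addTransition/installTopology/make flow, not the real container lifecycle.

import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;

public class DemoStateMachine {
  enum DemoState { NEW, RUNNING, DONE }
  enum DemoEventType { LAUNCH, FINISH }

  static class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent(DemoEventType type) { super(type); }
  }

  // One static, immutable transition table shared by all instances, exactly
  // as ContainerImpl shares its stateMachineFactory across containers.
  private static final StateMachineFactory<DemoStateMachine, DemoState,
      DemoEventType, DemoEvent> FACTORY =
          new StateMachineFactory<DemoStateMachine, DemoState, DemoEventType,
              DemoEvent>(DemoState.NEW)
      .addTransition(DemoState.NEW, DemoState.RUNNING, DemoEventType.LAUNCH,
          new SingleArcTransition<DemoStateMachine, DemoEvent>() {
            @Override
            public void transition(DemoStateMachine op, DemoEvent event) {
              // per-arc side effects (follow-up events, metrics) go here
            }
          })
      // Arc without a hook: only the state changes.
      .addTransition(DemoState.RUNNING, DemoState.DONE, DemoEventType.FINISH)
      .installTopology();

  private final StateMachine<DemoState, DemoEventType, DemoEvent> stateMachine
      = FACTORY.make(this);

  public void handle(DemoEvent event) {
    // An unexpected (state, event) pair makes doTransition throw the invalid
    // state transition exception that ContainerImpl.handle() logs and swallows.
    stateMachine.doTransition(event.getType(), event);
  }
}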
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceFailedEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; public class ContainerResourceFailedEvent extends ContainerResourceEvent { private final String diagnosticMessage; public ContainerResourceFailedEvent(ContainerId container, LocalResourceRequest rsrc, String diagnosticMessage) { super(container, ContainerEventType.RESOURCE_FAILED, rsrc); this.diagnosticMessage = diagnosticMessage; } public String getDiagnosticMessage() { return diagnosticMessage; } }
1,471
37.736842
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.AbstractEvent; public class ContainerEvent extends AbstractEvent<ContainerEventType> { private final ContainerId containerID; public ContainerEvent(ContainerId cID, ContainerEventType eventType) { super(eventType); this.containerID = cID; } public ContainerId getContainerID() { return containerID; } }
1,293
32.179487
77
java
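Editor's note: a ContainerEvent is a plain immutable bean (container id plus event type) consumed through the generic EventHandler interface. The following hypothetical snippet shows one being built and handled directly; ContainerEventLogger is an invented name, and the id-construction calls (ApplicationId.newInstance, ApplicationAttemptId.newInstance, ContainerId.newContainerId) are the stock YARN record factories.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;

public class ContainerEventLogger implements EventHandler<ContainerEvent> {
  @Override
  public void handle(ContainerEvent event) {
    // The id and type carried by every ContainerEvent are all a dispatcher
    // needs to route the event to the right ContainerImpl instance.
    System.out.println(event.getContainerID() + " <- " + event.getType());
  }

  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(1234L, 1);
    ApplicationAttemptId attempt = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attempt, 1L);
    new ContainerEventLogger().handle(
        new ContainerEvent(containerId, ContainerEventType.INIT_CONTAINER));
  }
}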
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; public class ContainerResourceLocalizedEvent extends ContainerResourceEvent { private final Path loc; public ContainerResourceLocalizedEvent(ContainerId container, LocalResourceRequest rsrc, Path loc) { super(container, ContainerEventType.RESOURCE_LOCALIZED, rsrc); this.loc = loc; } public Path getLocation() { return loc; } }
1,418
35.384615
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import java.util.List; import java.util.Map; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; public interface Container extends EventHandler<ContainerEvent> { ContainerId getContainerId(); Resource getResource(); ContainerTokenIdentifier getContainerTokenIdentifier(); String getUser(); ContainerState getContainerState(); ContainerLaunchContext getLaunchContext(); Credentials getCredentials(); Map<Path,List<String>> getLocalizedResources(); ContainerStatus cloneAndGetContainerStatus(); NMContainerStatus getNMContainerStatus(); String toString(); }
1,918
31.525424
77
java
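Editor's note: code outside the state machine is expected to consume containers through this read-only interface rather than ContainerImpl directly. A small sketch of doing so (ContainerReport is a hypothetical helper, not part of the source):

import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

public final class ContainerReport {
  private ContainerReport() {}

  /** One-line, point-in-time summary built purely from the Container API. */
  public static String summarize(Container container) {
    // cloneAndGetContainerStatus() hands back a snapshot, so callers never
    // hold the container's internal read lock longer than one call.
    ContainerStatus status = container.cloneAndGetContainerStatus();
    return String.format("%s user=%s state=%s exit=%d",
        container.getContainerId(), container.getUser(),
        container.getContainerState(), status.getExitStatus());
  }
}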
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; public enum ContainerEventType { // Producer: ContainerManager INIT_CONTAINER, KILL_CONTAINER, UPDATE_DIAGNOSTICS_MSG, CONTAINER_DONE, // DownloadManager CONTAINER_INITED, RESOURCE_LOCALIZED, RESOURCE_FAILED, CONTAINER_RESOURCES_CLEANEDUP, // Producer: ContainersLauncher CONTAINER_LAUNCHED, CONTAINER_EXITED_WITH_SUCCESS, CONTAINER_EXITED_WITH_FAILURE, CONTAINER_KILLED_ON_REQUEST, }
1,289
30.463415
77
java
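Editor's note: this enum is also the routing key for the NodeManager's event bus; a handler is registered per event-type class on an AsyncDispatcher. A minimal wiring sketch, assuming only the stock org.apache.hadoop.yarn.event.AsyncDispatcher API (register/init/start/stop); the inline handler stands in for the real per-container dispatch logic.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;

public class DispatcherWiring {
  public static void main(String[] args) {
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    // Every event whose type is a ContainerEventType lands here; in the
    // NodeManager the registered handler looks up the ContainerImpl by id
    // and invokes its handle() method.
    dispatcher.register(ContainerEventType.class,
        new EventHandler<ContainerEvent>() {
          @Override
          public void handle(ContainerEvent event) {
            System.out.println("handling " + event.getType());
          }
        });
    dispatcher.init(new Configuration());
    dispatcher.start();
    // Producers now post asynchronously through
    // dispatcher.getEventHandler().handle(event), as the code above does.
    dispatcher.stop();
  }
}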
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerDiagnosticsUpdateEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; public class ContainerDiagnosticsUpdateEvent extends ContainerEvent { private final String diagnosticsUpdate; public ContainerDiagnosticsUpdateEvent(ContainerId cID, String update) { super(cID, ContainerEventType.UPDATE_DIAGNOSTICS_MSG); this.diagnosticsUpdate = update; } public String getDiagnosticsUpdate() { return this.diagnosticsUpdate; } }
1,315
35.555556
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerKillEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import org.apache.hadoop.yarn.api.records.ContainerId; public class ContainerKillEvent extends ContainerEvent { private final String diagnostic; private final int exitStatus; public ContainerKillEvent(ContainerId cID, int exitStatus, String diagnostic) { super(cID, ContainerEventType.KILL_CONTAINER); this.exitStatus = exitStatus; this.diagnostic = diagnostic; } public String getDiagnostic() { return this.diagnostic; } public int getContainerExitStatus() { return this.exitStatus; } }
1,407
31
77
java
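Editor's note: a kill is requested simply by dispatching one of these events with an exit status and a human-readable reason; KillTransition and KillOnNewTransition above copy both into the container's final report. A hypothetical factory method (KillRequests and the diagnostic string are invented; the constant comes from org.apache.hadoop.yarn.api.records.ContainerExitStatus, the same class ContainerImpl compares against):

import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;

public final class KillRequests {
  private KillRequests() {}

  /** Build the event dispatched when the RM tells the node to stop a container. */
  public static ContainerKillEvent killedByResourceManager(ContainerId id) {
    return new ContainerKillEvent(id,
        ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
        "Container killed on request by the ResourceManager");
  }
}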
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; import org.apache.hadoop.classification.InterfaceAudience.Private; public class ContainerRuntimeConstants { /* Switch container runtimes. Work in progress: These * parameters may be changed/removed in the future. */ @Private public static final String ENV_CONTAINER_TYPE = "YARN_CONTAINER_RUNTIME_TYPE"; }
1,231
35.235294
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** An abstraction for various container runtime implementations. Examples * include the Process Tree, Docker, and Appc runtimes. These implementations * are meant for low-level OS container support - dependencies on * higher-level nodemanager constructs should be avoided. */ @InterfaceAudience.Private @InterfaceStability.Unstable public interface ContainerRuntime { /** Prepare a container to be ready for launch. */ void prepareContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException; /** Launch a container. */ void launchContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException; /** Signal a container - request to terminate, check status, etc. */ void signalContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException; /** Any container cleanup that may be required. */ void reapContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException; }
1,983
38.68
76
java
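Editor's note: the four methods map one-to-one onto a container's lifecycle (prepare, launch, signal, reap). A minimal no-op implementation (NoOpContainerRuntime is a hypothetical name) that compiles against this interface and marks where runtime-specific work belongs:

import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;

public class NoOpContainerRuntime implements ContainerRuntime {
  @Override
  public void prepareContainer(ContainerRuntimeContext ctx)
      throws ContainerExecutionException {
    // stage images, filesystems, or devices before launch; nothing to do here
  }

  @Override
  public void launchContainer(ContainerRuntimeContext ctx)
      throws ContainerExecutionException {
    // a real runtime starts the container process here
    throw new ContainerExecutionException(
        "launch is not supported by the no-op runtime");
  }

  @Override
  public void signalContainer(ContainerRuntimeContext ctx)
      throws ContainerExecutionException {
    // deliver a liveness check or termination signal; nothing to signal here
  }

  @Override
  public void reapContainer(ContainerRuntimeContext ctx)
      throws ContainerExecutionException {
    // release any runtime-specific state after the container exits
  }
}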
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import java.util.Collections; import java.util.HashMap; import java.util.Map; @InterfaceAudience.Private @InterfaceStability.Unstable public final class ContainerRuntimeContext { private final Container container; private final Map<Attribute<?>, Object> executionAttributes; /** An attribute class that attempts to provide better type safety as compared * with using a map of string to object. * @param <T> */ public static final class Attribute<T> { private final Class<T> valueClass; private final String id; private Attribute(Class<T> valueClass, String id) { this.valueClass = valueClass; this.id = id; } @Override public int hashCode() { return valueClass.hashCode() + 31 * id.hashCode(); } @Override public boolean equals(Object obj) { if (obj == null || !(obj instanceof Attribute)){ return false; } Attribute<?> attribute = (Attribute<?>) obj; return valueClass.equals(attribute.valueClass) && id.equals(attribute.id); } public static <T> Attribute<T> attribute(Class<T> valueClass, String id) { return new Attribute<T>(valueClass, id); } } public static final class Builder { private final Container container; private Map<Attribute<?>, Object> executionAttributes; public Builder(Container container) { executionAttributes = new HashMap<>(); this.container = container; } public <E> Builder setExecutionAttribute(Attribute<E> attribute, E value) { this.executionAttributes.put(attribute, attribute.valueClass.cast(value)); return this; } public ContainerRuntimeContext build() { return new ContainerRuntimeContext(this); } } private ContainerRuntimeContext(Builder builder) { this.container = builder.container; this.executionAttributes = builder.executionAttributes; } public Container getContainer() { return this.container; } public Map<Attribute<?>, Object> getExecutionAttributes() { return Collections.unmodifiableMap(this.executionAttributes); } public <E> E getExecutionAttribute(Attribute<E> attribute) { return attribute.valueClass.cast(executionAttributes.get(attribute)); } }
3,356
30.669811
86
java
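Editor's note: the Attribute/Builder pair above replaces a raw Map<String, Object> with keys that remember their value class, so both writes and reads are cast-checked. A short usage sketch built only from the API in this file (RuntimeContextDemo and its RUN_AS_USER attribute are illustrative; the real attribute constants live in LinuxContainerRuntimeConstants further below):

import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext.Attribute;

public final class RuntimeContextDemo {
  // The key carries its value class; setExecutionAttribute() casts on write
  // and getExecutionAttribute() casts on read, so no blind (String) casts.
  static final Attribute<String> RUN_AS_USER =
      Attribute.attribute(String.class, "run_as_user");

  private RuntimeContextDemo() {}

  static ContainerRuntimeContext buildContext(Container container) {
    return new ContainerRuntimeContext.Builder(container)
        .setExecutionAttribute(RUN_AS_USER, "nobody")
        .build();
  }

  static String runAsUser(ContainerRuntimeContext ctx) {
    return ctx.getExecutionAttribute(RUN_AS_USER); // typed read, no cast
  }
}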
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.exceptions.YarnException; /** Exception caused in a container runtime impl. 'Runtime' is not used in * the class name to avoid confusion with a java RuntimeException */ @InterfaceAudience.Private @InterfaceStability.Unstable public class ContainerExecutionException extends YarnException { private static final long serialVersionUID = 1L; private static final Integer EXIT_CODE_UNSET = -1; private static final String OUTPUT_UNSET = "<unknown>"; private Integer exitCode; private String output; private String errorOutput; public ContainerExecutionException(String message) { super(message); exitCode = EXIT_CODE_UNSET; output = OUTPUT_UNSET; errorOutput = OUTPUT_UNSET; } public ContainerExecutionException(Throwable throwable) { super(throwable); exitCode = EXIT_CODE_UNSET; output = OUTPUT_UNSET; errorOutput = OUTPUT_UNSET; } public ContainerExecutionException(String message, Integer exitCode, String output, String errorOutput) { super(message); this.exitCode = exitCode; this.output = output; this.errorOutput = errorOutput; } public ContainerExecutionException(Throwable cause, Integer exitCode, String output, String errorOutput) { super(cause); this.exitCode = exitCode; this.output = output; this.errorOutput = errorOutput; } public Integer getExitCode() { return exitCode; } public String getOutput() { return output; } public String getErrorOutput() { return errorOutput; } }
2,586
29.435294
78
java
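Editor's note: the extra exit-code/stdout/stderr fields exist so a failed operation can be reported to the caller with its full context, which is how the Linux runtimes later in this section rethrow PrivilegedOperationException. A hypothetical sketch of the same pattern (FailureReporting and the literal values are invented):

import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;

public final class FailureReporting {
  private FailureReporting() {}

  static void launchStep() throws ContainerExecutionException {
    int exit = 143; // pretend the launched process was terminated by SIGTERM
    if (exit != 0) {
      // Carry the exit code and captured output along with the message.
      throw new ContainerExecutionException("Launch container failed",
          exit, "<no output>", "Terminated");
    }
  }

  public static void main(String[] args) {
    try {
      launchStep();
    } catch (ContainerExecutionException e) {
      System.err.println(e.getMessage() + ": exitCode=" + e.getExitCode()
          + ", stderr=" + e.getErrorOutput());
    }
  }
}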
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime; /** Linux-specific container runtime implementations must implement this * interface. */ @InterfaceAudience.Private @InterfaceStability.Unstable public interface LinuxContainerRuntime extends ContainerRuntime { void initialize(Configuration conf) throws ContainerExecutionException; }
1,564
39.128205
102
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import java.util.Map; @InterfaceAudience.Private @InterfaceStability.Unstable public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime { private static final Log LOG = LogFactory .getLog(DelegatingLinuxContainerRuntime.class); private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime; private DockerLinuxContainerRuntime dockerLinuxContainerRuntime; @Override public void initialize(Configuration conf) throws ContainerExecutionException { PrivilegedOperationExecutor privilegedOperationExecutor = PrivilegedOperationExecutor.getInstance(conf); defaultLinuxContainerRuntime = new DefaultLinuxContainerRuntime( privilegedOperationExecutor); defaultLinuxContainerRuntime.initialize(conf); dockerLinuxContainerRuntime = new DockerLinuxContainerRuntime( privilegedOperationExecutor); dockerLinuxContainerRuntime.initialize(conf); } private LinuxContainerRuntime pickContainerRuntime(Container container) { Map<String, String> env = container.getLaunchContext().getEnvironment(); LinuxContainerRuntime runtime; if (DockerLinuxContainerRuntime.isDockerContainerRequested(env)){ runtime = dockerLinuxContainerRuntime; } else { runtime = defaultLinuxContainerRuntime; } if (LOG.isInfoEnabled()) { LOG.info("Using container runtime: " + runtime.getClass() .getSimpleName()); } return runtime; } @Override public void prepareContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); LinuxContainerRuntime runtime = pickContainerRuntime(container); runtime.prepareContainer(ctx); } @Override public void launchContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); LinuxContainerRuntime runtime = pickContainerRuntime(container); runtime.launchContainer(ctx); } @Override public void signalContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); LinuxContainerRuntime runtime = pickContainerRuntime(container); 
runtime.signalContainer(ctx); } @Override public void reapContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); LinuxContainerRuntime runtime = pickContainerRuntime(container); runtime.reapContainer(ctx); } }
4,095
36.236364
111
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import java.util.List; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; @InterfaceAudience.Private @InterfaceStability.Unstable public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime { private static final Log LOG = LogFactory .getLog(DefaultLinuxContainerRuntime.class); private Configuration conf; private final PrivilegedOperationExecutor privilegedOperationExecutor; public DefaultLinuxContainerRuntime(PrivilegedOperationExecutor privilegedOperationExecutor) { this.privilegedOperationExecutor = privilegedOperationExecutor; } @Override public void initialize(Configuration conf) throws ContainerExecutionException { this.conf = conf; } @Override public void prepareContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { //nothing to do here at the moment. } @Override public void launchContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); PrivilegedOperation launchOp = new PrivilegedOperation( PrivilegedOperation.OperationType.LAUNCH_CONTAINER, (String) null); //All of these arguments are expected to be available in the runtime context launchOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), ctx.getExecutionAttribute(USER), Integer.toString(PrivilegedOperation. 
RunAsUserCommand.LAUNCH_CONTAINER.getValue()), ctx.getExecutionAttribute(APPID), ctx.getExecutionAttribute(CONTAINER_ID_STR), ctx.getExecutionAttribute(CONTAINER_WORK_DIR).toString(), ctx.getExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH).toUri() .getPath(), ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(), ctx.getExecutionAttribute(PID_FILE_PATH).toString(), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, ctx.getExecutionAttribute(LOCAL_DIRS)), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, ctx.getExecutionAttribute(LOG_DIRS)), ctx.getExecutionAttribute(RESOURCES_OPTIONS)); String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE); if (tcCommandFile != null) { launchOp.appendArgs(tcCommandFile); } //List<String> -> stored as List -> fetched/converted to List<String> //we can't do better here thanks to type-erasure @SuppressWarnings("unchecked") List<String> prefixCommands = (List<String>) ctx.getExecutionAttribute( CONTAINER_LAUNCH_PREFIX_COMMANDS); try { privilegedOperationExecutor.executePrivilegedOperation(prefixCommands, launchOp, null, container.getLaunchContext().getEnvironment(), false); } catch (PrivilegedOperationException e) { LOG.warn("Launch container failed. Exception: ", e); throw new ContainerExecutionException("Launch container failed", e .getExitCode(), e.getOutput(), e.getErrorOutput()); } } @Override public void signalContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); PrivilegedOperation signalOp = new PrivilegedOperation( PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null); signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), ctx.getExecutionAttribute(USER), Integer.toString(PrivilegedOperation.RunAsUserCommand .SIGNAL_CONTAINER.getValue()), ctx.getExecutionAttribute(PID), Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue())); try { PrivilegedOperationExecutor executor = PrivilegedOperationExecutor .getInstance(conf); executor.executePrivilegedOperation(null, signalOp, null, container.getLaunchContext().getEnvironment(), false); } catch (PrivilegedOperationException e) { LOG.warn("Signal container failed. Exception: ", e); throw new ContainerExecutionException("Signal container failed", e .getExitCode(), e.getOutput(), e.getErrorOutput()); } } @Override public void reapContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { } }
6,171
40.422819
120
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext.Attribute; import java.util.List; import java.util.Map; public final class LinuxContainerRuntimeConstants { private LinuxContainerRuntimeConstants() { } public static final Attribute<Map> LOCALIZED_RESOURCES = Attribute .attribute(Map.class, "localized_resources"); public static final Attribute<List> CONTAINER_LAUNCH_PREFIX_COMMANDS = Attribute.attribute(List.class, "container_launch_prefix_commands"); public static final Attribute<String> RUN_AS_USER = Attribute.attribute(String.class, "run_as_user"); public static final Attribute<String> USER = Attribute.attribute(String.class, "user"); public static final Attribute<String> APPID = Attribute.attribute(String.class, "appid"); public static final Attribute<String> CONTAINER_ID_STR = Attribute .attribute(String.class, "container_id_str"); public static final Attribute<Path> CONTAINER_WORK_DIR = Attribute .attribute(Path.class, "container_work_dir"); public static final Attribute<Path> NM_PRIVATE_CONTAINER_SCRIPT_PATH = Attribute.attribute(Path.class, "nm_private_container_script_path"); public static final Attribute<Path> NM_PRIVATE_TOKENS_PATH = Attribute .attribute(Path.class, "nm_private_tokens_path"); public static final Attribute<Path> PID_FILE_PATH = Attribute.attribute( Path.class, "pid_file_path"); public static final Attribute<List> LOCAL_DIRS = Attribute.attribute( List.class, "local_dirs"); public static final Attribute<List> LOG_DIRS = Attribute.attribute( List.class, "log_dirs"); public static final Attribute<String> RESOURCES_OPTIONS = Attribute.attribute( String.class, "resources_options"); public static final Attribute<String> TC_COMMAND_FILE = Attribute.attribute( String.class, "tc_command_file"); public static final Attribute<String> CGROUP_RELATIVE_PATH = Attribute .attribute(String.class, "cgroup_relative_path"); public static final Attribute<String> PID = Attribute.attribute( String.class, "pid"); public static final Attribute<ContainerExecutor.Signal> SIGNAL = Attribute .attribute(ContainerExecutor.Signal.class, "signal"); }
3,287
46.652174
108
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerClient; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import java.util.ArrayList; import java.util.List; import java.util.Map; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; @InterfaceAudience.Private @InterfaceStability.Unstable public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { private static final Log LOG = LogFactory.getLog( DockerLinuxContainerRuntime.class); @InterfaceAudience.Private public static final String ENV_DOCKER_CONTAINER_IMAGE = "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE"; @InterfaceAudience.Private public static final String ENV_DOCKER_CONTAINER_IMAGE_FILE = "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE_FILE"; @InterfaceAudience.Private public static final String ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE = "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE"; private Configuration conf; private DockerClient dockerClient; private PrivilegedOperationExecutor privilegedOperationExecutor; public static boolean isDockerContainerRequested( Map<String, String> env) { if 
(env == null) { return false; } String type = env.get(ContainerRuntimeConstants.ENV_CONTAINER_TYPE); return type != null && type.equals("docker"); } public DockerLinuxContainerRuntime(PrivilegedOperationExecutor privilegedOperationExecutor) { this.privilegedOperationExecutor = privilegedOperationExecutor; } @Override public void initialize(Configuration conf) throws ContainerExecutionException { this.conf = conf; dockerClient = new DockerClient(conf); } @Override public void prepareContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { } public void addCGroupParentIfRequired(String resourcesOptions, String containerIdStr, DockerRunCommand runCommand) throws ContainerExecutionException { if (resourcesOptions.equals( (PrivilegedOperation.CGROUP_ARG_PREFIX + PrivilegedOperation .CGROUP_ARG_NO_TASKS))) { if (LOG.isInfoEnabled()) { LOG.info("no resource restrictions specified. not using docker's " + "cgroup options"); } } else { if (LOG.isInfoEnabled()) { LOG.info("using docker's cgroups options"); } try { CGroupsHandler cGroupsHandler = ResourceHandlerModule .getCGroupsHandler(conf); String cGroupPath = "/" + cGroupsHandler.getRelativePathForCGroup( containerIdStr); if (LOG.isInfoEnabled()) { LOG.info("using cgroup parent: " + cGroupPath); } runCommand.setCGroupParent(cGroupPath); } catch (ResourceHandlerException e) { LOG.warn("unable to use cgroups handler. Exception: ", e); throw new ContainerExecutionException(e); } } } @Override public void launchContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); Map<String, String> environment = container.getLaunchContext() .getEnvironment(); String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE); if (imageName == null) { throw new ContainerExecutionException(ENV_DOCKER_CONTAINER_IMAGE + " not set!"); } String containerIdStr = container.getContainerId().toString(); String runAsUser = ctx.getExecutionAttribute(RUN_AS_USER); Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR); //List<String> -> stored as List -> fetched/converted to List<String> //we can't do better here thanks to type-erasure @SuppressWarnings("unchecked") List<String> localDirs = ctx.getExecutionAttribute(LOCAL_DIRS); @SuppressWarnings("unchecked") List<String> logDirs = ctx.getExecutionAttribute(LOG_DIRS); @SuppressWarnings("unchecked") DockerRunCommand runCommand = new DockerRunCommand(containerIdStr, runAsUser, imageName) .detachOnRun() .setContainerWorkDir(containerWorkDir.toString()) .setNetworkType("host") .addMountLocation("/etc/passwd", "/etc/passwd:ro"); List<String> allDirs = new ArrayList<>(localDirs); allDirs.add(containerWorkDir.toString()); allDirs.addAll(logDirs); for (String dir: allDirs) { runCommand.addMountLocation(dir, dir); } String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS); /** Disabling docker's cgroup parent support for the time being. Docker * needs to use a more recent libcontainer that supports net_cls. In * addition we also need to revisit current cgroup creation in YARN.
*/ //addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand); Path nmPrivateContainerScriptPath = ctx.getExecutionAttribute( NM_PRIVATE_CONTAINER_SCRIPT_PATH); String disableOverride = environment.get( ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE); if (disableOverride != null && disableOverride.equals("true")) { if (LOG.isInfoEnabled()) { LOG.info("command override disabled"); } } else { List<String> overrideCommands = new ArrayList<>(); Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT); overrideCommands.add("bash"); overrideCommands.add(launchDst.toUri().getPath()); runCommand.setOverrideCommandWithArgs(overrideCommands); } String commandFile = dockerClient.writeCommandToTempFile(runCommand, containerIdStr); PrivilegedOperation launchOp = new PrivilegedOperation( PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER, (String) null); launchOp.appendArgs(runAsUser, ctx.getExecutionAttribute(USER), Integer.toString(PrivilegedOperation .RunAsUserCommand.LAUNCH_DOCKER_CONTAINER.getValue()), ctx.getExecutionAttribute(APPID), containerIdStr, containerWorkDir.toString(), nmPrivateContainerScriptPath.toUri().getPath(), ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(), ctx.getExecutionAttribute(PID_FILE_PATH).toString(), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, localDirs), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, logDirs), commandFile, resourcesOpts); String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE); if (tcCommandFile != null) { launchOp.appendArgs(tcCommandFile); } try { privilegedOperationExecutor.executePrivilegedOperation(null, launchOp, null, container.getLaunchContext().getEnvironment(), false); } catch (PrivilegedOperationException e) { LOG.warn("Launch container failed. Exception: ", e); throw new ContainerExecutionException("Launch container failed", e .getExitCode(), e.getOutput(), e.getErrorOutput()); } } @Override public void signalContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { Container container = ctx.getContainer(); PrivilegedOperation signalOp = new PrivilegedOperation( PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null); signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), ctx.getExecutionAttribute(USER), Integer.toString(PrivilegedOperation .RunAsUserCommand.SIGNAL_CONTAINER.getValue()), ctx.getExecutionAttribute(PID), Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue())); try { PrivilegedOperationExecutor executor = PrivilegedOperationExecutor .getInstance(conf); executor.executePrivilegedOperation(null, signalOp, null, container.getLaunchContext().getEnvironment(), false); } catch (PrivilegedOperationException e) { LOG.warn("Signal container failed. Exception: ", e); throw new ContainerExecutionException("Signal container failed", e .getExitCode(), e.getOutput(), e.getErrorOutput()); } } @Override public void reapContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { } }
10,775
38.472527
120
java
hadoop
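The runtime above is opt-in per container: an application requests it purely through environment variables on its ContainerLaunchContext. A minimal sketch of that client-side wiring follows; it assumes the literal value of ContainerRuntimeConstants.ENV_CONTAINER_TYPE is "YARN_CONTAINER_RUNTIME_TYPE", and the image name is illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;

public class DockerRuntimeRequestSketch {
  public static void main(String[] args) {
    Map<String, String> env = new HashMap<>();
    // Makes isDockerContainerRequested(env) return true.
    env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
    // Image consulted by launchContainer(); mandatory for this runtime.
    env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, "centos:7");
    // Optional: keep the image's own entry point instead of the YARN launch script.
    env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE, "true");
    // env would then be set on the container's ContainerLaunchContext.
  }
}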
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @InterfaceAudience.Private @InterfaceStability.Unstable /** Represents a docker sub-command * e.g. 'run', 'load', 'inspect', etc. */ public abstract class DockerCommand { private final String command; private final List<String> commandWithArguments; protected DockerCommand(String command) { this.command = command; this.commandWithArguments = new ArrayList<>(); commandWithArguments.add(command); } /** Returns the docker sub-command string being used * e.g. 'run' */ public final String getCommandOption() { return this.command; } /** Appends arguments to the command - this method is only meant for use by * sub-classes * @param arguments arguments to be added */ protected final void addCommandArguments(String... arguments) { this.commandWithArguments.addAll(Arrays.asList(arguments)); } public String getCommandWithArguments() { return StringUtils.join(" ", commandWithArguments); } }
2,110
30.984848
88
java
hadoop
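DockerCommand is the extension point for wrapping further docker sub-commands: a subclass fixes the sub-command name in its constructor and appends options through addCommandArguments(). A hypothetical 'inspect' wrapper, not part of the repository above and shown only to illustrate the pattern (it would live in the same docker package):

public class DockerInspectCommand extends DockerCommand {
  private static final String INSPECT_COMMAND = "inspect";

  public DockerInspectCommand(String containerName) {
    super(INSPECT_COMMAND);
    // Options accumulate in order after the sub-command name.
    super.addCommandArguments("--format={{.State.Status}}", containerName);
  }
}
// getCommandWithArguments() would yield:
// inspect --format={{.State.Status}} <containerName>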
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.Writer; @InterfaceAudience.Private @InterfaceStability.Unstable public final class DockerClient { private static final Log LOG = LogFactory.getLog(DockerClient.class); private static final String TMP_FILE_PREFIX = "docker."; private static final String TMP_FILE_SUFFIX = ".cmd"; private final String tmpDirPath; public DockerClient(Configuration conf) throws ContainerExecutionException { String tmpDirBase = conf.get("hadoop.tmp.dir"); if (tmpDirBase == null) { throw new ContainerExecutionException("hadoop.tmp.dir not set!"); } tmpDirPath = tmpDirBase + "/nm-docker-cmds"; File tmpDir = new File(tmpDirPath); if (!(tmpDir.exists() || tmpDir.mkdirs())) { LOG.warn("Unable to create directory: " + tmpDirPath); throw new ContainerExecutionException("Unable to create directory: " + tmpDirPath); } } public String writeCommandToTempFile(DockerCommand cmd, String filePrefix) throws ContainerExecutionException { File dockerCommandFile = null; try { dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix, TMP_FILE_SUFFIX, new File(tmpDirPath)); Writer writer = new OutputStreamWriter(new FileOutputStream(dockerCommandFile), "UTF-8"); PrintWriter printWriter = new PrintWriter(writer); printWriter.print(cmd.getCommandWithArguments()); printWriter.close(); return dockerCommandFile.getAbsolutePath(); } catch (IOException e) { LOG.warn("Unable to write docker command to temporary file!"); throw new ContainerExecutionException(e); } } }
3,172
37.228916
107
java
hadoop
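DockerClient never runs docker itself; it only serializes a command to a file that the setuid container-executor later reads, which is why launchContainer() earlier passes a command file path rather than a command line. A rough usage sketch, with illustrative paths and container id, assumed to sit in the same ...linux.runtime.docker package:

import org.apache.hadoop.conf.Configuration;

public class DockerClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.tmp.dir", "/tmp/hadoop");  // illustrative
    DockerClient client = new DockerClient(conf);
    DockerRunCommand cmd =
        new DockerRunCommand("container_example_01", "nobody", "centos:7");
    // Produces something like /tmp/hadoop/nm-docker-cmds/docker.container_example_01NNN.cmd
    System.out.println(client.writeCommandToTempFile(cmd, "container_example_01"));
  }
}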
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; import org.apache.hadoop.util.StringUtils; import java.util.ArrayList; import java.util.List; public class DockerRunCommand extends DockerCommand { private static final String RUN_COMMAND = "run"; private final String image; private List<String> overrideCommandWithArgs; /** The following are mandatory: */ public DockerRunCommand(String containerId, String user, String image) { super(RUN_COMMAND); super.addCommandArguments("--name=" + containerId, "--user=" + user); this.image = image; } public DockerRunCommand removeContainerOnExit() { super.addCommandArguments("--rm"); return this; } public DockerRunCommand detachOnRun() { super.addCommandArguments("-d"); return this; } public DockerRunCommand setContainerWorkDir(String workdir) { super.addCommandArguments("--workdir=" + workdir); return this; } public DockerRunCommand setNetworkType(String type) { super.addCommandArguments("--net=" + type); return this; } public DockerRunCommand addMountLocation(String sourcePath, String destinationPath) { super.addCommandArguments("-v", sourcePath + ":" + destinationPath); return this; } public DockerRunCommand setCGroupParent(String parentPath) { super.addCommandArguments("--cgroup-parent=" + parentPath); return this; } public DockerRunCommand addDevice(String sourceDevice, String destinationDevice) { super.addCommandArguments("--device=" + sourceDevice + ":" + destinationDevice); return this; } public DockerRunCommand enableDetach() { super.addCommandArguments("--detach=true"); return this; } public DockerRunCommand disableDetach() { super.addCommandArguments("--detach=false"); return this; } public DockerRunCommand setOverrideCommandWithArgs( List<String> overrideCommandWithArgs) { this.overrideCommandWithArgs = overrideCommandWithArgs; return this; } @Override public String getCommandWithArguments() { List<String> argList = new ArrayList<>(); argList.add(super.getCommandWithArguments()); argList.add(image); if (overrideCommandWithArgs != null) { argList.addAll(overrideCommandWithArgs); } return StringUtils.join(" ", argList); } }
3,203
28.666667
88
java
hadoop
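Every setter appends one docker CLI flag and returns this, so callers assemble the whole 'docker run' invocation as a single fluent chain, exactly as launchContainer() does earlier in this section. A sketch with illustrative values:

DockerRunCommand runCommand =
    new DockerRunCommand("container_example_01", "nobody", "centos:7")
        .detachOnRun()
        .setContainerWorkDir("/tmp/work")
        .setNetworkType("host")
        .addMountLocation("/var/log/app", "/var/log/app");
// runCommand.getCommandWithArguments() then produces roughly:
// run --name=container_example_01 --user=nobody -d --workdir=/tmp/work
//     --net=host -v /var/log/app:/var/log/app centos:7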
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerLoadCommand.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; public class DockerLoadCommand extends DockerCommand { private static final String LOAD_COMMAND = "load"; public DockerLoadCommand(String localImageFile) { super(LOAD_COMMAND); super.addCommandArguments("--input=" + localImageFile); } }
1,168
36.709677
88
java
hadoop
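This pairs with ENV_DOCKER_CONTAINER_IMAGE_FILE from the runtime above: a localized image tarball can be loaded on the node before a run command references it. A one-line usage sketch (path illustrative):

DockerLoadCommand loadCommand = new DockerLoadCommand("/tmp/images/app-image.tar");
// loadCommand.getCommandWithArguments() -> "load --input=/tmp/images/app-image.tar"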
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged; import org.apache.hadoop.yarn.exceptions.YarnException; public class PrivilegedOperationException extends YarnException { private static final long serialVersionUID = 1L; private Integer exitCode; private String output; private String errorOutput; public PrivilegedOperationException() { super(); } public PrivilegedOperationException(String message) { super(message); } public PrivilegedOperationException(String message, Integer exitCode, String output, String errorOutput) { super(message); this.exitCode = exitCode; this.output = output; this.errorOutput = errorOutput; } public PrivilegedOperationException(Throwable cause) { super(cause); } public PrivilegedOperationException(Throwable cause, Integer exitCode, String output, String errorOutput) { super(cause); this.exitCode = exitCode; this.output = output; this.errorOutput = errorOutput; } public PrivilegedOperationException(String message, Throwable cause) { super(message, cause); } public Integer getExitCode() { return exitCode; } public String getOutput() { return output; } public String getErrorOutput() { return errorOutput; } }
2,126
28.957746
84
java
hadoop
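The exception deliberately carries the container-executor exit code plus both captured output streams, so callers can rewrap everything into a ContainerExecutionException without losing diagnostics; that is the pattern launchContainer() and signalContainer() follow above. A fragment of that handling, assuming the surrounding fields from those methods:

try {
  privilegedOperationExecutor.executePrivilegedOperation(null, op, null,
      container.getLaunchContext().getEnvironment(), false);
} catch (PrivilegedOperationException e) {
  // Exit code and both output streams survive the rethrow and end up in
  // the container's diagnostics.
  throw new ContainerExecutionException("Operation failed",
      e.getExitCode(), e.getOutput(), e.getErrorOutput());
}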
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import java.util.ArrayList; import java.util.Collections; import java.util.List; /** * Represents operations that require higher system privileges - e.g * creating cgroups, launching containers as specified users, 'tc' commands etc * that are completed using the container-executor binary */ @InterfaceAudience.Private @InterfaceStability.Unstable public class PrivilegedOperation { public final static char LINUX_FILE_PATH_SEPARATOR = '%'; public enum OperationType { CHECK_SETUP("--checksetup"), MOUNT_CGROUPS("--mount-cgroups"), INITIALIZE_CONTAINER(""), //no CLI switch supported yet LAUNCH_CONTAINER(""), //no CLI switch supported yet SIGNAL_CONTAINER(""), //no CLI switch supported yet DELETE_AS_USER(""), //no CLI switch supported yet LAUNCH_DOCKER_CONTAINER(""), //no CLI switch supported yet TC_MODIFY_STATE("--tc-modify-state"), TC_READ_STATE("--tc-read-state"), TC_READ_STATS("--tc-read-stats"), ADD_PID_TO_CGROUP(""), //no CLI switch supported yet. RUN_DOCKER_CMD("--run-docker"); private final String option; OperationType(String option) { this.option = option; } public String getOption() { return option; } } public static final String CGROUP_ARG_PREFIX = "cgroups="; public static final String CGROUP_ARG_NO_TASKS = "none"; private final OperationType opType; private final List<String> args; public PrivilegedOperation(OperationType opType, String arg) { this.opType = opType; this.args = new ArrayList<String>(); if (arg != null) { this.args.add(arg); } } public PrivilegedOperation(OperationType opType, List<String> args) { this.opType = opType; this.args = new ArrayList<String>(); if (args != null) { this.args.addAll(args); } } public void appendArgs(String... args) { for (String arg : args) { this.args.add(arg); } } public void appendArgs(List<String> args) { this.args.addAll(args); } public OperationType getOperationType() { return opType; } public List<String> getArguments() { return Collections.unmodifiableList(this.args); } @Override public boolean equals(Object other) { if (other == null || !(other instanceof PrivilegedOperation)) { return false; } PrivilegedOperation otherOp = (PrivilegedOperation) other; return otherOp.opType.equals(opType) && otherOp.args.equals(args); } @Override public int hashCode() { return opType.hashCode() + 97 * args.hashCode(); } /** * List of commands that the container-executor will execute. 
*/ public enum RunAsUserCommand { INITIALIZE_CONTAINER(0), LAUNCH_CONTAINER(1), SIGNAL_CONTAINER(2), DELETE_AS_USER(3), LAUNCH_DOCKER_CONTAINER(4); private int value; RunAsUserCommand(int value) { this.value = value; } public int getValue() { return value; } } /** * Result codes returned from the C container-executor. * These must match the values in container-executor.h. */ public enum ResultCode { OK(0), INVALID_USER_NAME(2), UNABLE_TO_EXECUTE_CONTAINER_SCRIPT(7), INVALID_CONTAINER_PID(9), INVALID_CONTAINER_EXEC_PERMISSIONS(22), INVALID_CONFIG_FILE(24), WRITE_CGROUP_FAILED(27); private final int value; ResultCode(int value) { this.value = value; } public int getValue() { return value; } } }
4,487
26.365854
84
java
hadoop
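A PrivilegedOperation is pure data: an OperationType plus an ordered argument list handed verbatim to container-executor. For instance, the 'add pid to cgroup' request that the bandwidth handler later in this section constructs boils down to the following (tasks-file path illustrative):

PrivilegedOperation op = new PrivilegedOperation(
    PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
    PrivilegedOperation.CGROUP_ARG_PREFIX
        + "/sys/fs/cgroup/net_cls/hadoop-yarn/container_example_01/tasks");
// equals()/hashCode() compare (type, args), making duplicate requests easy
// to detect before they reach the executor.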
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.conf.YarnConfiguration; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; /** * Provides mechanisms to execute PrivilegedOperations * */ @InterfaceAudience.Private @InterfaceStability.Unstable public class PrivilegedOperationExecutor { private static final Log LOG = LogFactory.getLog(PrivilegedOperationExecutor .class); private volatile static PrivilegedOperationExecutor instance; private String containerExecutorExe; public static String getContainerExecutorExecutablePath(Configuration conf) { String yarnHomeEnvVar = System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key()); File hadoopBin = new File(yarnHomeEnvVar, "bin"); String defaultPath = new File(hadoopBin, "container-executor").getAbsolutePath(); return null == conf ?
defaultPath : conf.get(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, defaultPath); } private void init(Configuration conf) { containerExecutorExe = getContainerExecutorExecutablePath(conf); } private PrivilegedOperationExecutor(Configuration conf) { init(conf); } public static PrivilegedOperationExecutor getInstance(Configuration conf) { if (instance == null) { synchronized (PrivilegedOperationExecutor.class) { if (instance == null) { instance = new PrivilegedOperationExecutor(conf); } } } return instance; } /** * @param prefixCommands in some cases ( e.g priorities using nice ), * prefix commands are necessary * @param operation the type and arguments for the operation to be * executed * @return execution string array for privileged operation */ public String[] getPrivilegedOperationExecutionCommand(List<String> prefixCommands, PrivilegedOperation operation) { List<String> fullCommand = new ArrayList<String>(); if (prefixCommands != null && !prefixCommands.isEmpty()) { fullCommand.addAll(prefixCommands); } fullCommand.add(containerExecutorExe); String cliSwitch = operation.getOperationType().getOption(); if (!cliSwitch.isEmpty()) { fullCommand.add(cliSwitch); } fullCommand.addAll(operation.getArguments()); String[] fullCommandArray = fullCommand.toArray(new String[fullCommand.size()]); if (LOG.isDebugEnabled()) { LOG.debug("Privileged Execution Command Array: " + Arrays.toString(fullCommandArray)); } return fullCommandArray; } /** * Executes a privileged operation. It is up to the callers to ensure that * each privileged operation's parameters are constructed correctly. The * parameters are passed verbatim to the container-executor binary. * * @param prefixCommands in some cases ( e.g priorities using nice ), * prefix commands are necessary * @param operation the type and arguments for the operation to be executed * @param workingDir (optional) working directory for execution * @param env (optional) env of the command will include specified vars * @param grabOutput return (possibly large) shell command output * @return stdout contents from shell executor - useful for some privileged * operations - e.g --tc-read-state * @throws org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException */ public String executePrivilegedOperation(List<String> prefixCommands, PrivilegedOperation operation, File workingDir, Map<String, String> env, boolean grabOutput) throws PrivilegedOperationException { String[] fullCommandArray = getPrivilegedOperationExecutionCommand (prefixCommands, operation); ShellCommandExecutor exec = new ShellCommandExecutor(fullCommandArray, workingDir, env); try { exec.execute(); if (LOG.isDebugEnabled()) { LOG.debug("command array:"); LOG.debug(Arrays.toString(fullCommandArray)); LOG.debug("Privileged Execution Operation Output:"); LOG.debug(exec.getOutput()); } } catch (ExitCodeException e) { String logLine = new StringBuffer("Shell execution returned exit code: ") .append(exec.getExitCode()) .append(". 
Privileged Execution Operation Output: ") .append(System.lineSeparator()).append(exec.getOutput()).toString(); LOG.warn(logLine); //stderr from shell executor seems to be stuffed into the exception //'message' - so, we have to extract it and set it as the error out throw new PrivilegedOperationException(e, e.getExitCode(), exec.getOutput(), e.getMessage()); } catch (IOException e) { LOG.warn("IOException executing command: ", e); throw new PrivilegedOperationException(e); } if (grabOutput) { return exec.getOutput(); } return null; } /** * Executes a privileged operation. It is up to the callers to ensure that * each privileged operation's parameters are constructed correctly. The * parameters are passed verbatim to the container-executor binary. * * @param operation the type and arguments for the operation to be executed * @param grabOutput return (possibly large) shell command output * @return stdout contents from shell executor - useful for some privileged * operations - e.g --tc-read-state * @throws org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException */ public String executePrivilegedOperation(PrivilegedOperation operation, boolean grabOutput) throws PrivilegedOperationException { return executePrivilegedOperation(null, operation, null, null, grabOutput); } //Utility functions for squashing together operations in supported ways //At some point, we need to create a generalized mechanism that uses a set //of squashing 'rules' to squash a set of PrivilegedOperations of varying //types - e.g Launch Container + Add Pid to CGroup(s) + TC rules /** * Squash operations for cgroups - e.g mount, add pid to cgroup etc., * For now, we only implement squashing for 'add pid to cgroup' since this * is the only optimization relevant to launching containers * * @return single squashed cgroup operation. Null on failure. */ public static PrivilegedOperation squashCGroupOperations (List<PrivilegedOperation> ops) throws PrivilegedOperationException { if (ops.size() == 0) { return null; } StringBuffer finalOpArg = new StringBuffer(PrivilegedOperation .CGROUP_ARG_PREFIX); boolean noTasks = true; for (PrivilegedOperation op : ops) { if (!op.getOperationType() .equals(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP)) { LOG.warn("Unsupported operation type: " + op.getOperationType()); throw new PrivilegedOperationException("Unsupported operation type:" + op.getOperationType()); } List<String> args = op.getArguments(); if (args.size() != 1) { LOG.warn("Invalid number of args: " + args.size()); throw new PrivilegedOperationException("Invalid number of args: " + args.size()); } String arg = args.get(0); String tasksFile = StringUtils.substringAfter(arg, PrivilegedOperation.CGROUP_ARG_PREFIX); if (tasksFile == null || tasksFile.isEmpty()) { LOG.warn("Invalid argument: " + arg); throw new PrivilegedOperationException("Invalid argument: " + arg); } if (tasksFile.equals(PrivilegedOperation.CGROUP_ARG_NO_TASKS)) { //Don't append to finalOpArg continue; } if (noTasks == false) { //We have already appended at least one tasks file. finalOpArg.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR); finalOpArg.append(tasksFile); } else { finalOpArg.append(tasksFile); noTasks = false; } } if (noTasks) { finalOpArg.append(PrivilegedOperation.CGROUP_ARG_NO_TASKS); //there // were no tasks file to append } PrivilegedOperation finalOp = new PrivilegedOperation( PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, finalOpArg .toString()); return finalOp; } }
9,907
35.832714
117
java
hadoop
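The executor is a lazily created process-wide singleton around the container-executor binary; passing grabOutput=true returns the child's stdout, which the tc-read paths below depend on. A fragment, assuming conf and a previously written tc command file are in scope:

PrivilegedOperationExecutor executor =
    PrivilegedOperationExecutor.getInstance(conf);
PrivilegedOperation readState = new PrivilegedOperation(
    PrivilegedOperation.OperationType.TC_READ_STATE, tcCommandFile);
try {
  // Executes: <container-executor> --tc-read-state <tcCommandFile>
  String state = executor.executePrivilegedOperation(readState, true);
} catch (PrivilegedOperationException e) {
  LOG.warn("tc state read failed, exit code: " + e.getExitCode(), e);
}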
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.util.SystemClock; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @InterfaceAudience.Private @InterfaceStability.Unstable public class TrafficControlBandwidthHandlerImpl implements OutboundBandwidthResourceHandler { private static final Log LOG = LogFactory .getLog(TrafficControlBandwidthHandlerImpl.class); //In the absence of 'scheduling' support, we'll 'infer' the guaranteed //outbound bandwidth for each container based on this number. This will //likely go away once we add support on the RM for this resource type. private static final int MAX_CONTAINER_COUNT = 50; private final PrivilegedOperationExecutor privilegedOperationExecutor; private final CGroupsHandler cGroupsHandler; private final TrafficController trafficController; private final ConcurrentHashMap<ContainerId, Integer> containerIdClassIdMap; private Configuration conf; private String device; private boolean strictMode; private int containerBandwidthMbit; private int rootBandwidthMbit; private int yarnBandwidthMbit; public TrafficControlBandwidthHandlerImpl(PrivilegedOperationExecutor privilegedOperationExecutor, CGroupsHandler cGroupsHandler, TrafficController trafficController) { this.privilegedOperationExecutor = privilegedOperationExecutor; this.cGroupsHandler = cGroupsHandler; this.trafficController = trafficController; this.containerIdClassIdMap = new ConcurrentHashMap<>(); } /** * Bootstrapping 'outbound-bandwidth' resource handler - mounts net_cls * controller and bootstraps a traffic control bandwidth shaping hierarchy * @param configuration yarn configuration in use * @return (potentially empty) list of privileged operations to execute. 
* @throws ResourceHandlerException */ @Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException { conf = configuration; //We'll do this inline for the time being - since this is a one time //operation. At some point, LCE code can be refactored to batch mount //operations across multiple controllers - cpu, net_cls, blkio etc cGroupsHandler .mountCGroupController(CGroupsHandler.CGroupController.NET_CLS); device = conf.get(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE, YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_INTERFACE); strictMode = configuration.getBoolean(YarnConfiguration .NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, YarnConfiguration .DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE); rootBandwidthMbit = conf.getInt(YarnConfiguration .NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, YarnConfiguration .DEFAULT_NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT); yarnBandwidthMbit = conf.getInt(YarnConfiguration .NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, rootBandwidthMbit); containerBandwidthMbit = (int) Math.ceil((double) yarnBandwidthMbit / MAX_CONTAINER_COUNT); StringBuffer logLine = new StringBuffer("strict mode is set to :") .append(strictMode).append(System.lineSeparator()); if (strictMode) { logLine.append("container bandwidth will be capped to soft limit.") .append(System.lineSeparator()); } else { logLine.append( "containers will be allowed to use spare YARN bandwidth.") .append(System.lineSeparator()); } logLine .append("containerBandwidthMbit soft limit (in mbit/sec) is set to : ") .append(containerBandwidthMbit); LOG.info(logLine); trafficController.bootstrap(device, rootBandwidthMbit, yarnBandwidthMbit); return null; } /** * Pre-start hook for 'outbound-bandwidth' resource. A cgroup is created * and a net_cls classid is generated and written to a cgroup file. A * traffic control shaping rule is created in order to limit outbound * bandwidth utilization. * @param container Container being launched * @return privileged operations for some cgroups/tc operations. * @throws ResourceHandlerException */ @Override public List<PrivilegedOperation> preStart(Container container) throws ResourceHandlerException { String containerIdStr = container.getContainerId().toString(); int classId = trafficController.getNextClassId(); String classIdStr = trafficController.getStringForNetClsClassId(classId); cGroupsHandler.createCGroup(CGroupsHandler.CGroupController .NET_CLS, containerIdStr); cGroupsHandler.updateCGroupParam(CGroupsHandler.CGroupController.NET_CLS, containerIdStr, CGroupsHandler.CGROUP_PARAM_CLASSID, classIdStr); containerIdClassIdMap.put(container.getContainerId(), classId); //Now create a privileged operation in order to update the tasks file with //the pid of the running container process (root of process tree). This can //only be done at the time of launching the container, in a privileged //executable. String tasksFile = cGroupsHandler.getPathForCGroupTasks( CGroupsHandler.CGroupController.NET_CLS, containerIdStr); String opArg = new StringBuffer(PrivilegedOperation.CGROUP_ARG_PREFIX) .append(tasksFile).toString(); List<PrivilegedOperation> ops = new ArrayList<>(); ops.add(new PrivilegedOperation( PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, opArg)); //Create a privileged operation to create a tc rule for this container //We'll return this to the calling (Linux) Container Executor //implementation for batching optimizations so that we don't fork/exec //additional times during container launch. 
TrafficController.BatchBuilder builder = trafficController.new BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE); builder.addContainerClass(classId, containerBandwidthMbit, strictMode); ops.add(builder.commitBatchToTempFile()); return ops; } /** * Reacquires state for a container - reads the classid from the cgroup * being used for the container being reacquired * @param containerId id of the container being reacquired. * @return (potentially empty) list of privileged operations * @throws ResourceHandlerException */ @Override public List<PrivilegedOperation> reacquireContainer(ContainerId containerId) throws ResourceHandlerException { String containerIdStr = containerId.toString(); if (LOG.isDebugEnabled()) { LOG.debug("Attempting to reacquire classId for container: " + containerIdStr); } String classIdStrFromFile = cGroupsHandler.getCGroupParam( CGroupsHandler.CGroupController.NET_CLS, containerIdStr, CGroupsHandler.CGROUP_PARAM_CLASSID); int classId = trafficController .getClassIdFromFileContents(classIdStrFromFile); LOG.info("Reacquired containerId -> classId mapping: " + containerIdStr + " -> " + classId); containerIdClassIdMap.put(containerId, classId); return null; } /** * Returns total bytes sent per container to be used for metrics tracking * purposes. * @return a map of containerId to bytes sent * @throws ResourceHandlerException */ public Map<ContainerId, Integer> getBytesSentPerContainer() throws ResourceHandlerException { Map<Integer, Integer> classIdStats = trafficController.readStats(); Map<ContainerId, Integer> containerIdStats = new HashMap<>(); for (Map.Entry<ContainerId, Integer> entry : containerIdClassIdMap .entrySet()) { ContainerId containerId = entry.getKey(); Integer classId = entry.getValue(); Integer bytesSent = classIdStats.get(classId); if (bytesSent == null) { LOG.warn("No bytes sent metric found for container: " + containerId + " with classId: " + classId); continue; } containerIdStats.put(containerId, bytesSent); } return containerIdStats; } /** * Cleanup operations once container is completed - deletes cgroup and * removes traffic shaping rule(s). * @param containerId id of the container that was completed. * @return * @throws ResourceHandlerException */ @Override public List<PrivilegedOperation> postComplete(ContainerId containerId) throws ResourceHandlerException { LOG.info("postComplete for container: " + containerId.toString()); cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.NET_CLS, containerId.toString()); Integer classId = containerIdClassIdMap.get(containerId); if (classId != null) { PrivilegedOperation op = trafficController.new BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE) .deleteContainerClass(classId).commitBatchToTempFile(); try { privilegedOperationExecutor.executePrivilegedOperation(op, false); trafficController.releaseClassId(classId); } catch (PrivilegedOperationException e) { LOG.warn("Failed to delete tc rule for classId: " + classId); throw new ResourceHandlerException( "Failed to delete tc rule for classId:" + classId); } } else { LOG.warn("Not cleaning up tc rules. classId unknown for container: " + containerId.toString()); } return null; } @Override public List<PrivilegedOperation> teardown() throws ResourceHandlerException { if (LOG.isDebugEnabled()) { LOG.debug("teardown(): Nothing to do"); } return null; } }
11,328
39.173759
112
java
hadoop
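bootstrap() above derives each container's soft limit from the YARN bandwidth cap and the hard-coded MAX_CONTAINER_COUNT of 50. A configuration fragment with that arithmetic spelled out; the numbers are illustrative, the keys are the YarnConfiguration constants the handler reads:

Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED, true);
conf.set(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE, "eth0");
conf.setInt(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, 1000);
conf.setInt(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, 700);
// containerBandwidthMbit = ceil(700 / 50.0) = 14 mbit/sec per container;
// with strict mode off, a container may borrow spare YARN bandwidth beyond 14.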
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import java.util.ArrayList; import java.util.List; /** * Provides mechanisms to get various resource handlers - cpu, memory, network, * disk etc., - based on configuration. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class ResourceHandlerModule { private static volatile ResourceHandlerChain resourceHandlerChain; /** * This specific implementation might provide resource management as well * as resource metrics functionality. We need to ensure that the same * instance is used for both. */ private static volatile TrafficControlBandwidthHandlerImpl trafficControlBandwidthHandler; private static volatile CGroupsHandler cGroupsHandler; private static volatile CGroupsBlkioResourceHandlerImpl cGroupsBlkioResourceHandler; /** * Returns an initialized, thread-safe CGroupsHandler instance. 
*/ public static CGroupsHandler getCGroupsHandler(Configuration conf) throws ResourceHandlerException { if (cGroupsHandler == null) { synchronized (CGroupsHandler.class) { if (cGroupsHandler == null) { cGroupsHandler = new CGroupsHandlerImpl(conf, PrivilegedOperationExecutor.getInstance(conf)); } } } return cGroupsHandler; } private static TrafficControlBandwidthHandlerImpl getTrafficControlBandwidthHandler(Configuration conf) throws ResourceHandlerException { if (conf.getBoolean(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED, YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_ENABLED)) { if (trafficControlBandwidthHandler == null) { synchronized (OutboundBandwidthResourceHandler.class) { if (trafficControlBandwidthHandler == null) { trafficControlBandwidthHandler = new TrafficControlBandwidthHandlerImpl(PrivilegedOperationExecutor .getInstance(conf), getCGroupsHandler(conf), new TrafficController(conf, PrivilegedOperationExecutor .getInstance(conf))); } } } return trafficControlBandwidthHandler; } else { return null; } } public static OutboundBandwidthResourceHandler getOutboundBandwidthResourceHandler(Configuration conf) throws ResourceHandlerException { return getTrafficControlBandwidthHandler(conf); } public static DiskResourceHandler getDiskResourceHandler(Configuration conf) throws ResourceHandlerException { if (conf.getBoolean(YarnConfiguration.NM_DISK_RESOURCE_ENABLED, YarnConfiguration.DEFAULT_NM_DISK_RESOURCE_ENABLED)) { return getCgroupsBlkioResourceHandler(conf); } return null; } private static CGroupsBlkioResourceHandlerImpl getCgroupsBlkioResourceHandler( Configuration conf) throws ResourceHandlerException { if (cGroupsBlkioResourceHandler == null) { synchronized (DiskResourceHandler.class) { if (cGroupsBlkioResourceHandler == null) { cGroupsBlkioResourceHandler = new CGroupsBlkioResourceHandlerImpl(getCGroupsHandler(conf)); } } } return cGroupsBlkioResourceHandler; } private static void addHandlerIfNotNull(List<ResourceHandler> handlerList, ResourceHandler handler) { if (handler != null) { handlerList.add(handler); } } private static void initializeConfiguredResourceHandlerChain( Configuration conf) throws ResourceHandlerException { ArrayList<ResourceHandler> handlerList = new ArrayList<>(); addHandlerIfNotNull(handlerList, getOutboundBandwidthResourceHandler(conf)); addHandlerIfNotNull(handlerList, getDiskResourceHandler(conf)); resourceHandlerChain = new ResourceHandlerChain(handlerList); } public static ResourceHandlerChain getConfiguredResourceHandlerChain( Configuration conf) throws ResourceHandlerException { if (resourceHandlerChain == null) { synchronized (ResourceHandlerModule.class) { if (resourceHandlerChain == null) { initializeConfiguredResourceHandlerChain(conf); } } } if (resourceHandlerChain.getResourceHandlerList().size() != 0) { return resourceHandlerChain; } else { return null; } } @VisibleForTesting static void nullifyResourceHandlerChain() throws ResourceHandlerException { resourceHandlerChain = null; } }
5,668
34.43125
111
java
hadoop
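Callers such as the LinuxContainerExecutor obtain every enabled handler as one chain; a null return means no handler is configured. A fragment, assuming ResourceHandlerChain exposes the same ResourceHandler lifecycle as the handlers it wraps:

ResourceHandlerChain chain =
    ResourceHandlerModule.getConfiguredResourceHandlerChain(conf);
if (chain != null) {
  chain.bootstrap(conf);
  // At container launch, collect privileged operations from every handler.
  List<PrivilegedOperation> ops = chain.preStart(container);
}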
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import java.io.*; import java.util.ArrayList; import java.util.BitSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Wrapper around the 'tc' tool. Provides access to a very specific subset of * the functionality provided by the tc tool. */ @InterfaceAudience.Private @InterfaceStability.Unstable class TrafficController { private static final Log LOG = LogFactory.getLog(TrafficController.class); private static final int ROOT_QDISC_HANDLE = 42; private static final int ZERO_CLASS_ID = 0; private static final int ROOT_CLASS_ID = 1; /** Traffic shaping class used for all unclassified traffic */ private static final int DEFAULT_CLASS_ID = 2; /** Traffic shaping class used for all YARN traffic */ private static final int YARN_ROOT_CLASS_ID = 3; /** Classes 0-3 are used already. We need to ensure that container classes * do not collide with these classids. */ private static final int MIN_CONTAINER_CLASS_ID = 4; /** This is the number of distinct (container) traffic shaping classes * that are supported */ private static final int MAX_CONTAINER_CLASSES = 1024; private static final String MBIT_SUFFIX = "mbit"; private static final String TMP_FILE_PREFIX = "tc."; private static final String TMP_FILE_SUFFIX = ".cmds"; /** Root queuing discipline attached to the root of the interface */ private static final String FORMAT_QDISC_ADD_TO_ROOT_WITH_DEFAULT = "qdisc add dev %s root handle %d: htb default %s"; /** Specifies a cgroup/classid based filter - based on the classid associated * with the outbound packet, the corresponding traffic shaping rule is used * . Please see tc documentation for additional details. 
*/ private static final String FORMAT_FILTER_CGROUP_ADD_TO_PARENT = "filter add dev %s parent %d: protocol ip prio 10 handle 1: cgroup"; /** Standard format for adding a traffic shaping class to a parent, with * the specified bandwidth limits */ private static final String FORMAT_CLASS_ADD_TO_PARENT_WITH_RATES = "class add dev %s parent %d:%d classid %d:%d htb rate %s ceil %s"; /** Standard format to delete a traffic shaping class */ private static final String FORMAT_DELETE_CLASS = "class del dev %s classid %d:%d"; /** Format of the classid that is to be used with the net_cls cgroup. Needs * to be of the form 0xAAAABBBB */ private static final String FORMAT_NET_CLS_CLASS_ID = "0x%04d%04d"; /** Commands to read the qdisc(s)/filter(s)/class(es) associated with an * interface */ private static final String FORMAT_READ_STATE = "qdisc show dev %1$s%n" + "filter show dev %1$s%n" + "class show dev %1$s"; private static final String FORMAT_READ_CLASSES = "class show dev %s"; /** Delete a qdisc and all its children - classes/filters etc */ private static final String FORMAT_WIPE_STATE = "qdisc del dev %s parent root"; private final Configuration conf; //Used to store the set of classids in use for container classes private final BitSet classIdSet; private final PrivilegedOperationExecutor privilegedOperationExecutor; private String tmpDirPath; private String device; private int rootBandwidthMbit; private int yarnBandwidthMbit; private int defaultClassBandwidthMbit; TrafficController(Configuration conf, PrivilegedOperationExecutor exec) { this.conf = conf; this.classIdSet = new BitSet(MAX_CONTAINER_CLASSES); this.privilegedOperationExecutor = exec; } /** * Bootstrap tc configuration */ public void bootstrap(String device, int rootBandwidthMbit, int yarnBandwidthMbit) throws ResourceHandlerException { if (device == null) { throw new ResourceHandlerException("device cannot be null!"); } String tmpDirBase = conf.get("hadoop.tmp.dir"); if (tmpDirBase == null) { throw new ResourceHandlerException("hadoop.tmp.dir not set!"); } tmpDirPath = tmpDirBase + "/nm-tc-rules"; File tmpDir = new File(tmpDirPath); if (!(tmpDir.exists() || tmpDir.mkdirs())) { LOG.warn("Unable to create directory: " + tmpDirPath); throw new ResourceHandlerException("Unable to create directory: " + tmpDirPath); } this.device = device; this.rootBandwidthMbit = rootBandwidthMbit; this.yarnBandwidthMbit = yarnBandwidthMbit; defaultClassBandwidthMbit = (rootBandwidthMbit - yarnBandwidthMbit) <= 0 ? rootBandwidthMbit : (rootBandwidthMbit - yarnBandwidthMbit); boolean recoveryEnabled = conf.getBoolean(YarnConfiguration .NM_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED); String state = null; if (!recoveryEnabled) { LOG.info("NM recovery is not enabled. We'll wipe tc state before proceeding."); } else { //NM recovery enabled - run a state check state = readState(); if (checkIfAlreadyBootstrapped(state)) { LOG.info("TC configuration is already in place. Not wiping state."); //We already have the list of existing container classes, if any //that were created after bootstrapping reacquireContainerClasses(state); return; } else { LOG.info("TC configuration is incomplete. Wiping tc state before proceeding"); } } wipeState(); //start over in case previous bootstrap was incomplete initializeState(); } private void initializeState() throws ResourceHandlerException { LOG.info("Initializing tc state."); BatchBuilder builder = new BatchBuilder(PrivilegedOperation.
OperationType.TC_MODIFY_STATE) .addRootQDisc() .addCGroupFilter() .addClassToRootQDisc(rootBandwidthMbit) .addDefaultClass(defaultClassBandwidthMbit, rootBandwidthMbit) //yarn bandwidth is capped with rate = ceil .addYARNRootClass(yarnBandwidthMbit, yarnBandwidthMbit); PrivilegedOperation op = builder.commitBatchToTempFile(); try { privilegedOperationExecutor.executePrivilegedOperation(op, false); } catch (PrivilegedOperationException e) { LOG.warn("Failed to bootstrap outbound bandwidth configuration"); throw new ResourceHandlerException( "Failed to bootstrap outbound bandwidth configuration", e); } } /** * Function to check if the interface in use has already been fully * bootstrapped with the required tc configuration * * @return boolean indicating the result of the check */ private boolean checkIfAlreadyBootstrapped(String state) throws ResourceHandlerException { List<String> regexes = new ArrayList<>(); //root qdisc regexes.add(String.format("^qdisc htb %d: root(.)*$", ROOT_QDISC_HANDLE)); //cgroup filter regexes.add(String.format("^filter parent %d: protocol ip " + "(.)*cgroup(.)*$", ROOT_QDISC_HANDLE)); //root, default and yarn classes regexes.add(String.format("^class htb %d:%d root(.)*$", ROOT_QDISC_HANDLE, ROOT_CLASS_ID)); regexes.add(String.format("^class htb %d:%d parent %d:%d(.)*$", ROOT_QDISC_HANDLE, DEFAULT_CLASS_ID, ROOT_QDISC_HANDLE, ROOT_CLASS_ID)); regexes.add(String.format("^class htb %d:%d parent %d:%d(.)*$", ROOT_QDISC_HANDLE, YARN_ROOT_CLASS_ID, ROOT_QDISC_HANDLE, ROOT_CLASS_ID)); for (String regex : regexes) { Pattern pattern = Pattern.compile(regex, Pattern.MULTILINE); if (pattern.matcher(state).find()) { if (LOG.isDebugEnabled()) { LOG.debug("Matched regex: " + regex); } } else { String logLine = new StringBuffer("Failed to match regex: ") .append(regex).append(" Current state: ").append(state).toString(); LOG.warn(logLine); return false; } } LOG.info("Bootstrap check succeeded"); return true; } private String readState() throws ResourceHandlerException { //Sample state output: // qdisc htb 42: root refcnt 2 r2q 10 default 2 direct_packets_stat 0 // filter parent 42: protocol ip pref 10 cgroup handle 0x1 // // filter parent 42: protocol ip pref 10 cgroup handle 0x1 // // class htb 42:1 root rate 10000Kbit ceil 10000Kbit burst 1600b cburst 1600b // class htb 42:2 parent 42:1 prio 0 rate 3000Kbit ceil 10000Kbit burst 1599b cburst 1600b // class htb 42:3 parent 42:1 prio 0 rate 7000Kbit ceil 7000Kbit burst 1598b cburst 1598b BatchBuilder builder = new BatchBuilder(PrivilegedOperation. OperationType.TC_READ_STATE) .readState(); PrivilegedOperation op = builder.commitBatchToTempFile(); try { String output = privilegedOperationExecutor.executePrivilegedOperation(op, true); if (LOG.isDebugEnabled()) { LOG.debug("TC state: %n" + output); } return output; } catch (PrivilegedOperationException e) { LOG.warn("Failed to bootstrap outbound bandwidth rules"); throw new ResourceHandlerException( "Failed to bootstrap outbound bandwidth rules", e); } } private void wipeState() throws ResourceHandlerException { BatchBuilder builder = new BatchBuilder(PrivilegedOperation. OperationType.TC_MODIFY_STATE) .wipeState(); PrivilegedOperation op = builder.commitBatchToTempFile(); try { LOG.info("Wiping tc state."); privilegedOperationExecutor.executePrivilegedOperation(op, false); } catch (PrivilegedOperationException e) { LOG.warn("Failed to wipe tc state. This could happen if the interface" + " is already in its default state. Ignoring."); //Ignoring this exception. 
This could happen if the interface is already //in its default state. For this reason we don't throw a //ResourceHandlerException here. } } /** * Parses the current state and looks for classids already in use */ private void reacquireContainerClasses(String state) { //At this point we have already successfully passed //checkIfAlreadyBootstrapped() - so we know that at least the //root classes are in place. String tcClassesStr = state.substring(state.indexOf("class")); //one class per line - the results of the split will need to be trimmed String[] tcClasses = Pattern.compile("$", Pattern.MULTILINE) .split(tcClassesStr); Pattern tcClassPattern = Pattern.compile(String.format( "class htb %d:(\\d+) .*", ROOT_QDISC_HANDLE)); synchronized (classIdSet) { for (String tcClassSplit : tcClasses) { String tcClass = tcClassSplit.trim(); if (!tcClass.isEmpty()) { Matcher classMatcher = tcClassPattern.matcher(tcClass); if (classMatcher.matches()) { int classId = Integer.parseInt(classMatcher.group(1)); if (classId >= MIN_CONTAINER_CLASS_ID) { classIdSet.set(classId - MIN_CONTAINER_CLASS_ID); LOG.info("Reacquired container classid: " + classId); } } else { LOG.warn("Unable to match classid in string: " + tcClass); } } } } } public Map<Integer, Integer> readStats() throws ResourceHandlerException { BatchBuilder builder = new BatchBuilder(PrivilegedOperation. OperationType.TC_READ_STATS) .readClasses(); PrivilegedOperation op = builder.commitBatchToTempFile(); try { String output = privilegedOperationExecutor.executePrivilegedOperation(op, true); if (LOG.isDebugEnabled()) { LOG.debug("TC stats output: " + output); } Map<Integer, Integer> classIdBytesStats = parseStatsString(output); if (LOG.isDebugEnabled()) { LOG.debug("classId -> bytes sent %n" + classIdBytesStats); } return classIdBytesStats; } catch (PrivilegedOperationException e) { LOG.warn("Failed to get tc stats"); throw new ResourceHandlerException("Failed to get tc stats", e); } } private Map<Integer, Integer> parseStatsString(String stats) { //Example class stats segment (multiple present in tc output) // class htb 42:4 parent 42:3 prio 0 rate 1000Kbit ceil 7000Kbit burst 1600b cburst 1598b // Sent 77921300 bytes 52617 pkt (dropped 0, overlimits 0 requeues 0) // rate 6973Kbit 589pps backlog 0b 39p requeues 0 // lended: 3753 borrowed: 22514 giants: 0 // tokens: -122164 ctokens: -52488 String[] lines = Pattern.compile("$", Pattern.MULTILINE) .split(stats); Pattern tcClassPattern = Pattern.compile(String.format( "class htb %d:(\\d+) .*", ROOT_QDISC_HANDLE)); Pattern bytesPattern = Pattern.compile("Sent (\\d+) bytes.*"); int currentClassId = -1; Map<Integer, Integer> containerClassIdStats = new HashMap<>(); for (String lineSplit : lines) { String line = lineSplit.trim(); if (!line.isEmpty()) { //Check if we encountered a stats segment for a container class Matcher classMatcher = tcClassPattern.matcher(line); if (classMatcher.matches()) { int classId = Integer.parseInt(classMatcher.group(1)); if (classId >= MIN_CONTAINER_CLASS_ID) { currentClassId = classId; continue; } } //Check if we encountered a stats line Matcher bytesMatcher = bytesPattern.matcher(line); if (bytesMatcher.matches()) { //we found at least one class segment if (currentClassId != -1) { int bytes = Integer.parseInt(bytesMatcher.group(1)); containerClassIdStats.put(currentClassId, bytes); } else { LOG.warn("Matched a 'bytes sent' line outside of a class stats " + "segment : " + line); } continue; } //skip other kinds of non-empty lines - since we aren't interested in //them. 
} } return containerClassIdStats; } /** * Returns a formatted string for attaching a qdisc to the root of the * device/interface. Additional qdisc * parameters can be supplied - for example, the default 'class' to use for * incoming packets */ private String getStringForAddRootQDisc() { return String.format(FORMAT_QDISC_ADD_TO_ROOT_WITH_DEFAULT, device, ROOT_QDISC_HANDLE, DEFAULT_CLASS_ID); } /** * Returns a formatted string for a filter that matches packets based on the * presence of net_cls classids */ private String getStringForaAddCGroupFilter() { return String.format(FORMAT_FILTER_CGROUP_ADD_TO_PARENT, device, ROOT_QDISC_HANDLE); } /** * Get the next available classid. This has to be released post container * complete */ public int getNextClassId() throws ResourceHandlerException { synchronized (classIdSet) { int index = classIdSet.nextClearBit(0); if (index >= MAX_CONTAINER_CLASSES) { throw new ResourceHandlerException("Reached max container classes: " + MAX_CONTAINER_CLASSES); } classIdSet.set(index); return (index + MIN_CONTAINER_CLASS_ID); } } public void releaseClassId(int classId) throws ResourceHandlerException { synchronized (classIdSet) { int index = classId - MIN_CONTAINER_CLASS_ID; if (index < 0 || index >= MAX_CONTAINER_CLASSES) { throw new ResourceHandlerException("Invalid incoming classId: " + classId); } classIdSet.clear(index); } } /** * Returns a formatted string representing the given classId including a * handle */ public String getStringForNetClsClassId(int classId) { return String.format(FORMAT_NET_CLS_CLASS_ID, ROOT_QDISC_HANDLE, classId); } /** * A value read out of net_cls.classid file is in decimal form. We need to * convert to 32-bit/8 digit hex, extract the lower 16-bit/four digits * as an int */ public int getClassIdFromFileContents(String input) { //convert from decimal back to fixed size hex form //e.g 4325381 -> 00420005 String classIdStr = String.format("%08x", Integer.parseInt(input)); if (LOG.isDebugEnabled()) { LOG.debug("ClassId hex string : " + classIdStr); } //extract and return 4 digits //e.g 00420005 -> 0005 return Integer.parseInt(classIdStr.substring(4)); } /** * Adds a tc class to qdisc at root */ private String getStringForAddClassToRootQDisc(int rateMbit) { String rateMbitStr = rateMbit + MBIT_SUFFIX; //example : "class add dev eth0 parent 42:0 classid 42:1 htb rate 1000mbit // ceil 1000mbit" return String.format(FORMAT_CLASS_ADD_TO_PARENT_WITH_RATES, device, ROOT_QDISC_HANDLE, ZERO_CLASS_ID, ROOT_QDISC_HANDLE, ROOT_CLASS_ID, rateMbitStr, rateMbitStr); } private String getStringForAddDefaultClass(int rateMbit, int ceilMbit) { String rateMbitStr = rateMbit + MBIT_SUFFIX; String ceilMbitStr = ceilMbit + MBIT_SUFFIX; //example : "class add dev eth0 parent 42:1 classid 42:2 htb rate 300mbit // ceil 1000mbit" return String.format(FORMAT_CLASS_ADD_TO_PARENT_WITH_RATES, device, ROOT_QDISC_HANDLE, ROOT_CLASS_ID, ROOT_QDISC_HANDLE, DEFAULT_CLASS_ID, rateMbitStr, ceilMbitStr); } private String getStringForAddYARNRootClass(int rateMbit, int ceilMbit) { String rateMbitStr = rateMbit + MBIT_SUFFIX; String ceilMbitStr = ceilMbit + MBIT_SUFFIX; //example : "class add dev eth0 parent 42:1 classid 42:3 htb rate 700mbit // ceil 1000mbit" return String.format(FORMAT_CLASS_ADD_TO_PARENT_WITH_RATES, device, ROOT_QDISC_HANDLE, ROOT_CLASS_ID, ROOT_QDISC_HANDLE, YARN_ROOT_CLASS_ID, rateMbitStr, ceilMbitStr); } private String getStringForAddContainerClass(int classId, int rateMbit, int ceilMbit) { String rateMbitStr = rateMbit + MBIT_SUFFIX; String ceilMbitStr = 
ceilMbit + MBIT_SUFFIX; //example : "class add dev eth0 parent 42:99 classid 42:99 htb rate 50mbit // ceil 700mbit" return String.format(FORMAT_CLASS_ADD_TO_PARENT_WITH_RATES, device, ROOT_QDISC_HANDLE, YARN_ROOT_CLASS_ID, ROOT_QDISC_HANDLE, classId, rateMbitStr, ceilMbitStr); } private String getStringForDeleteContainerClass(int classId) { //example "class del dev eth0 classid 42:7" return String.format(FORMAT_DELETE_CLASS, device, ROOT_QDISC_HANDLE, classId); } private String getStringForReadState() { return String.format(FORMAT_READ_STATE, device); } private String getStringForReadClasses() { return String.format(FORMAT_READ_CLASSES, device); } private String getStringForWipeState() { return String.format(FORMAT_WIPE_STATE, device); } public class BatchBuilder { final PrivilegedOperation operation; final List<String> commands; public BatchBuilder(PrivilegedOperation.OperationType opType) throws ResourceHandlerException { switch (opType) { case TC_MODIFY_STATE: case TC_READ_STATE: case TC_READ_STATS: operation = new PrivilegedOperation(opType, (String) null); commands = new ArrayList<>(); break; default: throw new ResourceHandlerException("Not a tc operation type : " + opType); } } private BatchBuilder addRootQDisc() { commands.add(getStringForAddRootQDisc()); return this; } private BatchBuilder addCGroupFilter() { commands.add(getStringForaAddCGroupFilter()); return this; } private BatchBuilder addClassToRootQDisc(int rateMbit) { commands.add(getStringForAddClassToRootQDisc(rateMbit)); return this; } private BatchBuilder addDefaultClass(int rateMbit, int ceilMbit) { commands.add(getStringForAddDefaultClass(rateMbit, ceilMbit)); return this; } private BatchBuilder addYARNRootClass(int rateMbit, int ceilMbit) { commands.add(getStringForAddYARNRootClass(rateMbit, ceilMbit)); return this; } public BatchBuilder addContainerClass(int classId, int rateMbit, boolean strictMode) { int ceilMbit; if (strictMode) { ceilMbit = rateMbit; } else { ceilMbit = yarnBandwidthMbit; } commands.add(getStringForAddContainerClass(classId, rateMbit, ceilMbit)); return this; } public BatchBuilder deleteContainerClass(int classId) { commands.add(getStringForDeleteContainerClass(classId)); return this; } private BatchBuilder readState() { commands.add(getStringForReadState()); return this; } //We'll read all classes, but use a different tc operation type //when reading stats for all these classes. Stats are fetched using a //different tc cli option (-s). private BatchBuilder readClasses() { //We'll read all classes, but use a different tc operation type //for reading stats for all these classes. Stats are fetched using a //different tc cli option (-s). commands.add(getStringForReadClasses()); return this; } private BatchBuilder wipeState() { commands.add(getStringForWipeState()); return this; } public PrivilegedOperation commitBatchToTempFile() throws ResourceHandlerException { try { File tcCmds = File.createTempFile(TMP_FILE_PREFIX, TMP_FILE_SUFFIX, new File(tmpDirPath)); Writer writer = new OutputStreamWriter(new FileOutputStream(tcCmds), "UTF-8"); PrintWriter printWriter = new PrintWriter(writer); for (String command : commands) { printWriter.println(command); } printWriter.close(); operation.appendArgs(tcCmds.getAbsolutePath()); return operation; } catch (IOException e) { LOG.warn("Failed to create or write to temporary file in dir: " + tmpDirPath); throw new ResourceHandlerException( "Failed to create or write to temporary file in dir: " + tmpDirPath); } } } //end BatchBuilder }
23,704
35.41321
112
java
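The TrafficController record above derives net_cls classids from the format string "0x%04d%04d" and recovers them from the decimal value the kernel reports in the net_cls.classid file. Below is a minimal, self-contained sketch of that round-trip; the handle 42 and class id 5 are illustrative values, not read from a live system, and (mirroring the source) the low four digits are treated as decimal digits rather than true hex.

public class ClassIdDemo {
    // Mirrors FORMAT_NET_CLS_CLASS_ID: the qdisc handle and the class id are
    // rendered as two zero-padded 4-digit fields behind "0x".
    static String toNetClsClassId(int qdiscHandle, int classId) {
        return String.format("0x%04d%04d", qdiscHandle, classId);
    }

    // Mirrors getClassIdFromFileContents: the kernel reports net_cls.classid
    // in decimal, so convert to 8 hex digits and keep the low 4 digits.
    static int fromFileContents(String decimalContents) {
        String hex = String.format("%08x", Integer.parseInt(decimalContents));
        return Integer.parseInt(hex.substring(4));
    }

    public static void main(String[] args) {
        System.out.println(toNetClsClassId(42, 5));      // prints 0x00420005
        System.out.println(fromFileContents("4325381")); // prints 5 (0x00420005 == 4325381)
    }
}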
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Provides CGroups functionality. Implementations are expected to be * thread-safe */ @InterfaceAudience.Private @InterfaceStability.Unstable public interface CGroupsHandler { public enum CGroupController { CPU("cpu"), NET_CLS("net_cls"), BLKIO("blkio"); private final String name; CGroupController(String name) { this.name = name; } String getName() { return name; } } public static final String CGROUP_FILE_TASKS = "tasks"; public static final String CGROUP_PARAM_CLASSID = "classid"; public static final String CGROUP_PARAM_BLKIO_WEIGHT = "weight"; /** * Mounts a cgroup controller * @param controller - the controller being mounted * @throws ResourceHandlerException */ public void mountCGroupController(CGroupController controller) throws ResourceHandlerException; /** * Creates a cgroup for a given controller * @param controller - controller type for which the cgroup is being created * @param cGroupId - id of the cgroup being created * @return full path to created cgroup * @throws ResourceHandlerException */ public String createCGroup(CGroupController controller, String cGroupId) throws ResourceHandlerException; /** * Deletes the specified cgroup * @param controller - controller type for the cgroup * @param cGroupId - id of the cgroup being deleted * @throws ResourceHandlerException */ public void deleteCGroup(CGroupController controller, String cGroupId) throws ResourceHandlerException; /** * Gets the relative path for the cgroup, independent of a controller, for a * given cgroup id. * @param cGroupId - id of the cgroup * @return path for the cgroup relative to the root of (any) controller. 
*/ public String getRelativePathForCGroup(String cGroupId); /** * Gets the full path for the cgroup, given a controller and a cgroup id * @param controller - controller type for the cgroup * @param cGroupId - id of the cgroup * @return full path for the cgroup */ public String getPathForCGroup(CGroupController controller, String cGroupId); /** * Gets the full path for the cgroup's tasks file, given a controller and a * cgroup id * @param controller - controller type for the cgroup * @param cGroupId - id of the cgroup * @return full path for the cgroup's tasks file */ public String getPathForCGroupTasks(CGroupController controller, String cGroupId); /** * Gets the full path for a cgroup parameter, given a controller, * cgroup id and parameter name * @param controller - controller type for the cgroup * @param cGroupId - id of the cgroup * @param param - cgroup parameter ( e.g classid ) * @return full path for the cgroup parameter */ public String getPathForCGroupParam(CGroupController controller, String cGroupId, String param); /** * updates a cgroup parameter, given a controller, cgroup id, parameter name * and a parameter value * @param controller - controller type for the cgroup * @param cGroupId - id of the cgroup * @param param - cgroup parameter ( e.g classid ) * @param value - value to be written to the parameter file * @throws ResourceHandlerException */ public void updateCGroupParam(CGroupController controller, String cGroupId, String param, String value) throws ResourceHandlerException; /** * reads a cgroup parameter value, given a controller, cgroup id, parameter * name * @param controller - controller type for the cgroup * @param cGroupId - id of the cgroup * @param param - cgroup parameter ( e.g classid ) * @return parameter value as read from the parameter file * @throws ResourceHandlerException */ public String getCGroupParam(CGroupController controller, String cGroupId, String param) throws ResourceHandlerException; }
4,929
33.475524
83
java
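The CGroupsHandler interface in the record above is largely about composing cgroup file paths. A hedged sketch of that composition, matching what CGroupsHandlerImpl (later in this section) does in getPathForCGroupParam; the mount point and hierarchy prefix here are hypothetical, since the real values come from /proc/mounts discovery and the yarn.nodemanager.linux-container-executor.cgroups.hierarchy setting.

public class CGroupPathDemo {
    // Hypothetical values for illustration only.
    static final String CONTROLLER_MOUNT = "/sys/fs/cgroup/net_cls";
    static final String HIERARCHY_PREFIX = "hadoop-yarn";

    static String pathForCGroup(String cGroupId) {
        return CONTROLLER_MOUNT + "/" + HIERARCHY_PREFIX + "/" + cGroupId;
    }

    // Parameter files are named "<controller>.<param>", e.g. net_cls.classid.
    static String pathForParam(String controllerName, String cGroupId, String param) {
        return pathForCGroup(cGroupId) + "/" + controllerName + "." + param;
    }

    public static void main(String[] args) {
        // e.g. /sys/fs/cgroup/net_cls/hadoop-yarn/container_.../net_cls.classid
        System.out.println(
            pathForParam("net_cls", "container_e01_0001_01_000002", "classid"));
    }
}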
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/OutboundBandwidthResourceHandler.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Unstable public interface OutboundBandwidthResourceHandler extends ResourceHandler { }
1,162
37.766667
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandler.java
/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import java.util.List; /** * Handler interface for resource subsystems' isolation and enforcement. e.g. cpu, memory, network, disks etc */ @InterfaceAudience.Private @InterfaceStability.Unstable public interface ResourceHandler { /** * Bootstrap resource subsystem. * * @return (possibly empty) list of operations that require elevated * privileges */ List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException; /** * Prepare a resource environment for container launch * * @param container Container being launched * @return (possibly empty) list of operations that require elevated * privileges e.g a) create a custom cgroup b) add pid for container to tasks * file for a cgroup. * @throws ResourceHandlerException */ List<PrivilegedOperation> preStart(Container container) throws ResourceHandlerException; /** * Reacquire state for a container that was already launched * * @param containerId id of the container being reacquired. * @return (possibly empty) list of operations that require elevated * privileges * @throws ResourceHandlerException */ List<PrivilegedOperation> reacquireContainer(ContainerId containerId) throws ResourceHandlerException; /** * Perform any tasks necessary after container completion * @param containerId of the container that was completed. * @return (possibly empty) list of operations that require elevated * privileges * @throws ResourceHandlerException */ List<PrivilegedOperation> postComplete(ContainerId containerId) throws ResourceHandlerException; /** * Teardown environment for resource subsystem if requested. This method * needs to be used with care since it could impact running containers. * * @return (possibly empty) list of operations that require elevated * privileges */ List<PrivilegedOperation> teardown() throws ResourceHandlerException; }
3,298
35.252747
108
java
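The ResourceHandler lifecycle in the record above (bootstrap / preStart / reacquireContainer / postComplete / teardown) is easiest to see in a trivial implementation. A minimal no-op sketch, assuming the NodeManager classes shown in this section are on the classpath; every hook returns an empty list, which callers treat as "no privileged operations needed".

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandler;

// A hypothetical handler for illustration: nothing to set up, nothing to tear down.
public class NoOpResourceHandler implements ResourceHandler {
    @Override
    public List<PrivilegedOperation> bootstrap(Configuration conf) {
        return Collections.emptyList();
    }
    @Override
    public List<PrivilegedOperation> preStart(Container container) {
        return Collections.emptyList();
    }
    @Override
    public List<PrivilegedOperation> reacquireContainer(ContainerId containerId) {
        return Collections.emptyList();
    }
    @Override
    public List<PrivilegedOperation> postComplete(ContainerId containerId) {
        return Collections.emptyList();
    }
    @Override
    public List<PrivilegedOperation> teardown() {
        return Collections.emptyList();
    }
}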
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; import java.io.*; import java.nio.file.Files; import java.nio.file.Paths; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Support for interacting with various CGroup subsystems. Thread-safe. */ @InterfaceAudience.Private @InterfaceStability.Unstable class CGroupsHandlerImpl implements CGroupsHandler { private static final Log LOG = LogFactory.getLog(CGroupsHandlerImpl.class); private static final String MTAB_FILE = "/proc/mounts"; private static final String CGROUPS_FSTYPE = "cgroup"; private final String cGroupPrefix; private final boolean enableCGroupMount; private final String cGroupMountPath; private final long deleteCGroupTimeout; private final long deleteCGroupDelay; private Map<CGroupController, String> controllerPaths; private final ReadWriteLock rwLock; private final PrivilegedOperationExecutor privilegedOperationExecutor; private final Clock clock; public CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor privilegedOperationExecutor) throws ResourceHandlerException { this.cGroupPrefix = conf.get(YarnConfiguration. NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn") .replaceAll("^/", "").replaceAll("$/", ""); this.enableCGroupMount = conf.getBoolean(YarnConfiguration. NM_LINUX_CONTAINER_CGROUPS_MOUNT, false); this.cGroupMountPath = conf.get(YarnConfiguration. 
NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null); this.deleteCGroupTimeout = conf.getLong( YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT); this.deleteCGroupDelay = conf.getLong(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY); this.controllerPaths = new HashMap<>(); this.rwLock = new ReentrantReadWriteLock(); this.privilegedOperationExecutor = privilegedOperationExecutor; this.clock = new SystemClock(); init(); } private void init() throws ResourceHandlerException { initializeControllerPaths(); } private String getControllerPath(CGroupController controller) { try { rwLock.readLock().lock(); return controllerPaths.get(controller); } finally { rwLock.readLock().unlock(); } } private void initializeControllerPaths() throws ResourceHandlerException { if (enableCGroupMount) { // nothing to do here - we support 'deferred' mounting of specific // controllers - we'll populate the path for a given controller when an // explicit mountCGroupController request is issued. LOG.info("CGroup controller mounting enabled."); } else { // cluster admins are expected to have mounted controllers in specific // locations - we'll attempt to figure out mount points Map<CGroupController, String> cPaths = initializeControllerPathsFromMtab(MTAB_FILE, this.cGroupPrefix); // we want to do a bulk update without the paths changing concurrently try { rwLock.writeLock().lock(); controllerPaths = cPaths; } finally { rwLock.writeLock().unlock(); } } } @VisibleForTesting static Map<CGroupController, String> initializeControllerPathsFromMtab( String mtab, String cGroupPrefix) throws ResourceHandlerException { try { Map<String, List<String>> parsedMtab = parseMtab(mtab); Map<CGroupController, String> ret = new HashMap<>(); for (CGroupController controller : CGroupController.values()) { String name = controller.getName(); String controllerPath = findControllerInMtab(name, parsedMtab); if (controllerPath != null) { File f = new File(controllerPath + "/" + cGroupPrefix); if (FileUtil.canWrite(f)) { ret.put(controller, controllerPath); } else { String error = new StringBuffer("Mount point Based on mtab file: ") .append(mtab) .append(". Controller mount point not writable for: ") .append(name).toString(); LOG.error(error); throw new ResourceHandlerException(error); } } else { LOG.warn("Controller not mounted but automount disabled: " + name); } } return ret; } catch (IOException e) { LOG.warn("Failed to initialize controller paths! Exception: " + e); throw new ResourceHandlerException( "Failed to initialize controller paths!"); } } /* We are looking for entries of the form: * none /cgroup/path/mem cgroup rw,memory 0 0 * * Use a simple pattern that splits on the five spaces, and * grabs the 2, 3, and 4th fields. */ private static final Pattern MTAB_FILE_FORMAT = Pattern.compile( "^[^\\s]+\\s([^\\s]+)\\s([^\\s]+)\\s([^\\s]+)\\s[^\\s]+\\s[^\\s]+$"); /* * Returns a map: path -> mount options * for mounts with type "cgroup". Cgroup controllers will * appear in the list of options for a path. 
*/ private static Map<String, List<String>> parseMtab(String mtab) throws IOException { Map<String, List<String>> ret = new HashMap<String, List<String>>(); BufferedReader in = null; try { FileInputStream fis = new FileInputStream(new File(mtab)); in = new BufferedReader(new InputStreamReader(fis, "UTF-8")); for (String str = in.readLine(); str != null; str = in.readLine()) { Matcher m = MTAB_FILE_FORMAT.matcher(str); boolean mat = m.find(); if (mat) { String path = m.group(1); String type = m.group(2); String options = m.group(3); if (type.equals(CGROUPS_FSTYPE)) { List<String> value = Arrays.asList(options.split(",")); ret.put(path, value); } } } } catch (IOException e) { throw new IOException("Error while reading " + mtab, e); } finally { IOUtils.cleanup(LOG, in); } return ret; } private static String findControllerInMtab(String controller, Map<String, List<String>> entries) { for (Map.Entry<String, List<String>> e : entries.entrySet()) { if (e.getValue().contains(controller)) return e.getKey(); } return null; } @Override public void mountCGroupController(CGroupController controller) throws ResourceHandlerException { if (!enableCGroupMount) { LOG.warn("CGroup mounting is disabled - ignoring mount request for: " + controller.getName()); return; } String path = getControllerPath(controller); if (path == null) { try { //lock out other readers/writers till we are done rwLock.writeLock().lock(); String hierarchy = cGroupPrefix; StringBuffer controllerPath = new StringBuffer() .append(cGroupMountPath).append('/').append(controller.getName()); StringBuffer cGroupKV = new StringBuffer() .append(controller.getName()).append('=').append(controllerPath); PrivilegedOperation.OperationType opType = PrivilegedOperation .OperationType.MOUNT_CGROUPS; PrivilegedOperation op = new PrivilegedOperation(opType, (String) null); op.appendArgs(hierarchy, cGroupKV.toString()); LOG.info("Mounting controller " + controller.getName() + " at " + controllerPath); privilegedOperationExecutor.executePrivilegedOperation(op, false); //if privileged operation succeeds, update controller paths controllerPaths.put(controller, controllerPath.toString()); return; } catch (PrivilegedOperationException e) { LOG.error("Failed to mount controller: " + controller.getName()); throw new ResourceHandlerException("Failed to mount controller: " + controller.getName()); } finally { rwLock.writeLock().unlock(); } } else { LOG.info("CGroup controller already mounted at: " + path); return; } } @Override public String getRelativePathForCGroup(String cGroupId) { return new StringBuffer(cGroupPrefix).append("/") .append(cGroupId).toString(); } @Override public String getPathForCGroup(CGroupController controller, String cGroupId) { return new StringBuffer(getControllerPath(controller)) .append('/').append(cGroupPrefix).append("/") .append(cGroupId).toString(); } @Override public String getPathForCGroupTasks(CGroupController controller, String cGroupId) { return new StringBuffer(getPathForCGroup(controller, cGroupId)) .append('/').append(CGROUP_FILE_TASKS).toString(); } @Override public String getPathForCGroupParam(CGroupController controller, String cGroupId, String param) { return new StringBuffer(getPathForCGroup(controller, cGroupId)) .append('/').append(controller.getName()).append('.') .append(param).toString(); } @Override public String createCGroup(CGroupController controller, String cGroupId) throws ResourceHandlerException { String path = getPathForCGroup(controller, cGroupId); if (LOG.isDebugEnabled()) { LOG.debug("createCgroup: " + 
path); } if (!new File(path).mkdir()) { throw new ResourceHandlerException("Failed to create cgroup at " + path); } return path; } /* * Utility routine to print first line from cgroup tasks file */ private void logLineFromTasksFile(File cgf) { String str; if (LOG.isDebugEnabled()) { try (BufferedReader inl = new BufferedReader(new InputStreamReader(new FileInputStream(cgf + "/tasks"), "UTF-8"))) { if ((str = inl.readLine()) != null) { LOG.debug("First line in cgroup tasks file: " + cgf + " " + str); } } catch (IOException e) { LOG.warn("Failed to read cgroup tasks file. ", e); } } } /** * If tasks file is empty, delete the cgroup. * * @param cgf object referring to the cgroup to be deleted * @return Boolean indicating whether cgroup was deleted */ boolean checkAndDeleteCgroup(File cgf) throws InterruptedException { boolean deleted = false; // FileInputStream in = null; try (FileInputStream in = new FileInputStream(cgf + "/tasks")) { if (in.read() == -1) { /* * "tasks" file is empty, sleep a bit more and then try to delete the * cgroup. Some versions of linux will occasionally panic due to a race * condition in this area, hence the paranoia. */ Thread.sleep(deleteCGroupDelay); deleted = cgf.delete(); if (!deleted) { LOG.warn("Failed attempt to delete cgroup: " + cgf); } } else { logLineFromTasksFile(cgf); } } catch (IOException e) { LOG.warn("Failed to read cgroup tasks file. ", e); } return deleted; } @Override public void deleteCGroup(CGroupController controller, String cGroupId) throws ResourceHandlerException { boolean deleted = false; String cGroupPath = getPathForCGroup(controller, cGroupId); if (LOG.isDebugEnabled()) { LOG.debug("deleteCGroup: " + cGroupPath); } long start = clock.getTime(); do { try { deleted = checkAndDeleteCgroup(new File(cGroupPath)); if (!deleted) { Thread.sleep(deleteCGroupDelay); } } catch (InterruptedException ex) { // NOP } } while (!deleted && (clock.getTime() - start) < deleteCGroupTimeout); if (!deleted) { LOG.warn("Unable to delete " + cGroupPath + ", tried to delete for " + deleteCGroupTimeout + "ms"); } } @Override public void updateCGroupParam(CGroupController controller, String cGroupId, String param, String value) throws ResourceHandlerException { String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param); PrintWriter pw = null; if (LOG.isDebugEnabled()) { LOG.debug( "updateCGroupParam for path: " + cGroupParamPath + " with value " + value); } try { File file = new File(cGroupParamPath); Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8"); pw = new PrintWriter(w); pw.write(value); } catch (IOException e) { throw new ResourceHandlerException(new StringBuffer("Unable to write to ") .append(cGroupParamPath).append(" with value: ").append(value) .toString(), e); } finally { if (pw != null) { boolean hasError = pw.checkError(); pw.close(); if (hasError) { throw new ResourceHandlerException( new StringBuffer("Unable to write to ") .append(cGroupParamPath).append(" with value: ").append(value) .toString()); } if (pw.checkError()) { throw new ResourceHandlerException("Error while closing cgroup file" + " " + cGroupParamPath); } } } } @Override public String getCGroupParam(CGroupController controller, String cGroupId, String param) throws ResourceHandlerException { String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param); try { byte[] contents = Files.readAllBytes(Paths.get(cGroupParamPath)); return new String(contents, "UTF-8").trim(); } catch (IOException e) { throw new ResourceHandlerException( 
"Unable to read from " + cGroupParamPath); } } }
15,684
34.089485
112
java
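CGroupsHandlerImpl above locates controller mount points by matching each /proc/mounts line against MTAB_FILE_FORMAT. A small standalone demo of that regex on the sample line given in the class's own comment; the line is hard-coded here, not read from a live /proc/mounts.

import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MtabParseDemo {
    // Same pattern as CGroupsHandlerImpl.MTAB_FILE_FORMAT: six space-separated
    // fields; groups 1-3 capture mount point, fs type and mount options.
    static final Pattern MTAB_LINE = Pattern.compile(
        "^[^\\s]+\\s([^\\s]+)\\s([^\\s]+)\\s([^\\s]+)\\s[^\\s]+\\s[^\\s]+$");

    public static void main(String[] args) {
        String line = "none /cgroup/path/mem cgroup rw,memory 0 0";
        Matcher m = MTAB_LINE.matcher(line);
        if (m.find() && "cgroup".equals(m.group(2))) {
            System.out.println("mount point: " + m.group(1));
            // The controllers appear among the comma-separated options.
            System.out.println("options: " + Arrays.asList(m.group(3).split(",")));
        }
    }
}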
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java
/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import java.util.ArrayList; import java.util.Collections; import java.util.List; /** * A helper class to delegate functionality to a 'chain' of * ResourceHandler(s) */ @InterfaceAudience.Private @InterfaceStability.Unstable public class ResourceHandlerChain implements ResourceHandler { private final List<ResourceHandler> resourceHandlers; public ResourceHandlerChain(List<ResourceHandler> resourceHandlers) { this.resourceHandlers = resourceHandlers; } @Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException { List<PrivilegedOperation> allOperations = new ArrayList<PrivilegedOperation>(); for (ResourceHandler resourceHandler : resourceHandlers) { List<PrivilegedOperation> handlerOperations = resourceHandler.bootstrap(configuration); if (handlerOperations != null) { allOperations.addAll(handlerOperations); } } return allOperations; } @Override public List<PrivilegedOperation> preStart(Container container) throws ResourceHandlerException { List<PrivilegedOperation> allOperations = new ArrayList<PrivilegedOperation>(); for (ResourceHandler resourceHandler : resourceHandlers) { List<PrivilegedOperation> handlerOperations = resourceHandler.preStart(container); if (handlerOperations != null) { allOperations.addAll(handlerOperations); } } return allOperations; } @Override public List<PrivilegedOperation> reacquireContainer(ContainerId containerId) throws ResourceHandlerException { List<PrivilegedOperation> allOperations = new ArrayList<PrivilegedOperation>(); for (ResourceHandler resourceHandler : resourceHandlers) { List<PrivilegedOperation> handlerOperations = resourceHandler.reacquireContainer(containerId); if (handlerOperations != null) { allOperations.addAll(handlerOperations); } } return allOperations; } @Override public List<PrivilegedOperation> postComplete(ContainerId containerId) throws ResourceHandlerException { List<PrivilegedOperation> allOperations = new ArrayList<PrivilegedOperation>(); for (ResourceHandler resourceHandler : resourceHandlers) { List<PrivilegedOperation> handlerOperations = resourceHandler.postComplete(containerId); if (handlerOperations != null) { allOperations.addAll(handlerOperations); } } return allOperations; } @Override public 
List<PrivilegedOperation> teardown() throws ResourceHandlerException { List<PrivilegedOperation> allOperations = new ArrayList<PrivilegedOperation>(); for (ResourceHandler resourceHandler : resourceHandlers) { List<PrivilegedOperation> handlerOperations = resourceHandler.teardown(); if (handlerOperations != null) { allOperations.addAll(handlerOperations); } } return allOperations; } List<ResourceHandler> getResourceHandlerList() { return Collections.unmodifiableList(resourceHandlers); } }
4,470
30.265734
103
java
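ResourceHandlerChain above fans each lifecycle call out to its delegates in order and concatenates the non-null results. A short usage sketch, reusing the hypothetical NoOpResourceHandler from the earlier sketch; with only no-op delegates the combined operation list is empty.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;

public class ChainDemo {
    public static void main(String[] args) throws ResourceHandlerException {
        // Two no-op handlers; the chain calls bootstrap() on each and merges
        // the returned privileged operations, skipping any null results.
        ResourceHandlerChain chain = new ResourceHandlerChain(
            Arrays.asList(new NoOpResourceHandler(), new NoOpResourceHandler()));
        System.out.println(chain.bootstrap(new Configuration()).size()); // prints 0
    }
}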
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; /** * Handler class to handle the blkio controller. Currently it splits resources * evenly across all containers. Once we have scheduling sorted out, we can * modify the function to represent the disk resources allocated. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class CGroupsBlkioResourceHandlerImpl implements DiskResourceHandler { static final Log LOG = LogFactory .getLog(CGroupsBlkioResourceHandlerImpl.class); private CGroupsHandler cGroupsHandler; // Arbitrarily choose a weight - all that matters is that all containers // get the same weight assigned to them. Once we have scheduling support // this number will be determined dynamically for each container. @VisibleForTesting static final String DEFAULT_WEIGHT = "500"; private static final String PARTITIONS_FILE = "/proc/partitions"; CGroupsBlkioResourceHandlerImpl(CGroupsHandler cGroupsHandler) { this.cGroupsHandler = cGroupsHandler; // check for linux so that we don't print messages for tests running on // other platforms if(Shell.LINUX) { checkDiskScheduler(); } } private void checkDiskScheduler() { String data; // read /proc/partitions and check to make sure that sd* and hd* // are using the CFQ scheduler. 
If they aren't print a warning try { byte[] contents = Files.readAllBytes(Paths.get(PARTITIONS_FILE)); data = new String(contents, "UTF-8").trim(); } catch (IOException e) { String msg = "Couldn't read " + PARTITIONS_FILE + "; can't determine disk scheduler type"; LOG.warn(msg, e); return; } String[] lines = data.split(System.lineSeparator()); if (lines.length > 0) { for (String line : lines) { String[] columns = line.split("\\s+"); if (columns.length > 4) { String partition = columns[4]; // check some known partitions to make sure the disk scheduler // is cfq - not meant to be comprehensive, more a sanity check if (partition.startsWith("sd") || partition.startsWith("hd") || partition.startsWith("vd") || partition.startsWith("xvd")) { String schedulerPath = "/sys/block/" + partition + "/queue/scheduler"; File schedulerFile = new File(schedulerPath); if (schedulerFile.exists()) { try { byte[] contents = Files.readAllBytes(Paths.get(schedulerPath)); String schedulerString = new String(contents, "UTF-8").trim(); if (!schedulerString.contains("[cfq]")) { LOG.warn("Device " + partition + " does not use the CFQ" + " scheduler; disk isolation using " + "CGroups will not work on this partition."); } } catch (IOException ie) { LOG.warn( "Unable to determine disk scheduler type for partition " + partition, ie); } } } } } } } @Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException { // if bootstrap is called on this class, disk is already enabled // so no need to check again this.cGroupsHandler .mountCGroupController(CGroupsHandler.CGroupController.BLKIO); return null; } @Override public List<PrivilegedOperation> preStart(Container container) throws ResourceHandlerException { String cgroupId = container.getContainerId().toString(); cGroupsHandler .createCGroup(CGroupsHandler.CGroupController.BLKIO, cgroupId); try { cGroupsHandler.updateCGroupParam(CGroupsHandler.CGroupController.BLKIO, cgroupId, CGroupsHandler.CGROUP_PARAM_BLKIO_WEIGHT, DEFAULT_WEIGHT); } catch (ResourceHandlerException re) { cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.BLKIO, cgroupId); LOG.warn("Could not update cgroup for container", re); throw re; } List<PrivilegedOperation> ret = new ArrayList<>(); ret.add(new PrivilegedOperation( PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupsHandler.getPathForCGroupTasks( CGroupsHandler.CGroupController.BLKIO, cgroupId))); return ret; } @Override public List<PrivilegedOperation> reacquireContainer(ContainerId containerId) throws ResourceHandlerException { return null; } @Override public List<PrivilegedOperation> postComplete(ContainerId containerId) throws ResourceHandlerException { cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.BLKIO, containerId.toString()); return null; } @Override public List<PrivilegedOperation> teardown() throws ResourceHandlerException { return null; } }
6,563
37.385965
103
java
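CGroupsBlkioResourceHandlerImpl above warns when a disk is not using the CFQ elevator, since blkio weights only take effect under CFQ. A Linux-only sketch of the same probe; it iterates /sys/block directly rather than parsing /proc/partitions as the source does, and the paths are kernel-provided rather than assumptions.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class SchedulerCheckDemo {
    public static void main(String[] args) throws IOException {
        try (Stream<Path> devices = Files.list(Paths.get("/sys/block"))) {
            for (Path dev : (Iterable<Path>) devices::iterator) {
                Path sched = dev.resolve("queue/scheduler");
                if (Files.exists(sched)) {
                    // The active elevator is the bracketed entry,
                    // e.g. "noop deadline [cfq]".
                    String s = new String(Files.readAllBytes(sched),
                        StandardCharsets.UTF_8).trim();
                    System.out.println(dev.getFileName() + ": "
                        + (s.contains("[cfq]")
                            ? "cfq (blkio weights effective)"
                            : "not cfq -> " + s));
                }
            }
        }
    }
}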
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DiskResourceHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Resource handler for disk resources. */ @InterfaceAudience.Private @InterfaceStability.Unstable public interface DiskResourceHandler extends ResourceHandler { }
1,182
37.16129
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerException.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.exceptions.YarnException; @InterfaceAudience.Private @InterfaceStability.Unstable public class ResourceHandlerException extends YarnException { private static final long serialVersionUID = 1L; public ResourceHandlerException() { super(); } public ResourceHandlerException(String message) { super(message); } public ResourceHandlerException(Throwable cause) { super(cause); } public ResourceHandlerException(String message, Throwable cause) { super(message, cause); } }
1,564
32.297872
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * The launcher for the containers. This service should be started only after * the {@link ResourceLocalizationService} is started as it depends on creation * of system directories on the local file-system. 
* */ public class ContainersLauncher extends AbstractService implements EventHandler<ContainersLauncherEvent> { private static final Log LOG = LogFactory.getLog(ContainersLauncher.class); private final Context context; private final ContainerExecutor exec; private final Dispatcher dispatcher; private final ContainerManagerImpl containerManager; private LocalDirsHandlerService dirsHandler; @VisibleForTesting public ExecutorService containerLauncher = Executors.newCachedThreadPool( new ThreadFactoryBuilder() .setNameFormat("ContainersLauncher #%d") .build()); @VisibleForTesting public final Map<ContainerId, ContainerLaunch> running = Collections.synchronizedMap(new HashMap<ContainerId, ContainerLaunch>()); public ContainersLauncher(Context context, Dispatcher dispatcher, ContainerExecutor exec, LocalDirsHandlerService dirsHandler, ContainerManagerImpl containerManager) { super("containers-launcher"); this.exec = exec; this.context = context; this.dispatcher = dispatcher; this.dirsHandler = dirsHandler; this.containerManager = containerManager; } @Override protected void serviceInit(Configuration conf) throws Exception { try { //TODO Is this required? FileContext.getLocalFSFileContext(conf); } catch (UnsupportedFileSystemException e) { throw new YarnRuntimeException("Failed to start ContainersLauncher", e); } super.serviceInit(conf); } @Override protected void serviceStop() throws Exception { containerLauncher.shutdownNow(); super.serviceStop(); } @Override public void handle(ContainersLauncherEvent event) { // TODO: ContainersLauncher launches containers one by one!! Container container = event.getContainer(); ContainerId containerId = container.getContainerId(); switch (event.getType()) { case LAUNCH_CONTAINER: Application app = context.getApplications().get( containerId.getApplicationAttemptId().getApplicationId()); ContainerLaunch launch = new ContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager); containerLauncher.submit(launch); running.put(containerId, launch); break; case RECOVER_CONTAINER: app = context.getApplications().get( containerId.getApplicationAttemptId().getApplicationId()); launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher, exec, app, event.getContainer(), dirsHandler, containerManager); containerLauncher.submit(launch); running.put(containerId, launch); break; case CLEANUP_CONTAINER: ContainerLaunch launcher = running.remove(containerId); if (launcher == null) { // Container not launched. So nothing needs to be done. return; } // Cleanup a container whether it is running/killed/completed, so that // no sub-processes are alive. try { launcher.cleanupContainer(); } catch (IOException e) { LOG.warn("Got exception while cleaning container " + containerId + ". Ignoring."); } break; } } }
5,785
37.832215
104
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; import java.io.File; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext; import org.apache.hadoop.yarn.util.ConverterUtils; /** * This is a ContainerLaunch which has been recovered after an NM restart (for * rolling upgrades) */ public class RecoveredContainerLaunch extends ContainerLaunch { private static final Log LOG = LogFactory.getLog( RecoveredContainerLaunch.class); public RecoveredContainerLaunch(Context context, Configuration configuration, Dispatcher dispatcher, ContainerExecutor exec, Application app, Container container, LocalDirsHandlerService dirsHandler, ContainerManagerImpl containerManager) { super(context, configuration, dispatcher, exec, app, container, dirsHandler, containerManager); this.shouldLaunchContainer.set(true); } /** * Wait on the process specified in pid file and return its exit code */ @SuppressWarnings("unchecked") @Override public Integer call() { int retCode = ExitCode.LOST.getExitCode(); ContainerId containerId = container.getContainerId(); String appIdStr = ConverterUtils.toString( containerId.getApplicationAttemptId().getApplicationId()); String containerIdStr = ConverterUtils.toString(containerId); dispatcher.getEventHandler().handle(new ContainerEvent(containerId, ContainerEventType.CONTAINER_LAUNCHED)); boolean notInterrupted = true; try { File pidFile = locatePidFile(appIdStr, containerIdStr); if (pidFile != null) { String pidPathStr = pidFile.getPath(); pidFilePath = new Path(pidPathStr); exec.activateContainer(containerId, pidFilePath); retCode = 
exec.reacquireContainer( new ContainerReacquisitionContext.Builder() .setUser(container.getUser()) .setContainerId(containerId) .build()); } else { LOG.warn("Unable to locate pid file for container " + containerIdStr); } } catch (IOException e) { LOG.error("Unable to recover container " + containerIdStr, e); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for exit code from " + containerId); notInterrupted = false; } finally { if (notInterrupted) { this.completed.set(true); exec.deactivateContainer(containerId); try { getContext().getNMStateStore().storeContainerCompleted(containerId, retCode); } catch (IOException e) { LOG.error("Unable to set exit code for container " + containerId); } } } if (retCode != 0) { LOG.warn("Recovered container exited with a non-zero exit code " + retCode); this.dispatcher.getEventHandler().handle(new ContainerExitEvent( containerId, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, retCode, "Container exited with a non-zero exit code " + retCode)); return retCode; } LOG.info("Recovered container " + containerId + " succeeded"); dispatcher.getEventHandler().handle( new ContainerEvent(containerId, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS)); return 0; } private File locatePidFile(String appIdStr, String containerIdStr) { String pidSubpath= getPidFileSubpath(appIdStr, containerIdStr); for (String dir : getContext().getLocalDirsHandler(). getLocalDirsForRead()) { File pidFile = new File(dir, pidSubpath); if (pidFile.exists()) { return pidFile; } } return null; } }
5,593
39.244604
95
java
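The recovery path in RecoveredContainerLaunch above hinges on locatePidFile(): before the executor can reacquire the process, the NodeManager scans every configured local directory for the container's pid file. Below is a minimal standalone sketch of that scan; the directory names and the relative pid-file path are illustrative stand-ins, not the NodeManager's actual nmPrivate layout.

import java.io.File;
import java.util.Arrays;
import java.util.List;

// Minimal sketch of the pid-file scan used during container recovery.
public class PidFileLocator {
  // Returns the first existing pid file under any of the given local dirs,
  // or null if none is found (mirroring locatePidFile() above).
  static File locate(List<String> localDirs, String relativePidPath) {
    for (String dir : localDirs) {
      File candidate = new File(dir, relativePidPath);
      if (candidate.exists()) {
        return candidate;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    // Hypothetical local dirs and pid-file subpath, for illustration only.
    List<String> dirs = Arrays.asList("/tmp/nm-local-1", "/tmp/nm-local-2");
    File pid = locate(dirs, "nmPrivate/app_1/container_1/container_1.pid");
    System.out.println(pid == null ? "pid file not found" : pid.getPath());
  }
}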
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; public enum ContainersLauncherEventType { LAUNCH_CONTAINER, RECOVER_CONTAINER, CLEANUP_CONTAINER, // Cleanup of the container process (group) itself. }
1,004
37.653846
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; import org.apache.hadoop.yarn.event.AbstractEvent; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; public class ContainersLauncherEvent extends AbstractEvent<ContainersLauncherEventType>{ private final Container container; public ContainersLauncherEvent(Container container, ContainersLauncherEventType eventType) { super(eventType); this.container = container; } public Container getContainer() { return container; } }
1,409
33.390244
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.DelayedProcessKiller; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.WindowsSecureContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; public class ContainerLaunch implements Callable<Integer> { private static final Log LOG = LogFactory.getLog(ContainerLaunch.class); public static final String CONTAINER_SCRIPT = Shell.appendScriptExtension("launch_container"); public static final String FINAL_CONTAINER_TOKENS_FILE = "container_tokens"; private static final String PID_FILE_NAME_FMT = "%s.pid"; private static final String EXIT_CODE_FILE_SUFFIX = ".exitcode"; protected final Dispatcher dispatcher; protected final ContainerExecutor exec; private final Application app; protected final Container container; private final Configuration conf; private final Context context; private final ContainerManagerImpl containerManager; protected AtomicBoolean shouldLaunchContainer = new AtomicBoolean(false); protected AtomicBoolean completed = new AtomicBoolean(false); private long sleepDelayBeforeSigKill = 250; private long maxKillWaitTime = 2000; protected Path pidFilePath = null; private final LocalDirsHandlerService dirsHandler; public ContainerLaunch(Context context, Configuration configuration, Dispatcher dispatcher, ContainerExecutor exec, Application app, Container container, LocalDirsHandlerService dirsHandler, ContainerManagerImpl containerManager) { this.context = context; this.conf = configuration; this.app = app; this.exec = exec; this.container = container; this.dispatcher = dispatcher; this.dirsHandler = dirsHandler; this.containerManager = containerManager; this.sleepDelayBeforeSigKill = conf.getLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, YarnConfiguration.DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS); this.maxKillWaitTime = conf.getLong(YarnConfiguration.NM_PROCESS_KILL_WAIT_MS, YarnConfiguration.DEFAULT_NM_PROCESS_KILL_WAIT_MS); } @VisibleForTesting public static String expandEnvironment(String var, Path containerLogDir) { var = var.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR, containerLogDir.toString()); var = var.replace(ApplicationConstants.CLASS_PATH_SEPARATOR, File.pathSeparator); // replace parameter expansion marker. e.g. 
{{VAR}} on Windows is replaced // as %VAR% and on Linux replaced as "$VAR" if (Shell.WINDOWS) { var = var.replaceAll("(\\{\\{)|(\\}\\})", "%"); } else { var = var.replace(ApplicationConstants.PARAMETER_EXPANSION_LEFT, "$"); var = var.replace(ApplicationConstants.PARAMETER_EXPANSION_RIGHT, ""); } return var; } @Override @SuppressWarnings("unchecked") // dispatcher not typed public Integer call() { final ContainerLaunchContext launchContext = container.getLaunchContext(); Map<Path,List<String>> localResources = null; ContainerId containerID = container.getContainerId(); String containerIdStr = ConverterUtils.toString(containerID); final List<String> command = launchContext.getCommands(); int ret = -1; // CONTAINER_KILLED_ON_REQUEST should not be missed if the container // is already at KILLING if (container.getContainerState() == ContainerState.KILLING) { dispatcher.getEventHandler().handle( new ContainerExitEvent(containerID, ContainerEventType.CONTAINER_KILLED_ON_REQUEST, Shell.WINDOWS ? ExitCode.FORCE_KILLED.getExitCode() : ExitCode.TERMINATED.getExitCode(), "Container terminated before launch.")); return 0; } try { localResources = container.getLocalizedResources(); if (localResources == null) { throw RPCUtil.getRemoteException( "Unable to get local resources when Container " + containerID + " is at " + container.getContainerState()); } final String user = container.getUser(); // /////////////////////////// Variable expansion // Before the container script gets written out. List<String> newCmds = new ArrayList<String>(command.size()); String appIdStr = app.getAppId().toString(); String relativeContainerLogDir = ContainerLaunch .getRelativeContainerLogDir(appIdStr, containerIdStr); Path containerLogDir = dirsHandler.getLogPathForWrite(relativeContainerLogDir, false); for (String str : command) { // TODO: Should we instead work via symlinks without this grammar? 
newCmds.add(expandEnvironment(str, containerLogDir)); } launchContext.setCommands(newCmds); Map<String, String> environment = launchContext.getEnvironment(); // Make a copy of env to iterate & do variable expansion for (Entry<String, String> entry : environment.entrySet()) { String value = entry.getValue(); value = expandEnvironment(value, containerLogDir); entry.setValue(value); } // /////////////////////////// End of variable expansion FileContext lfs = FileContext.getLocalFSFileContext(); Path nmPrivateContainerScriptPath = dirsHandler.getLocalPathForWrite( getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR + CONTAINER_SCRIPT); Path nmPrivateTokensPath = dirsHandler.getLocalPathForWrite( getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR + String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, containerIdStr)); Path nmPrivateClasspathJarDir = dirsHandler.getLocalPathForWrite( getContainerPrivateDir(appIdStr, containerIdStr)); DataOutputStream containerScriptOutStream = null; DataOutputStream tokensOutStream = null; // Select the working directory for the container Path containerWorkDir = dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + Path.SEPARATOR + user + Path.SEPARATOR + ContainerLocalizer.APPCACHE + Path.SEPARATOR + appIdStr + Path.SEPARATOR + containerIdStr, LocalDirAllocator.SIZE_UNKNOWN, false); String pidFileSubpath = getPidFileSubpath(appIdStr, containerIdStr); // pid file should be in nm private dir so that it is not // accessible by users pidFilePath = dirsHandler.getLocalPathForWrite(pidFileSubpath); List<String> localDirs = dirsHandler.getLocalDirs(); List<String> logDirs = dirsHandler.getLogDirs(); List<String> containerLogDirs = new ArrayList<String>(); for( String logDir : logDirs) { containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir); } if (!dirsHandler.areDisksHealthy()) { ret = ContainerExitStatus.DISKS_FAILED; throw new IOException("Most of the disks failed. " + dirsHandler.getDisksHealthReport(false)); } try { // /////////// Write out the container-script in the nmPrivate space. List<Path> appDirs = new ArrayList<Path>(localDirs.size()); for (String localDir : localDirs) { Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, user); Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE); appDirs.add(new Path(appsdir, appIdStr)); } containerScriptOutStream = lfs.create(nmPrivateContainerScriptPath, EnumSet.of(CREATE, OVERWRITE)); // Set the token location too. environment.put( ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, new Path(containerWorkDir, FINAL_CONTAINER_TOKENS_FILE).toUri().getPath()); // Sanitize the container's environment sanitizeEnv(environment, containerWorkDir, appDirs, containerLogDirs, localResources, nmPrivateClasspathJarDir); // Write out the environment exec.writeLaunchEnv(containerScriptOutStream, environment, localResources, launchContext.getCommands()); // /////////// End of writing out container-script // /////////// Write out the container-tokens in the nmPrivate space. tokensOutStream = lfs.create(nmPrivateTokensPath, EnumSet.of(CREATE, OVERWRITE)); Credentials creds = container.getCredentials(); creds.writeTokenStorageToStream(tokensOutStream); // /////////// End of writing out container-tokens } finally { IOUtils.cleanup(LOG, containerScriptOutStream, tokensOutStream); } // LaunchContainer is a blocking call. We are here almost means the // container is launched, so send out the event. 
dispatcher.getEventHandler().handle(new ContainerEvent( containerID, ContainerEventType.CONTAINER_LAUNCHED)); context.getNMStateStore().storeContainerLaunched(containerID); // Check if the container is signalled to be killed. if (!shouldLaunchContainer.compareAndSet(false, true)) { LOG.info("Container " + containerIdStr + " not launched as " + "cleanup already called"); ret = ExitCode.TERMINATED.getExitCode(); } else { exec.activateContainer(containerID, pidFilePath); ret = exec.launchContainer(new ContainerStartContext.Builder() .setContainer(container) .setLocalizedResources(localResources) .setNmPrivateContainerScriptPath(nmPrivateContainerScriptPath) .setNmPrivateTokensPath(nmPrivateTokensPath) .setUser(user) .setAppId(appIdStr) .setContainerWorkDir(containerWorkDir) .setLocalDirs(localDirs) .setLogDirs(logDirs) .build()); } } catch (Throwable e) { LOG.warn("Failed to launch container.", e); dispatcher.getEventHandler().handle(new ContainerExitEvent( containerID, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, ret, e.getMessage())); return ret; } finally { completed.set(true); exec.deactivateContainer(containerID); try { context.getNMStateStore().storeContainerCompleted(containerID, ret); } catch (IOException e) { LOG.error("Unable to set exit code for container " + containerID); } } if (LOG.isDebugEnabled()) { LOG.debug("Container " + containerIdStr + " completed with exit code " + ret); } if (ret == ExitCode.FORCE_KILLED.getExitCode() || ret == ExitCode.TERMINATED.getExitCode()) { // If the process was killed, Send container_cleanedup_after_kill and // just break out of this method. dispatcher.getEventHandler().handle( new ContainerExitEvent(containerID, ContainerEventType.CONTAINER_KILLED_ON_REQUEST, ret, "Container exited with a non-zero exit code " + ret)); return ret; } if (ret != 0) { LOG.warn("Container exited with a non-zero exit code " + ret); this.dispatcher.getEventHandler().handle(new ContainerExitEvent( containerID, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, ret, "Container exited with a non-zero exit code " + ret)); return ret; } LOG.info("Container " + containerIdStr + " succeeded "); dispatcher.getEventHandler().handle( new ContainerEvent(containerID, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS)); return 0; } protected String getPidFileSubpath(String appIdStr, String containerIdStr) { return getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR + String.format(ContainerLaunch.PID_FILE_NAME_FMT, containerIdStr); } /** * Cleanup the container. * Cancels the launch if launch has not started yet or signals * the executor to not execute the process if not already done so. * Also, sends a SIGTERM followed by a SIGKILL to the process if * the process id is available. * @throws IOException */ @SuppressWarnings("unchecked") // dispatcher not typed public void cleanupContainer() throws IOException { ContainerId containerId = container.getContainerId(); String containerIdStr = ConverterUtils.toString(containerId); LOG.info("Cleaning up container " + containerIdStr); try { context.getNMStateStore().storeContainerKilled(containerId); } catch (IOException e) { LOG.error("Unable to mark container " + containerId + " killed in store", e); } // launch flag will be set to true if process already launched boolean alreadyLaunched = !shouldLaunchContainer.compareAndSet(false, true); if (!alreadyLaunched) { LOG.info("Container " + containerIdStr + " not launched." 
+ " No cleanup needed to be done"); return; } LOG.debug("Marking container " + containerIdStr + " as inactive"); // this should ensure that if the container process has not launched // by this time, it will never be launched exec.deactivateContainer(containerId); if (LOG.isDebugEnabled()) { LOG.debug("Getting pid for container " + containerIdStr + " to kill" + " from pid file " + (pidFilePath != null ? pidFilePath.toString() : "null")); } // however the container process may have already started try { // get process id from pid file if available // else if shell is still active, get it from the shell String processId = null; if (pidFilePath != null) { processId = getContainerPid(pidFilePath); } // kill process if (processId != null) { String user = container.getUser(); LOG.debug("Sending signal to pid " + processId + " as user " + user + " for container " + containerIdStr); final Signal signal = sleepDelayBeforeSigKill > 0 ? Signal.TERM : Signal.KILL; boolean result = exec.signalContainer( new ContainerSignalContext.Builder() .setContainer(container) .setUser(user) .setPid(processId) .setSignal(signal) .build()); LOG.debug("Sent signal " + signal + " to pid " + processId + " as user " + user + " for container " + containerIdStr + ", result=" + (result? "success" : "failed")); if (sleepDelayBeforeSigKill > 0) { new DelayedProcessKiller(container, user, processId, sleepDelayBeforeSigKill, Signal.KILL, exec).start(); } } } catch (Exception e) { String message = "Exception when trying to cleanup container " + containerIdStr + ": " + StringUtils.stringifyException(e); LOG.warn(message); dispatcher.getEventHandler().handle( new ContainerDiagnosticsUpdateEvent(containerId, message)); } finally { // cleanup pid file if present if (pidFilePath != null) { FileContext lfs = FileContext.getLocalFSFileContext(); lfs.delete(pidFilePath, false); lfs.delete(pidFilePath.suffix(EXIT_CODE_FILE_SUFFIX), false); } } } /** * Loop through for a time-bounded interval waiting to * read the process id from a file generated by a running process. * @param pidFilePath File from which to read the process id * @return Process ID * @throws Exception */ private String getContainerPid(Path pidFilePath) throws Exception { String containerIdStr = ConverterUtils.toString(container.getContainerId()); String processId = null; LOG.debug("Accessing pid for container " + containerIdStr + " from pid file " + pidFilePath); int sleepCounter = 0; final int sleepInterval = 100; // loop waiting for pid file to show up // until our timer expires in which case we admit defeat while (true) { processId = ProcessIdFileReader.getProcessId(pidFilePath); if (processId != null) { LOG.debug("Got pid " + processId + " for container " + containerIdStr); break; } else if ((sleepCounter*sleepInterval) > maxKillWaitTime) { LOG.info("Could not get pid for " + containerIdStr + ". 
Waited for " + maxKillWaitTime + " ms."); break; } else { ++sleepCounter; Thread.sleep(sleepInterval); } } return processId; } public static String getRelativeContainerLogDir(String appIdStr, String containerIdStr) { return appIdStr + Path.SEPARATOR + containerIdStr; } private String getContainerPrivateDir(String appIdStr, String containerIdStr) { return getAppPrivateDir(appIdStr) + Path.SEPARATOR + containerIdStr + Path.SEPARATOR; } private String getAppPrivateDir(String appIdStr) { return ResourceLocalizationService.NM_PRIVATE_DIR + Path.SEPARATOR + appIdStr; } Context getContext() { return context; } public static abstract class ShellScriptBuilder { public static ShellScriptBuilder create() { return Shell.WINDOWS ? new WindowsShellScriptBuilder() : new UnixShellScriptBuilder(); } private static final String LINE_SEPARATOR = System.getProperty("line.separator"); private final StringBuilder sb = new StringBuilder(); public abstract void command(List<String> command) throws IOException; public abstract void whitelistedEnv(String key, String value) throws IOException; public abstract void env(String key, String value) throws IOException; public final void symlink(Path src, Path dst) throws IOException { if (!src.isAbsolute()) { throw new IOException("Source must be absolute"); } if (dst.isAbsolute()) { throw new IOException("Destination must be relative"); } if (dst.toUri().getPath().indexOf('/') != -1) { mkdir(dst.getParent()); } link(src, dst); } @Override public String toString() { return sb.toString(); } public final void write(PrintStream out) throws IOException { out.append(sb); } protected final void line(String... command) { for (String s : command) { sb.append(s); } sb.append(LINE_SEPARATOR); } protected abstract void link(Path src, Path dst) throws IOException; protected abstract void mkdir(Path path) throws IOException; } private static final class UnixShellScriptBuilder extends ShellScriptBuilder { private void errorCheck() { line("hadoop_shell_errorcode=$?"); line("if [ $hadoop_shell_errorcode -ne 0 ]"); line("then"); line(" exit $hadoop_shell_errorcode"); line("fi"); } public UnixShellScriptBuilder(){ line("#!/bin/bash"); line(); } @Override public void command(List<String> command) { line("exec /bin/bash -c \"", StringUtils.join(" ", command), "\""); errorCheck(); } @Override public void whitelistedEnv(String key, String value) { line("export ", key, "=${", key, ":-", "\"", value, "\"}"); } @Override public void env(String key, String value) { line("export ", key, "=\"", value, "\""); } @Override protected void link(Path src, Path dst) throws IOException { line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\""); errorCheck(); } @Override protected void mkdir(Path path) { line("mkdir -p ", path.toString()); errorCheck(); } } private static final class WindowsShellScriptBuilder extends ShellScriptBuilder { private void errorCheck() { line("@if %errorlevel% neq 0 exit /b %errorlevel%"); } private void lineWithLenCheck(String... 
commands) throws IOException { Shell.checkWindowsCommandLineLength(commands); line(commands); } public WindowsShellScriptBuilder() { line("@setlocal"); line(); } @Override public void command(List<String> command) throws IOException { lineWithLenCheck("@call ", StringUtils.join(" ", command)); errorCheck(); } @Override public void whitelistedEnv(String key, String value) throws IOException { lineWithLenCheck("@set ", key, "=", value); errorCheck(); } @Override public void env(String key, String value) throws IOException { lineWithLenCheck("@set ", key, "=", value); errorCheck(); } @Override protected void link(Path src, Path dst) throws IOException { File srcFile = new File(src.toUri().getPath()); String srcFileStr = srcFile.getPath(); String dstFileStr = new File(dst.toString()).getPath(); // If not on Java7+ on Windows, then copy file instead of symlinking. // See also FileUtil#symLink for full explanation. if (!Shell.isJava7OrAbove() && srcFile.isFile()) { lineWithLenCheck(String.format("@copy \"%s\" \"%s\"", srcFileStr, dstFileStr)); errorCheck(); } else { lineWithLenCheck(String.format("@%s symlink \"%s\" \"%s\"", Shell.WINUTILS, dstFileStr, srcFileStr)); errorCheck(); } } @Override protected void mkdir(Path path) throws IOException { lineWithLenCheck(String.format("@if not exist \"%s\" mkdir \"%s\"", path.toString(), path.toString())); errorCheck(); } } private static void putEnvIfNotNull( Map<String, String> environment, String variable, String value) { if (value != null) { environment.put(variable, value); } } private static void putEnvIfAbsent( Map<String, String> environment, String variable) { if (environment.get(variable) == null) { putEnvIfNotNull(environment, variable, System.getenv(variable)); } } public void sanitizeEnv(Map<String, String> environment, Path pwd, List<Path> appDirs, List<String> containerLogDirs, Map<Path, List<String>> resources, Path nmPrivateClasspathJarDir) throws IOException { /** * Non-modifiable environment variables */ environment.put(Environment.CONTAINER_ID.name(), container .getContainerId().toString()); environment.put(Environment.NM_PORT.name(), String.valueOf(this.context.getNodeId().getPort())); environment.put(Environment.NM_HOST.name(), this.context.getNodeId() .getHost()); environment.put(Environment.NM_HTTP_PORT.name(), String.valueOf(this.context.getHttpPort())); environment.put(Environment.LOCAL_DIRS.name(), StringUtils.join(",", appDirs)); environment.put(Environment.LOG_DIRS.name(), StringUtils.join(",", containerLogDirs)); environment.put(Environment.USER.name(), container.getUser()); environment.put(Environment.LOGNAME.name(), container.getUser()); environment.put(Environment.HOME.name(), conf.get( YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR ) ); environment.put(Environment.PWD.name(), pwd.toString()); putEnvIfNotNull(environment, Environment.HADOOP_CONF_DIR.name(), System.getenv(Environment.HADOOP_CONF_DIR.name()) ); if (!Shell.WINDOWS) { environment.put("JVM_PID", "$$"); } /** * Modifiable environment variables */ // allow containers to override these variables String[] whitelist = conf.get(YarnConfiguration.NM_ENV_WHITELIST, YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(","); for(String whitelistEnvVariable : whitelist) { putEnvIfAbsent(environment, whitelistEnvVariable.trim()); } // variables here will be forced in, even if the container has specified them. 
Apps.setEnvFromInputString(environment, conf.get( YarnConfiguration.NM_ADMIN_USER_ENV, YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV), File.pathSeparator); // TODO: Remove Windows check and use this approach on all platforms after // additional testing. See YARN-358. if (Shell.WINDOWS) { String inputClassPath = environment.get(Environment.CLASSPATH.name()); if (inputClassPath != null && !inputClassPath.isEmpty()) { //On non-windows, localized resources //from distcache are available via the classpath as they were placed //there but on windows they are not available when the classpath //jar is created and so they "are lost" and have to be explicitly //added to the classpath instead. This also means that their position //is lost relative to other non-distcache classpath entries which will //break things like mapreduce.job.user.classpath.first. An environment //variable can be set to indicate that distcache entries should come //first boolean preferLocalizedJars = Boolean.valueOf( environment.get(Environment.CLASSPATH_PREPEND_DISTCACHE.name()) ); boolean needsSeparator = false; StringBuilder newClassPath = new StringBuilder(); if (!preferLocalizedJars) { newClassPath.append(inputClassPath); needsSeparator = true; } // Localized resources do not exist at the desired paths yet, because the // container launch script has not run to create symlinks yet. This // means that FileUtil.createJarWithClassPath can't automatically expand // wildcards to separate classpath entries for each file in the manifest. // To resolve this, append classpath entries explicitly for each // resource. for (Map.Entry<Path,List<String>> entry : resources.entrySet()) { boolean targetIsDirectory = new File(entry.getKey().toUri().getPath()) .isDirectory(); for (String linkName : entry.getValue()) { // Append resource. if (needsSeparator) { newClassPath.append(File.pathSeparator); } else { needsSeparator = true; } newClassPath.append(pwd.toString()) .append(Path.SEPARATOR).append(linkName); // FileUtil.createJarWithClassPath must use File.toURI to convert // each file to a URI to write into the manifest's classpath. For // directories, the classpath must have a trailing '/', but // File.toURI only appends the trailing '/' if it is a directory that // already exists. To resolve this, add the classpath entries with // explicit trailing '/' here for any localized resource that targets // a directory. Then, FileUtil.createJarWithClassPath will guarantee // that the resulting entry in the manifest's classpath will have a // trailing '/', and thus refer to a directory instead of a file. if (targetIsDirectory) { newClassPath.append(Path.SEPARATOR); } } } if (preferLocalizedJars) { if (needsSeparator) { newClassPath.append(File.pathSeparator); } newClassPath.append(inputClassPath); } // When the container launches, it takes the parent process's environment // and then adds/overwrites with the entries from the container launch // context. Do the same thing here for correct substitution of // environment variables in the classpath jar manifest. 
Map<String, String> mergedEnv = new HashMap<String, String>( System.getenv()); mergedEnv.putAll(environment); // this is hacky and temporary - it's to preserve the windows secure // behavior but enable non-secure windows to properly build the class // path for access to job.jar/lib/xyz and friends (see YARN-2803) Path jarDir; if (exec instanceof WindowsSecureContainerExecutor) { jarDir = nmPrivateClasspathJarDir; } else { jarDir = pwd; } String[] jarCp = FileUtil.createJarWithClassPath( newClassPath.toString(), jarDir, pwd, mergedEnv); // In a secure cluster the classpath jar must be localized to grant access Path localizedClassPathJar = exec.localizeClasspathJar( new Path(jarCp[0]), pwd, container.getUser()); String replacementClassPath = localizedClassPathJar.toString() + jarCp[1]; environment.put(Environment.CLASSPATH.name(), replacementClassPath); } } // put AuxiliaryService data to environment for (Map.Entry<String, ByteBuffer> meta : containerManager .getAuxServiceMetaData().entrySet()) { AuxiliaryServiceHelper.setServiceDataIntoEnv( meta.getKey(), meta.getValue(), environment); } } public static String getExitCodeFile(String pidFile) { return pidFile + EXIT_CODE_FILE_SUFFIX; } }
33,256
37.626016
125
java
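ContainerLaunch.expandEnvironment() above rewrites the {{VAR}} parameter-expansion markers and the log-dir placeholder per platform before the launch script is written out. The sketch below reproduces just the Unix branch; the marker strings are inlined as assumptions (they mirror what ApplicationConstants defines: "{{", "}}" and "<LOG_DIR>") so the example runs without YARN on the classpath.

// Minimal sketch of the per-platform parameter-expansion step performed by
// ContainerLaunch.expandEnvironment(). Marker values below are assumptions
// inlined from ApplicationConstants to keep the example self-contained.
public class ExpandEnvDemo {
  static final String LOG_DIR_MARKER = "<LOG_DIR>";
  static final String LEFT = "{{";
  static final String RIGHT = "}}";

  // Unix branch: "{{VAR}}" becomes "$VAR"; the Windows branch would
  // rewrite both markers to "%" instead.
  static String expandUnix(String var, String containerLogDir) {
    var = var.replace(LOG_DIR_MARKER, containerLogDir);
    var = var.replace(LEFT, "$");
    var = var.replace(RIGHT, "");
    return var;
  }

  public static void main(String[] args) {
    String cmd = "java -Dlog.dir=" + LOG_DIR_MARKER + " {{JAVA_OPTS}} Main";
    // Prints: java -Dlog.dir=/nm/logs/app_1/c_1 $JAVA_OPTS Main
    System.out.println(expandUnix(cmd, "/nm/logs/app_1/c_1"));
  }
}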
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.concurrent.Future; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ContainerId; import com.google.common.cache.LoadingCache; public class LocalizerContext { private final String user; private final ContainerId containerId; private final Credentials credentials; private final LoadingCache<Path,Future<FileStatus>> statCache; public LocalizerContext(String user, ContainerId containerId, Credentials credentials) { this(user, containerId, credentials, null); } public LocalizerContext(String user, ContainerId containerId, Credentials credentials, LoadingCache<Path,Future<FileStatus>> statCache) { this.user = user; this.containerId = containerId; this.credentials = credentials; this.statCache = statCache; } public String getUser() { return user; } public ContainerId getContainerId() { return containerId; } public Credentials getCredentials() { return credentials; } public LoadingCache<Path,Future<FileStatus>> getStatCache() { return statCache; } }
2,054
29.671642
77
java
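LocalizerContext above threads a Guava LoadingCache through localization so that repeated FileStatus lookups for the same path can be memoized. Below is a hedged sketch of building such a cache; plain Strings and Longs stand in for Path and Future<FileStatus> so the example runs without a Hadoop FileSystem.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.ExecutionException;

// Sketch of the kind of stat cache LocalizerContext carries: a Guava
// LoadingCache memoizing expensive lookups keyed by path.
public class StatCacheDemo {
  public static void main(String[] args) throws ExecutionException {
    LoadingCache<String, Long> statCache = CacheBuilder.newBuilder()
        .maximumSize(10_000)
        .build(new CacheLoader<String, Long>() {
          @Override
          public Long load(String path) {
            System.out.println("miss, stat()ing " + path);
            return (long) path.length();  // stand-in for a real stat call
          }
        });
    statCache.get("/user/app/job.jar");  // miss: loader runs
    statCache.get("/user/app/job.jar");  // hit: served from cache
  }
}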
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceState.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; enum ResourceState { INIT, DOWNLOADING, LOCALIZED, FAILED }
954
35.730769
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTracker.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent; /** * Component tracking resources all of the same {@link LocalResourceVisibility} * */ interface LocalResourcesTracker extends EventHandler<ResourceEvent>, Iterable<LocalizedResource> { boolean remove(LocalizedResource req, DeletionService delService); Path getPathForLocalization(LocalResourceRequest req, Path localDirPath); String getUser(); LocalizedResource getLocalizedResource(LocalResourceRequest request); }
1,624
37.690476
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalCacheDirectoryManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.HashMap; import java.util.LinkedList; import java.util.Queue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.conf.YarnConfiguration; import com.google.common.annotations.VisibleForTesting; /** * {@link LocalCacheDirectoryManager} is used for managing hierarchical * directories for the local cache. It restricts the number of files in * a directory to * {@link YarnConfiguration#NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY}, a limit * that includes the 36 sub-directories (named 0 to 9 and a to z). The root * directory is represented by an empty string. It internally maintains a * vacant directory queue. As soon as the file count for a directory reaches * its limit, no new files are created in it until at least one file is * deleted from it. New sub-directories are created only when a * {@link LocalCacheDirectoryManager#getRelativePathForLocalization()} request * is made and nonFullDirectories is empty. * * Note: this structure only returns a relative localization path; it does not * create the directory on disk. */ public class LocalCacheDirectoryManager { private final int perDirectoryFileLimit; // total 36 = a to z plus 0 to 9 public static final int DIRECTORIES_PER_LEVEL = 36; private Queue<Directory> nonFullDirectories; private HashMap<String, Directory> knownDirectories; private int totalSubDirectories; public LocalCacheDirectoryManager(Configuration conf) { totalSubDirectories = 0; Directory rootDir = new Directory(totalSubDirectories); nonFullDirectories = new LinkedList<Directory>(); knownDirectories = new HashMap<String, Directory>(); knownDirectories.put("", rootDir); nonFullDirectories.add(rootDir); this.perDirectoryFileLimit = conf.getInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, YarnConfiguration.DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY) - 36; } /** * Returns a relative path from the first available vacant * directory. * * @return {@link String} relative path for localization */ public synchronized String getRelativePathForLocalization() { if (nonFullDirectories.isEmpty()) { totalSubDirectories++; Directory newDir = new Directory(totalSubDirectories); nonFullDirectories.add(newDir); knownDirectories.put(newDir.getRelativePath(), newDir); } Directory subDir = nonFullDirectories.peek(); if (subDir.incrementAndGetCount() >= perDirectoryFileLimit) { nonFullDirectories.remove(); } return subDir.getRelativePath(); } /** * Reduces the file count for the directory represented by the given * relative path. The root directory of this local cache directory manager * is represented by an empty string. 
*/ public synchronized void decrementFileCountForPath(String relPath) { relPath = relPath == null ? "" : relPath.trim(); Directory subDir = knownDirectories.get(relPath); int oldCount = subDir.getCount(); if (subDir.decrementAndGetCount() < perDirectoryFileLimit && oldCount >= perDirectoryFileLimit) { nonFullDirectories.add(subDir); } } /** * Increment the file count for a relative directory within the cache * * @param relPath the relative path */ public synchronized void incrementFileCountForPath(String relPath) { relPath = relPath == null ? "" : relPath.trim(); Directory subDir = knownDirectories.get(relPath); if (subDir == null) { int dirnum = Directory.getDirectoryNumber(relPath); totalSubDirectories = Math.max(dirnum, totalSubDirectories); subDir = new Directory(dirnum); nonFullDirectories.add(subDir); knownDirectories.put(subDir.getRelativePath(), subDir); } if (subDir.incrementAndGetCount() >= perDirectoryFileLimit) { nonFullDirectories.remove(subDir); } } /** * Given a path to a directory within a local cache tree return the * root of the cache directory. * * @param path the directory within a cache directory * @return the local cache directory root or null if not found */ public static Path getCacheDirectoryRoot(Path path) { while (path != null) { String name = path.getName(); if (name.length() != 1) { return path; } int dirnum = DIRECTORIES_PER_LEVEL; try { dirnum = Integer.parseInt(name, DIRECTORIES_PER_LEVEL); } catch (NumberFormatException e) { } if (dirnum >= DIRECTORIES_PER_LEVEL) { return path; } path = path.getParent(); } return path; } @VisibleForTesting synchronized Directory getDirectory(String relPath) { return knownDirectories.get(relPath); } /* * It limits the number of files and sub directories in the directory to the * limit LocalCacheDirectoryManager#perDirectoryFileLimit. */ static class Directory { private final String relativePath; private int fileCount; static String getRelativePath(int directoryNo) { String relativePath = ""; if (directoryNo > 0) { String tPath = Integer.toString(directoryNo - 1, DIRECTORIES_PER_LEVEL); StringBuffer sb = new StringBuffer(); if (tPath.length() == 1) { sb.append(tPath.charAt(0)); } else { // this is done to make sure we also reuse 0th sub directory sb.append(Integer.toString( Integer.parseInt(tPath.substring(0, 1), DIRECTORIES_PER_LEVEL) - 1, DIRECTORIES_PER_LEVEL)); } for (int i = 1; i < tPath.length(); i++) { sb.append(Path.SEPARATOR).append(tPath.charAt(i)); } relativePath = sb.toString(); } return relativePath; } static int getDirectoryNumber(String relativePath) { String numStr = relativePath.replace("/", ""); if (relativePath.isEmpty()) { return 0; } if (numStr.length() > 1) { // undo step from getRelativePath() to reuse 0th sub directory String firstChar = Integer.toString( Integer.parseInt(numStr.substring(0, 1), DIRECTORIES_PER_LEVEL) + 1, DIRECTORIES_PER_LEVEL); numStr = firstChar + numStr.substring(1); } return Integer.parseInt(numStr, DIRECTORIES_PER_LEVEL) + 1; } public Directory(int directoryNo) { fileCount = 0; relativePath = getRelativePath(directoryNo); } public int incrementAndGetCount() { return ++fileCount; } public int decrementAndGetCount() { return --fileCount; } public String getRelativePath() { return relativePath; } public int getCount() { return fileCount; } } }
7,642
33.58371
81
java
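The base-36 naming scheme documented in LocalCacheDirectoryManager above is easiest to see with a few concrete inputs. The harness below re-derives Directory.getRelativePath() (the class is package-private, so this copies its logic with '/' as a stand-in for Path.SEPARATOR rather than calling the real class): directory 0 is the root, 1-36 map to the single-character dirs 0-9 and a-z, and 37 starts the next level at "0/0".

// Small harness re-deriving the base-36 hierarchical path scheme of
// LocalCacheDirectoryManager.Directory.
public class CacheDirNaming {
  static final int DIRECTORIES_PER_LEVEL = 36;

  static String relativePath(int directoryNo) {
    if (directoryNo <= 0) {
      return "";  // root directory
    }
    String tPath = Integer.toString(directoryNo - 1, DIRECTORIES_PER_LEVEL);
    StringBuilder sb = new StringBuilder();
    if (tPath.length() == 1) {
      sb.append(tPath.charAt(0));
    } else {
      // shift the leading digit down by one so the 0th subdirectory is reused
      sb.append(Integer.toString(
          Integer.parseInt(tPath.substring(0, 1), DIRECTORIES_PER_LEVEL) - 1,
          DIRECTORIES_PER_LEVEL));
    }
    for (int i = 1; i < tPath.length(); i++) {
      sb.append('/').append(tPath.charAt(i));
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // Prints: 0 -> "", 1 -> "0", 36 -> "z", 37 -> "0/0", 73 -> "1/0", ...
    for (int n : new int[] {0, 1, 10, 36, 37, 72, 73}) {
      System.out.println(n + " -> \"" + relativePath(n) + "\"");
    }
  }
}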
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.net.URISyntaxException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.util.ConverterUtils; public class LocalResourceRequest extends LocalResource implements Comparable<LocalResourceRequest> { private final Path loc; private final long timestamp; private final LocalResourceType type; private final LocalResourceVisibility visibility; private final String pattern; /** * Wrap API resource to match against cache of localized resources. * @param resource Resource requested by container * @throws URISyntaxException If the path is malformed */ public LocalResourceRequest(LocalResource resource) throws URISyntaxException { this(ConverterUtils.getPathFromYarnURL(resource.getResource()), resource.getTimestamp(), resource.getType(), resource.getVisibility(), resource.getPattern()); } LocalResourceRequest(Path loc, long timestamp, LocalResourceType type, LocalResourceVisibility visibility, String pattern) { this.loc = loc; this.timestamp = timestamp; this.type = type; this.visibility = visibility; this.pattern = pattern; } @Override public int hashCode() { int hash = loc.hashCode() ^ (int)((timestamp >>> 32) ^ timestamp) * type.hashCode(); if(pattern != null) { hash = hash ^ pattern.hashCode(); } return hash; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof LocalResourceRequest)) { return false; } final LocalResourceRequest other = (LocalResourceRequest) o; String pattern = getPattern(); String otherPattern = other.getPattern(); boolean patternEquals = (pattern == null && otherPattern == null) || (pattern != null && otherPattern != null && pattern.equals(otherPattern)); return getPath().equals(other.getPath()) && getTimestamp() == other.getTimestamp() && getType() == other.getType() && patternEquals; } @Override public int compareTo(LocalResourceRequest other) { if (this == other) { return 0; } int ret = getPath().compareTo(other.getPath()); if (0 == ret) { ret = (int)(getTimestamp() - other.getTimestamp()); if (0 == ret) { ret = getType().ordinal() - other.getType().ordinal(); if (0 == ret) { String pattern = getPattern(); String otherPattern = other.getPattern(); if (pattern == null && otherPattern == null) { ret = 0; } else if (pattern == null) { ret = -1; } else if (otherPattern == null) { ret = 1; } else { ret = pattern.compareTo(otherPattern); } } } } return ret; } public Path getPath() { return loc; } @Override public long getTimestamp() { 
return timestamp; } @Override public LocalResourceType getType() { return type; } @Override public URL getResource() { return ConverterUtils.getYarnUrlFromPath(loc); } @Override public long getSize() { return -1L; } @Override public LocalResourceVisibility getVisibility() { return visibility; } @Override public String getPattern() { return pattern; } @Override public boolean getShouldBeUploadedToSharedCache() { throw new UnsupportedOperationException(); } @Override public void setShouldBeUploadedToSharedCache( boolean shouldBeUploadedToSharedCache) { throw new UnsupportedOperationException(); } @Override public void setResource(URL resource) { throw new UnsupportedOperationException(); } @Override public void setSize(long size) { throw new UnsupportedOperationException(); } @Override public void setTimestamp(long timestamp) { throw new UnsupportedOperationException(); } @Override public void setType(LocalResourceType type) { throw new UnsupportedOperationException(); } @Override public void setVisibility(LocalResourceVisibility visibility) { throw new UnsupportedOperationException(); } @Override public void setPattern(String pattern) { throw new UnsupportedOperationException(); } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{ "); sb.append(getPath().toString()).append(", "); sb.append(getTimestamp()).append(", "); sb.append(getType()).append(", "); sb.append(getPattern()).append(" }"); return sb.toString(); } }
5,694
26.645631
82
java
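LocalResourceRequest above exists so that localized resources can be deduplicated in a map: equality and hashing cover path, timestamp, type and pattern, and deliberately ignore visibility and size. The sketch below demonstrates that contract with a simplified stand-in key class (hypothetical names, plain Strings instead of YARN records), not the real class's hash formula.

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

// Simplified stand-in for LocalResourceRequest's cache-key contract.
public class ResourceKeyDemo {
  static final class Key {
    final String path; final long timestamp; final String type; final String pattern;
    Key(String path, long timestamp, String type, String pattern) {
      this.path = path; this.timestamp = timestamp; this.type = type; this.pattern = pattern;
    }
    @Override public boolean equals(Object o) {
      if (!(o instanceof Key)) return false;
      Key k = (Key) o;
      // identity = path + timestamp + type + pattern; visibility/size ignored
      return timestamp == k.timestamp && path.equals(k.path)
          && type.equals(k.type) && Objects.equals(pattern, k.pattern);
    }
    @Override public int hashCode() {
      return Objects.hash(path, timestamp, type, pattern);
    }
  }

  public static void main(String[] args) {
    Map<Key, String> cache = new HashMap<>();
    cache.put(new Key("hdfs:/app/job.jar", 42L, "FILE", null), "/local/0/job.jar");
    // A second request with the same identity fields hits the same entry.
    String hit = cache.get(new Key("hdfs:/app/job.jar", 42L, "FILE", null));
    System.out.println("cache size=" + cache.size() + ", hit=" + hit);
  }
}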
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceRetentionSet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.Comparator; import java.util.Iterator; import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; public class ResourceRetentionSet { private long delSize; private long currentSize; private final long targetSize; private final DeletionService delService; private final SortedMap<LocalizedResource,LocalResourcesTracker> retain; ResourceRetentionSet(DeletionService delService, long targetSize) { this(delService, targetSize, new LRUComparator()); } ResourceRetentionSet(DeletionService delService, long targetSize, Comparator<? super LocalizedResource> cmp) { this(delService, targetSize, new TreeMap<LocalizedResource,LocalResourcesTracker>(cmp)); } ResourceRetentionSet(DeletionService delService, long targetSize, SortedMap<LocalizedResource,LocalResourcesTracker> retain) { this.retain = retain; this.delService = delService; this.targetSize = targetSize; } public void addResources(LocalResourcesTracker newTracker) { for (LocalizedResource resource : newTracker) { currentSize += resource.getSize(); if (resource.getRefCount() > 0) { // always retain resources in use continue; } retain.put(resource, newTracker); } for (Iterator<Map.Entry<LocalizedResource,LocalResourcesTracker>> i = retain.entrySet().iterator(); currentSize - delSize > targetSize && i.hasNext();) { Map.Entry<LocalizedResource,LocalResourcesTracker> rsrc = i.next(); LocalizedResource resource = rsrc.getKey(); LocalResourcesTracker tracker = rsrc.getValue(); if (tracker.remove(resource, delService)) { delSize += resource.getSize(); i.remove(); } } } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Cache: ").append(currentSize).append(", "); sb.append("Deleted: ").append(delSize); return sb.toString(); } static class LRUComparator implements Comparator<LocalizedResource> { public int compare(LocalizedResource r1, LocalizedResource r2) { long ret = r1.getTimestamp() - r2.getTimestamp(); if (0 == ret) { return System.identityHashCode(r1) - System.identityHashCode(r2); } return ret > 0 ? 1 : -1; } public boolean equals(Object other) { return this == other; } } }
3,352
33.56701
77
java
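ResourceRetentionSet above sweeps the cache oldest-first: unreferenced resources are ordered by last-use time, then removed until the tracked size drops back under the target. Below is a minimal sketch of that sweep over plain value objects; Entry is a hypothetical stand-in for LocalizedResource and the sizes are made up.

import java.util.Comparator;
import java.util.Iterator;
import java.util.TreeSet;

// Minimal sketch of ResourceRetentionSet's LRU sweep.
public class RetentionSweepDemo {
  static final class Entry {
    final String path; final long size; final long lastUsed; final int refCount;
    Entry(String path, long size, long lastUsed, int refCount) {
      this.path = path; this.size = size; this.lastUsed = lastUsed; this.refCount = refCount;
    }
  }

  public static void main(String[] args) {
    long targetSize = 100;
    long currentSize = 0;
    // order eviction candidates least-recently-used first
    TreeSet<Entry> retain = new TreeSet<>(
        Comparator.comparingLong((Entry e) -> e.lastUsed)
            .thenComparing((Entry e) -> e.path));
    Entry[] entries = {
        new Entry("/cache/a", 60, 1000, 0),
        new Entry("/cache/b", 50, 2000, 1),  // in use: never a candidate
        new Entry("/cache/c", 40, 3000, 0),
    };
    for (Entry e : entries) {
      currentSize += e.size;
      if (e.refCount == 0) {  // always retain resources in use
        retain.add(e);
      }
    }
    for (Iterator<Entry> it = retain.iterator();
         currentSize > targetSize && it.hasNext();) {
      Entry victim = it.next();  // oldest unreferenced entry
      System.out.println("evict " + victim.path + " (" + victim.size + ")");
      currentSize -= victim.size;
      it.remove();
    }
    System.out.println("cache size after sweep: " + currentSize);
  }
}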
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.io.File; import java.io.IOException; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import com.google.common.annotations.VisibleForTesting; /** * A collection of {@link LocalizedResource}s all of same * {@link LocalResourceVisibility}. * */ class LocalResourcesTrackerImpl implements LocalResourcesTracker { static final Log LOG = LogFactory.getLog(LocalResourcesTrackerImpl.class); private static final String RANDOM_DIR_REGEX = "-?\\d+"; private static final Pattern RANDOM_DIR_PATTERN = Pattern .compile(RANDOM_DIR_REGEX); private final String user; private final ApplicationId appId; private final Dispatcher dispatcher; private final ConcurrentMap<LocalResourceRequest,LocalizedResource> localrsrc; private Configuration conf; /* * This flag controls whether this resource tracker uses hierarchical * directories or not. For PRIVATE and PUBLIC resource trackers it * will be set whereas for APPLICATION resource tracker it would * be false. */ private final boolean useLocalCacheDirectoryManager; private ConcurrentHashMap<Path, LocalCacheDirectoryManager> directoryManagers; /* * It is used to keep track of resource into hierarchical directory * while it is getting downloaded. It is useful for reference counting * in case resource localization fails. 
*/ private ConcurrentHashMap<LocalResourceRequest, Path> inProgressLocalResourcesMap; /* * starting with 10 to accommodate 0-9 directories created as a part of * LocalCacheDirectoryManager. So there will be one unique number generator * per APPLICATION, USER and PUBLIC cache. */ private AtomicLong uniqueNumberGenerator = new AtomicLong(9); private NMStateStoreService stateStore; public LocalResourcesTrackerImpl(String user, ApplicationId appId, Dispatcher dispatcher, boolean useLocalCacheDirectoryManager, Configuration conf, NMStateStoreService stateStore) { this(user, appId, dispatcher, new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(), useLocalCacheDirectoryManager, conf, stateStore); } LocalResourcesTrackerImpl(String user, ApplicationId appId, Dispatcher dispatcher, ConcurrentMap<LocalResourceRequest,LocalizedResource> localrsrc, boolean useLocalCacheDirectoryManager, Configuration conf, NMStateStoreService stateStore) { this.appId = appId; this.user = user; this.dispatcher = dispatcher; this.localrsrc = localrsrc; this.useLocalCacheDirectoryManager = useLocalCacheDirectoryManager; if ( this.useLocalCacheDirectoryManager) { directoryManagers = new ConcurrentHashMap<Path, LocalCacheDirectoryManager>(); inProgressLocalResourcesMap = new ConcurrentHashMap<LocalResourceRequest, Path>(); } this.conf = conf; this.stateStore = stateStore; } /* * Synchronizing this method for avoiding races due to multiple ResourceEvent's * coming to LocalResourcesTracker from Public/Private localizer and * Resource Localization Service. */ @Override public synchronized void handle(ResourceEvent event) { LocalResourceRequest req = event.getLocalResourceRequest(); LocalizedResource rsrc = localrsrc.get(req); switch (event.getType()) { case LOCALIZED: if (useLocalCacheDirectoryManager) { inProgressLocalResourcesMap.remove(req); } break; case REQUEST: if (rsrc != null && (!isResourcePresent(rsrc))) { LOG.info("Resource " + rsrc.getLocalPath() + " is missing, localizing it again"); removeResource(req); rsrc = null; } if (null == rsrc) { rsrc = new LocalizedResource(req, dispatcher); localrsrc.put(req, rsrc); } break; case RELEASE: if (null == rsrc) { // The container sent a release event on a resource which // 1) Failed // 2) Removed for some reason (ex. disk is no longer accessible) ResourceReleaseEvent relEvent = (ResourceReleaseEvent) event; LOG.info("Container " + relEvent.getContainer() + " sent RELEASE event on a resource request " + req + " not present in cache."); return; } break; case LOCALIZATION_FAILED: /* * If resource localization fails then Localized resource will be * removed from local cache. 
*/ removeResource(req); break; case RECOVERED: if (rsrc != null) { LOG.warn("Ignoring attempt to recover existing resource " + rsrc); return; } rsrc = recoverResource(req, (ResourceRecoveredEvent) event); localrsrc.put(req, rsrc); break; } rsrc.handle(event); if (event.getType() == ResourceEventType.LOCALIZED) { if (rsrc.getLocalPath() != null) { try { stateStore.finishResourceLocalization(user, appId, buildLocalizedResourceProto(rsrc)); } catch (IOException ioe) { LOG.error("Error storing resource state for " + rsrc, ioe); } } else { LOG.warn("Resource " + rsrc + " localized without a location"); } } } private LocalizedResource recoverResource(LocalResourceRequest req, ResourceRecoveredEvent event) { // unique number for a resource is the directory of the resource Path localDir = event.getLocalPath().getParent(); long rsrcId = Long.parseLong(localDir.getName()); // update ID generator to avoid conflicts with existing resources while (true) { long currentRsrcId = uniqueNumberGenerator.get(); long nextRsrcId = Math.max(currentRsrcId, rsrcId); if (uniqueNumberGenerator.compareAndSet(currentRsrcId, nextRsrcId)) { break; } } incrementFileCountForLocalCacheDirectory(localDir.getParent()); return new LocalizedResource(req, dispatcher); } private LocalizedResourceProto buildLocalizedResourceProto( LocalizedResource rsrc) { return LocalizedResourceProto.newBuilder() .setResource(buildLocalResourceProto(rsrc.getRequest())) .setLocalPath(rsrc.getLocalPath().toString()) .setSize(rsrc.getSize()) .build(); } private LocalResourceProto buildLocalResourceProto(LocalResource lr) { LocalResourcePBImpl lrpb; if (!(lr instanceof LocalResourcePBImpl)) { lr = LocalResource.newInstance(lr.getResource(), lr.getType(), lr.getVisibility(), lr.getSize(), lr.getTimestamp(), lr.getPattern()); } lrpb = (LocalResourcePBImpl) lr; return lrpb.getProto(); } public void incrementFileCountForLocalCacheDirectory(Path cacheDir) { if (useLocalCacheDirectoryManager) { Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot( cacheDir); if (cacheRoot != null) { LocalCacheDirectoryManager dir = directoryManagers.get(cacheRoot); if (dir == null) { dir = new LocalCacheDirectoryManager(conf); LocalCacheDirectoryManager otherDir = directoryManagers.putIfAbsent(cacheRoot, dir); if (otherDir != null) { dir = otherDir; } } if (cacheDir.equals(cacheRoot)) { dir.incrementFileCountForPath(""); } else { String dirStr = cacheDir.toUri().getRawPath(); String rootStr = cacheRoot.toUri().getRawPath(); dir.incrementFileCountForPath( dirStr.substring(rootStr.length() + 1)); } } } } /* * Update the file-count statistics for a local cache-directory. * This will retrieve the localized path for the resource from * 1) inProgressRsrcMap if the resource was under localization and it * failed. * 2) LocalizedResource if the resource is already localized. * From this path it will identify the local directory under which the * resource was localized. Then rest of the path will be used to decrement * file count for the HierarchicalSubDirectory pointing to this relative * path. */ private void decrementFileCountForLocalCacheDirectory(LocalResourceRequest req, LocalizedResource rsrc) { if ( useLocalCacheDirectoryManager) { Path rsrcPath = null; if (inProgressLocalResourcesMap.containsKey(req)) { // This happens when localization of a resource fails. 
      rsrcPath = inProgressLocalResourcesMap.remove(req);
    } else if (rsrc != null && rsrc.getLocalPath() != null) {
      rsrcPath = rsrc.getLocalPath().getParent().getParent();
    }
    if (rsrcPath != null) {
      Path parentPath = new Path(rsrcPath.toUri().getRawPath());
      while (!directoryManagers.containsKey(parentPath)) {
        parentPath = parentPath.getParent();
        if (parentPath == null) {
          return;
        }
      }
      // parentPath cannot be null here: the loop above returns if no
      // tracked cache root is found on the way up.
      String parentDir = parentPath.toUri().getRawPath();
      LocalCacheDirectoryManager dir = directoryManagers.get(parentPath);
      String rsrcDir = rsrcPath.toUri().getRawPath();
      if (rsrcDir.equals(parentDir)) {
        dir.decrementFileCountForPath("");
      } else {
        dir.decrementFileCountForPath(
            rsrcDir.substring(parentDir.length() + 1));
      }
    }
  }
}

  /**
   * Checks whether a resource, once localized, is still present on the
   * local filesystem.
   *
   * @param rsrc the localized resource to check
   * @return false if the resource was localized but its local file no
   *         longer exists; true otherwise
   */
  public boolean isResourcePresent(LocalizedResource rsrc) {
    boolean ret = true;
    if (rsrc.getState() == ResourceState.LOCALIZED) {
      File file = new File(rsrc.getLocalPath().toUri().getRawPath());
      if (!file.exists()) {
        ret = false;
      }
    }
    return ret;
  }

  @Override
  public boolean remove(LocalizedResource rem, DeletionService delService) {
    // current synchronization guaranteed by crude RLS event for cleanup
    LocalizedResource rsrc = localrsrc.get(rem.getRequest());
    if (null == rsrc) {
      LOG.error("Attempt to remove absent resource: " + rem.getRequest()
          + " from " + getUser());
      return true;
    }
    if (rsrc.getRefCount() > 0
        || ResourceState.DOWNLOADING.equals(rsrc.getState()) || rsrc != rem) {
      // internal error
      LOG.error("Attempt to remove resource: " + rsrc
          + " with non-zero refcount");
      return false;
    } else { // ResourceState is LOCALIZED or INIT
      if (ResourceState.LOCALIZED.equals(rsrc.getState())) {
        delService.delete(getUser(), getPathToDelete(rsrc.getLocalPath()));
      }
      removeResource(rem.getRequest());
      LOG.info("Removed " + rsrc.getLocalPath() + " from localized cache");
      return true;
    }
  }

  private void removeResource(LocalResourceRequest req) {
    LocalizedResource rsrc = localrsrc.remove(req);
    decrementFileCountForLocalCacheDirectory(req, rsrc);
    if (rsrc != null) {
      Path localPath = rsrc.getLocalPath();
      if (localPath != null) {
        try {
          stateStore.removeLocalizedResource(user, appId, localPath);
        } catch (IOException e) {
          LOG.error("Unable to remove resource " + rsrc
              + " from state store", e);
        }
      }
    }
  }

  /**
   * Returns the path up to the random directory component.
   */
  private Path getPathToDelete(Path localPath) {
    Path delPath = localPath.getParent();
    String name = delPath.getName();
    Matcher matcher = RANDOM_DIR_PATTERN.matcher(name);
    if (matcher.matches()) {
      return delPath;
    } else {
      LOG.warn("Random directory component did not match. "
          + "Deleting localized path only");
      return localPath;
    }
  }

  @Override
  public String getUser() {
    return user;
  }

  @Override
  public Iterator<LocalizedResource> iterator() {
    return localrsrc.values().iterator();
  }

  /**
   * @return {@link Path} absolute path for localization, which includes the
   *         local directory path and the relative hierarchical path (if the
   *         local cache directory manager is enabled)
   *
   * @param req {@link LocalResourceRequest} resource localization request
   *            used to localize the resource
   * @param localDirPath {@link Path} local directory path
   */
  @Override
  public Path getPathForLocalization(LocalResourceRequest req,
      Path localDirPath) {
    Path rPath = localDirPath;
    if (useLocalCacheDirectoryManager && localDirPath != null) {
      if (!directoryManagers.containsKey(localDirPath)) {
        directoryManagers.putIfAbsent(localDirPath,
            new LocalCacheDirectoryManager(conf));
      }
      LocalCacheDirectoryManager dir = directoryManagers.get(localDirPath);
      rPath = localDirPath;
      String hierarchicalPath = dir.getRelativePathForLocalization();
      // In most scenarios this is the root path, which is an empty string.
      if (!hierarchicalPath.isEmpty()) {
        rPath = new Path(localDirPath, hierarchicalPath);
      }
      inProgressLocalResourcesMap.put(req, rPath);
    }
    rPath = new Path(rPath,
        Long.toString(uniqueNumberGenerator.incrementAndGet()));
    Path localPath = new Path(rPath, req.getPath().getName());
    LocalizedResource rsrc = localrsrc.get(req);
    rsrc.setLocalPath(localPath);
    LocalResource lr = LocalResource.newInstance(req.getResource(),
        req.getType(), req.getVisibility(), req.getSize(),
        req.getTimestamp());
    try {
      stateStore.startResourceLocalization(user, appId,
          ((LocalResourcePBImpl) lr).getProto(), localPath);
    } catch (IOException e) {
      LOG.error("Unable to record localization start for " + rsrc, e);
    }
    return rPath;
  }

  @Override
  public LocalizedResource getLocalizedResource(LocalResourceRequest request) {
    return localrsrc.get(request);
  }

  @VisibleForTesting
  LocalCacheDirectoryManager getDirectoryManager(Path localDirPath) {
    LocalCacheDirectoryManager mgr = null;
    if (useLocalCacheDirectoryManager) {
      mgr = directoryManagers.get(localDirPath);
    }
    return mgr;
  }
}
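/*
 * Illustrative sketch (not part of the original file): shows how
 * getPathForLocalization() above composes a destination path from a local
 * cache root, an optional hierarchical sub-directory, a unique number, and
 * the resource file name. Class and method names here are hypothetical;
 * only the seed of 9 (so the first incrementAndGet() returns 10, leaving
 * the names 0-9 for LocalCacheDirectoryManager's sub-directories) is taken
 * from the code above.
 */
import java.util.concurrent.atomic.AtomicLong;

class LocalizationPathSketch {
  // First value handed out is 10, mirroring uniqueNumberGenerator above.
  private final AtomicLong uniqueNumber = new AtomicLong(9);

  /** Compose &lt;root&gt;[/&lt;hierarchical&gt;]/&lt;unique&gt;/&lt;fileName&gt;. */
  String pathForLocalization(String root, String hierarchical,
      String fileName) {
    String base = hierarchical.isEmpty() ? root : root + "/" + hierarchical;
    return base + "/" + uniqueNumber.incrementAndGet() + "/" + fileName;
  }

  public static void main(String[] args) {
    LocalizationPathSketch s = new LocalizationPathSketch();
    // Prints /local/filecache/10/job.jar: the empty string stands for the
    // cache root, the common case noted in the code above.
    System.out.println(s.pathForLocalization("/local/filecache", "", "job.jar"));
    // Once a level fills up, the directory manager hands out relative
    // sub-directories such as "0"; prints /local/filecache/0/11/dist.tar.gz.
    System.out.println(
        s.pathForLocalization("/local/filecache", "0", "dist.tar.gz"));
  }
}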
16,410
35.550111
105
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; import java.io.DataOutputStream; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletionService; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSError; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import 
org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask; import org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.DirsChangeListener; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceFailedLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources; import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider; import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerBuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.FSDownload; import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheBuilder; import com.google.common.cache.LoadingCache; import com.google.common.util.concurrent.ThreadFactoryBuilder; public class ResourceLocalizationService extends CompositeService implements EventHandler<LocalizationEvent>, LocalizationProtocol { private static final Log LOG = LogFactory.getLog(ResourceLocalizationService.class); public static final String NM_PRIVATE_DIR = "nmPrivate"; public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 0700); private Server server; private InetSocketAddress localizationServerAddress; private long cacheTargetSize; private long cacheCleanupPeriod; private final ContainerExecutor exec; protected final Dispatcher dispatcher; private final DeletionService delService; private LocalizerTracker localizerTracker; private RecordFactory recordFactory; private final ScheduledExecutorService cacheCleanup; private LocalizerTokenSecretManager secretManager; private NMStateStoreService stateStore; private LocalResourcesTracker publicRsrc; private LocalDirsHandlerService dirsHandler; private DirsChangeListener localDirsChangeListener; private DirsChangeListener logDirsChangeListener; private Context nmContext; /** * Map of LocalResourceTrackers keyed by username, for private * resources. */ private final ConcurrentMap<String,LocalResourcesTracker> privateRsrc = new ConcurrentHashMap<String,LocalResourcesTracker>(); /** * Map of LocalResourceTrackers keyed by appid, for application * resources. 
*/ private final ConcurrentMap<String,LocalResourcesTracker> appRsrc = new ConcurrentHashMap<String,LocalResourcesTracker>(); FileContext lfs; public ResourceLocalizationService(Dispatcher dispatcher, ContainerExecutor exec, DeletionService delService, LocalDirsHandlerService dirsHandler, Context context) { super(ResourceLocalizationService.class.getName()); this.exec = exec; this.dispatcher = dispatcher; this.delService = delService; this.dirsHandler = dirsHandler; this.cacheCleanup = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder() .setNameFormat("ResourceLocalizationService Cache Cleanup") .build()); this.stateStore = context.getNMStateStore(); this.nmContext = context; } FileContext getLocalFileContext(Configuration conf) { try { return FileContext.getLocalFSFileContext(conf); } catch (IOException e) { throw new YarnRuntimeException("Failed to access local fs"); } } private void validateConf(Configuration conf) { int perDirFileLimit = conf.getInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, YarnConfiguration.DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY); if (perDirFileLimit <= 36) { LOG.error(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY + " parameter is configured with very low value."); throw new YarnRuntimeException( YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY + " parameter is configured with a value less than 37."); } else { LOG.info("per directory file limit = " + perDirFileLimit); } } @Override public void serviceInit(Configuration conf) throws Exception { this.validateConf(conf); this.publicRsrc = new LocalResourcesTrackerImpl(null, null, dispatcher, true, conf, stateStore); this.recordFactory = RecordFactoryProvider.getRecordFactory(conf); try { lfs = getLocalFileContext(conf); lfs.setUMask(new FsPermission((short) FsPermission.DEFAULT_UMASK)); if (!stateStore.canRecover()|| stateStore.isNewlyCreated()) { cleanUpLocalDirs(lfs, delService); initializeLocalDirs(lfs); initializeLogDirs(lfs); } } catch (Exception e) { throw new YarnRuntimeException( "Failed to initialize LocalizationService", e); } cacheTargetSize = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_TARGET_SIZE_MB, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB) << 20; cacheCleanupPeriod = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS); localizationServerAddress = conf.getSocketAddr( YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT); localizerTracker = createLocalizerTracker(conf); addService(localizerTracker); dispatcher.register(LocalizerEventType.class, localizerTracker); localDirsChangeListener = new DirsChangeListener() { @Override public void onDirsChanged() { checkAndInitializeLocalDirs(); } }; logDirsChangeListener = new DirsChangeListener() { @Override public void onDirsChanged() { initializeLogDirs(lfs); } }; super.serviceInit(conf); } //Recover localized resources after an NM restart public void recoverLocalizedResources(RecoveredLocalizationState state) throws URISyntaxException { LocalResourceTrackerState trackerState = state.getPublicTrackerState(); recoverTrackerResources(publicRsrc, trackerState); for (Map.Entry<String, RecoveredUserResources> userEntry : state.getUserResources().entrySet()) { String user = userEntry.getKey(); RecoveredUserResources userResources = userEntry.getValue(); trackerState = 
userResources.getPrivateTrackerState(); if (!trackerState.isEmpty()) { LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, true, super.getConfig(), stateStore); LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user, tracker); if (oldTracker != null) { tracker = oldTracker; } recoverTrackerResources(tracker, trackerState); } for (Map.Entry<ApplicationId, LocalResourceTrackerState> appEntry : userResources.getAppTrackerStates().entrySet()) { trackerState = appEntry.getValue(); if (!trackerState.isEmpty()) { ApplicationId appId = appEntry.getKey(); String appIdStr = ConverterUtils.toString(appId); LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId, dispatcher, false, super.getConfig(), stateStore); LocalResourcesTracker oldTracker = appRsrc.putIfAbsent(appIdStr, tracker); if (oldTracker != null) { tracker = oldTracker; } recoverTrackerResources(tracker, trackerState); } } } } private void recoverTrackerResources(LocalResourcesTracker tracker, LocalResourceTrackerState state) throws URISyntaxException { for (LocalizedResourceProto proto : state.getLocalizedResources()) { LocalResource rsrc = new LocalResourcePBImpl(proto.getResource()); LocalResourceRequest req = new LocalResourceRequest(rsrc); if (LOG.isDebugEnabled()) { LOG.debug("Recovering localized resource " + req + " at " + proto.getLocalPath()); } tracker.handle(new ResourceRecoveredEvent(req, new Path(proto.getLocalPath()), proto.getSize())); } for (Map.Entry<LocalResourceProto, Path> entry : state.getInProgressResources().entrySet()) { LocalResource rsrc = new LocalResourcePBImpl(entry.getKey()); LocalResourceRequest req = new LocalResourceRequest(rsrc); Path localPath = entry.getValue(); tracker.handle(new ResourceRecoveredEvent(req, localPath, 0)); // delete any in-progress localizations, containers will request again LOG.info("Deleting in-progress localization for " + req + " at " + localPath); tracker.remove(tracker.getLocalizedResource(req), delService); } // TODO: remove untracked directories in local filesystem } @Override public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) { return localizerTracker.processHeartbeat(status); } @Override public void serviceStart() throws Exception { cacheCleanup.scheduleWithFixedDelay(new CacheCleanup(dispatcher), cacheCleanupPeriod, cacheCleanupPeriod, TimeUnit.MILLISECONDS); server = createServer(); server.start(); localizationServerAddress = getConfig().updateConnectAddr(YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, server.getListenerAddress()); LOG.info("Localizer started on port " + server.getPort()); super.serviceStart(); dirsHandler.registerLocalDirsChangeListener(localDirsChangeListener); dirsHandler.registerLogDirsChangeListener(logDirsChangeListener); } LocalizerTracker createLocalizerTracker(Configuration conf) { return new LocalizerTracker(conf); } Server createServer() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); if (UserGroupInformation.isSecurityEnabled()) { secretManager = new LocalizerTokenSecretManager(); } Server server = rpc.getServer(LocalizationProtocol.class, this, localizationServerAddress, conf, secretManager, conf.getInt(YarnConfiguration.NM_LOCALIZER_CLIENT_THREAD_COUNT, YarnConfiguration.DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT)); // Enable service authorization? 
if (conf.getBoolean( CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) { server.refreshServiceAcl(conf, new NMPolicyProvider()); } return server; } @Override public void serviceStop() throws Exception { dirsHandler.deregisterLocalDirsChangeListener(localDirsChangeListener); dirsHandler.deregisterLogDirsChangeListener(logDirsChangeListener); if (server != null) { server.stop(); } cacheCleanup.shutdown(); super.serviceStop(); } @Override public void handle(LocalizationEvent event) { // TODO: create log dir as $logdir/$user/$appId switch (event.getType()) { case INIT_APPLICATION_RESOURCES: handleInitApplicationResources( ((ApplicationLocalizationEvent)event).getApplication()); break; case INIT_CONTAINER_RESOURCES: handleInitContainerResources((ContainerLocalizationRequestEvent) event); break; case CONTAINER_RESOURCES_LOCALIZED: handleContainerResourcesLocalized((ContainerLocalizationEvent) event); break; case CACHE_CLEANUP: handleCacheCleanup(event); break; case CLEANUP_CONTAINER_RESOURCES: handleCleanupContainerResources((ContainerLocalizationCleanupEvent)event); break; case DESTROY_APPLICATION_RESOURCES: handleDestroyApplicationResources( ((ApplicationLocalizationEvent)event).getApplication()); break; default: throw new YarnRuntimeException("Unknown localization event: " + event); } } /** * Handle event received the first time any container is scheduled * by a given application. */ @SuppressWarnings("unchecked") private void handleInitApplicationResources(Application app) { // 0) Create application tracking structs String userName = app.getUser(); privateRsrc.putIfAbsent(userName, new LocalResourcesTrackerImpl(userName, null, dispatcher, true, super.getConfig(), stateStore)); String appIdStr = ConverterUtils.toString(app.getAppId()); appRsrc.putIfAbsent(appIdStr, new LocalResourcesTrackerImpl(app.getUser(), app.getAppId(), dispatcher, false, super.getConfig(), stateStore)); // 1) Signal container init // // This is handled by the ApplicationImpl state machine and allows // containers to proceed with launching. dispatcher.getEventHandler().handle(new ApplicationInitedEvent( app.getAppId())); } /** * For each of the requested resources for a container, determines the * appropriate {@link LocalResourcesTracker} and forwards a * {@link LocalResourceRequest} to that tracker. 
*/ private void handleInitContainerResources( ContainerLocalizationRequestEvent rsrcReqs) { Container c = rsrcReqs.getContainer(); // create a loading cache for the file statuses LoadingCache<Path,Future<FileStatus>> statCache = CacheBuilder.newBuilder().build(FSDownload.createStatusCacheLoader(getConfig())); LocalizerContext ctxt = new LocalizerContext( c.getUser(), c.getContainerId(), c.getCredentials(), statCache); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs = rsrcReqs.getRequestedResources(); for (Map.Entry<LocalResourceVisibility, Collection<LocalResourceRequest>> e : rsrcs.entrySet()) { LocalResourcesTracker tracker = getLocalResourcesTracker(e.getKey(), c.getUser(), c.getContainerId().getApplicationAttemptId() .getApplicationId()); for (LocalResourceRequest req : e.getValue()) { tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt)); if (LOG.isDebugEnabled()) { LOG.debug("Localizing " + req.getPath() + " for container " + c.getContainerId()); } } } } /** * Once a container's resources are localized, kill the corresponding * {@link ContainerLocalizer} */ private void handleContainerResourcesLocalized( ContainerLocalizationEvent event) { Container c = event.getContainer(); String locId = ConverterUtils.toString(c.getContainerId()); localizerTracker.endContainerLocalization(locId); } private void handleCacheCleanup(LocalizationEvent event) { ResourceRetentionSet retain = new ResourceRetentionSet(delService, cacheTargetSize); retain.addResources(publicRsrc); if (LOG.isDebugEnabled()) { LOG.debug("Resource cleanup (public) " + retain); } for (LocalResourcesTracker t : privateRsrc.values()) { retain.addResources(t); if (LOG.isDebugEnabled()) { LOG.debug("Resource cleanup " + t.getUser() + ":" + retain); } } //TODO Check if appRsrcs should also be added to the retention set. } @SuppressWarnings("unchecked") private void handleCleanupContainerResources( ContainerLocalizationCleanupEvent rsrcCleanup) { Container c = rsrcCleanup.getContainer(); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs = rsrcCleanup.getResources(); for (Map.Entry<LocalResourceVisibility, Collection<LocalResourceRequest>> e : rsrcs.entrySet()) { LocalResourcesTracker tracker = getLocalResourcesTracker(e.getKey(), c.getUser(), c.getContainerId().getApplicationAttemptId() .getApplicationId()); for (LocalResourceRequest req : e.getValue()) { tracker.handle(new ResourceReleaseEvent(req, c.getContainerId())); } } String locId = ConverterUtils.toString(c.getContainerId()); localizerTracker.cleanupPrivLocalizers(locId); // Delete the container directories String userName = c.getUser(); String containerIDStr = c.toString(); String appIDStr = ConverterUtils.toString( c.getContainerId().getApplicationAttemptId().getApplicationId()); // Try deleting from good local dirs and full local dirs because a dir might // have gone bad while the app was running(disk full). In addition // a dir might have become good while the app was running. 
// Check if the container dir exists and if it does, try to delete it for (String localDir : dirsHandler.getLocalDirsForCleanup()) { // Delete the user-owned container-dir Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, userName); Path allAppsdir = new Path(userdir, ContainerLocalizer.APPCACHE); Path appDir = new Path(allAppsdir, appIDStr); Path containerDir = new Path(appDir, containerIDStr); submitDirForDeletion(userName, containerDir); // Delete the nmPrivate container-dir Path sysDir = new Path(localDir, NM_PRIVATE_DIR); Path appSysDir = new Path(sysDir, appIDStr); Path containerSysDir = new Path(appSysDir, containerIDStr); submitDirForDeletion(null, containerSysDir); } dispatcher.getEventHandler().handle( new ContainerEvent(c.getContainerId(), ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP)); } private void submitDirForDeletion(String userName, Path dir) { try { lfs.getFileStatus(dir); delService.delete(userName, dir, new Path[] {}); } catch (UnsupportedFileSystemException ue) { LOG.warn("Local dir " + dir + " is an unsupported filesystem", ue); } catch (IOException ie) { // ignore return; } } @SuppressWarnings({"unchecked"}) private void handleDestroyApplicationResources(Application application) { String userName = application.getUser(); ApplicationId appId = application.getAppId(); String appIDStr = application.toString(); LocalResourcesTracker appLocalRsrcsTracker = appRsrc.remove(ConverterUtils.toString(appId)); if (appLocalRsrcsTracker != null) { for (LocalizedResource rsrc : appLocalRsrcsTracker ) { Path localPath = rsrc.getLocalPath(); if (localPath != null) { try { stateStore.removeLocalizedResource(userName, appId, localPath); } catch (IOException e) { LOG.error("Unable to remove resource " + rsrc + " for " + appIDStr + " from state store", e); } } } } else { LOG.warn("Removing uninitialized application " + application); } // Delete the application directories userName = application.getUser(); appIDStr = application.toString(); for (String localDir : dirsHandler.getLocalDirsForCleanup()) { // Delete the user-owned app-dir Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, userName); Path allAppsdir = new Path(userdir, ContainerLocalizer.APPCACHE); Path appDir = new Path(allAppsdir, appIDStr); submitDirForDeletion(userName, appDir); // Delete the nmPrivate app-dir Path sysDir = new Path(localDir, NM_PRIVATE_DIR); Path appSysDir = new Path(sysDir, appIDStr); submitDirForDeletion(null, appSysDir); } // TODO: decrement reference counts of all resources associated with this // app dispatcher.getEventHandler().handle(new ApplicationEvent( application.getAppId(), ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP)); } LocalResourcesTracker getLocalResourcesTracker( LocalResourceVisibility visibility, String user, ApplicationId appId) { switch (visibility) { default: case PUBLIC: return publicRsrc; case PRIVATE: return privateRsrc.get(user); case APPLICATION: return appRsrc.get(ConverterUtils.toString(appId)); } } private String getUserFileCachePath(String user) { return StringUtils.join(Path.SEPARATOR, Arrays.asList(".", ContainerLocalizer.USERCACHE, user, ContainerLocalizer.FILECACHE)); } private String getAppFileCachePath(String user, String appId) { return StringUtils.join(Path.SEPARATOR, Arrays.asList(".", ContainerLocalizer.USERCACHE, user, ContainerLocalizer.APPCACHE, appId, ContainerLocalizer.FILECACHE)); } @VisibleForTesting @Private public PublicLocalizer 
getPublicLocalizer() { return localizerTracker.publicLocalizer; } @VisibleForTesting @Private public LocalizerRunner getLocalizerRunner(String locId) { return localizerTracker.privLocalizers.get(locId); } @VisibleForTesting @Private public Map<String, LocalizerRunner> getPrivateLocalizers() { return localizerTracker.privLocalizers; } /** * Sub-component handling the spawning of {@link ContainerLocalizer}s */ class LocalizerTracker extends AbstractService implements EventHandler<LocalizerEvent> { private final PublicLocalizer publicLocalizer; private final Map<String,LocalizerRunner> privLocalizers; LocalizerTracker(Configuration conf) { this(conf, new HashMap<String,LocalizerRunner>()); } LocalizerTracker(Configuration conf, Map<String,LocalizerRunner> privLocalizers) { super(LocalizerTracker.class.getName()); this.publicLocalizer = new PublicLocalizer(conf); this.privLocalizers = privLocalizers; } @Override public synchronized void serviceStart() throws Exception { publicLocalizer.start(); super.serviceStart(); } public LocalizerHeartbeatResponse processHeartbeat(LocalizerStatus status) { String locId = status.getLocalizerId(); synchronized (privLocalizers) { LocalizerRunner localizer = privLocalizers.get(locId); if (null == localizer) { // TODO process resources anyway LOG.info("Unknown localizer with localizerId " + locId + " is sending heartbeat. Ordering it to DIE"); LocalizerHeartbeatResponse response = recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class); response.setLocalizerAction(LocalizerAction.DIE); return response; } return localizer.processHeartbeat(status.getResources()); } } @Override public void serviceStop() throws Exception { for (LocalizerRunner localizer : privLocalizers.values()) { localizer.interrupt(); } publicLocalizer.interrupt(); super.serviceStop(); } @Override public void handle(LocalizerEvent event) { String locId = event.getLocalizerId(); switch (event.getType()) { case REQUEST_RESOURCE_LOCALIZATION: // 0) find running localizer or start new thread LocalizerResourceRequestEvent req = (LocalizerResourceRequestEvent)event; switch (req.getVisibility()) { case PUBLIC: publicLocalizer.addResource(req); break; case PRIVATE: case APPLICATION: synchronized (privLocalizers) { LocalizerRunner localizer = privLocalizers.get(locId); if (null == localizer) { LOG.info("Created localizer for " + locId); localizer = new LocalizerRunner(req.getContext(), locId); privLocalizers.put(locId, localizer); localizer.start(); } // 1) propagate event localizer.addResource(req); } break; } break; } } public void cleanupPrivLocalizers(String locId) { synchronized (privLocalizers) { LocalizerRunner localizer = privLocalizers.get(locId); if (null == localizer) { return; // ignore; already gone } privLocalizers.remove(locId); localizer.interrupt(); } } public void endContainerLocalization(String locId) { LocalizerRunner localizer; synchronized (privLocalizers) { localizer = privLocalizers.get(locId); if (null == localizer) { return; // ignore } } localizer.endContainerLocalization(); } } private static ExecutorService createLocalizerExecutor(Configuration conf) { int nThreads = conf.getInt( YarnConfiguration.NM_LOCALIZER_FETCH_THREAD_COUNT, YarnConfiguration.DEFAULT_NM_LOCALIZER_FETCH_THREAD_COUNT); ThreadFactory tf = new ThreadFactoryBuilder() .setNameFormat("PublicLocalizer #%d") .build(); return Executors.newFixedThreadPool(nThreads, tf); } class PublicLocalizer extends Thread { final FileContext lfs; final Configuration conf; final ExecutorService threadPool; final 
CompletionService<Path> queue; // Its shared between public localizer and dispatcher thread. final Map<Future<Path>,LocalizerResourceRequestEvent> pending; PublicLocalizer(Configuration conf) { super("Public Localizer"); this.lfs = getLocalFileContext(conf); this.conf = conf; this.pending = Collections.synchronizedMap( new HashMap<Future<Path>, LocalizerResourceRequestEvent>()); this.threadPool = createLocalizerExecutor(conf); this.queue = new ExecutorCompletionService<Path>(threadPool); } public void addResource(LocalizerResourceRequestEvent request) { // TODO handle failures, cancellation, requests by other containers LocalizedResource rsrc = request.getResource(); LocalResourceRequest key = rsrc.getRequest(); LOG.info("Downloading public rsrc:" + key); /* * Here multiple containers may request the same resource. So we need * to start downloading only when * 1) ResourceState == DOWNLOADING * 2) We are able to acquire non blocking semaphore lock. * If not we will skip this resource as either it is getting downloaded * or it FAILED / LOCALIZED. */ if (rsrc.tryAcquire()) { if (rsrc.getState() == ResourceState.DOWNLOADING) { LocalResource resource = request.getResource().getRequest(); try { Path publicRootPath = dirsHandler.getLocalPathForWrite("." + Path.SEPARATOR + ContainerLocalizer.FILECACHE, ContainerLocalizer.getEstimatedSize(resource), true); Path publicDirDestPath = publicRsrc.getPathForLocalization(key, publicRootPath); if (!publicDirDestPath.getParent().equals(publicRootPath)) { DiskChecker.checkDir(new File(publicDirDestPath.toUri().getPath())); } // explicitly synchronize pending here to avoid future task // completing and being dequeued before pending updated synchronized (pending) { pending.put(queue.submit(new FSDownload(lfs, null, conf, publicDirDestPath, resource, request.getContext().getStatCache())), request); } } catch (IOException e) { rsrc.unlock(); publicRsrc.handle(new ResourceFailedLocalizationEvent(request .getResource().getRequest(), e.getMessage())); LOG.error("Local path for public localization is not found. " + " May be disks failed.", e); } catch (IllegalArgumentException ie) { rsrc.unlock(); publicRsrc.handle(new ResourceFailedLocalizationEvent(request .getResource().getRequest(), ie.getMessage())); LOG.error("Local path for public localization is not found. " + " Incorrect path. " + request.getResource().getRequest() .getPath(), ie); } catch (RejectedExecutionException re) { rsrc.unlock(); publicRsrc.handle(new ResourceFailedLocalizationEvent(request .getResource().getRequest(), re.getMessage())); LOG.error("Failed to submit rsrc " + rsrc + " for download." + " Either queue is full or threadpool is shutdown.", re); } } else { rsrc.unlock(); } } } @Override public void run() { try { // TODO shutdown, better error handling esp. 
DU while (!Thread.currentThread().isInterrupted()) { try { Future<Path> completed = queue.take(); LocalizerResourceRequestEvent assoc = pending.remove(completed); try { Path local = completed.get(); if (null == assoc) { LOG.error("Localized unknown resource to " + completed); // TODO delete return; } LocalResourceRequest key = assoc.getResource().getRequest(); publicRsrc.handle(new ResourceLocalizedEvent(key, local, FileUtil .getDU(new File(local.toUri())))); assoc.getResource().unlock(); } catch (ExecutionException e) { LOG.info("Failed to download resource " + assoc.getResource(), e.getCause()); LocalResourceRequest req = assoc.getResource().getRequest(); publicRsrc.handle(new ResourceFailedLocalizationEvent(req, e.getMessage())); assoc.getResource().unlock(); } catch (CancellationException e) { // ignore; shutting down } } catch (InterruptedException e) { return; } } } catch(Throwable t) { LOG.fatal("Error: Shutting down", t); } finally { LOG.info("Public cache exiting"); threadPool.shutdownNow(); } } } /** * Runs the {@link ContainerLocalizer} itself in a separate process with * access to user's credentials. One {@link LocalizerRunner} per localizerId. * */ class LocalizerRunner extends Thread { final LocalizerContext context; final String localizerId; final Map<LocalResourceRequest,LocalizerResourceRequestEvent> scheduled; // Its a shared list between Private Localizer and dispatcher thread. final List<LocalizerResourceRequestEvent> pending; private AtomicBoolean killContainerLocalizer = new AtomicBoolean(false); // TODO: threadsafe, use outer? private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(getConfig()); LocalizerRunner(LocalizerContext context, String localizerId) { super("LocalizerRunner for " + localizerId); this.context = context; this.localizerId = localizerId; this.pending = Collections .synchronizedList(new ArrayList<LocalizerResourceRequestEvent>()); this.scheduled = new HashMap<LocalResourceRequest, LocalizerResourceRequestEvent>(); } public void addResource(LocalizerResourceRequestEvent request) { pending.add(request); } public void endContainerLocalization() { killContainerLocalizer.set(true); } /** * Find next resource to be given to a spawned localizer. * * @return the next resource to be localized */ private LocalResource findNextResource() { synchronized (pending) { for (Iterator<LocalizerResourceRequestEvent> i = pending.iterator(); i.hasNext();) { LocalizerResourceRequestEvent evt = i.next(); LocalizedResource nRsrc = evt.getResource(); // Resource download should take place ONLY if resource is in // Downloading state if (nRsrc.getState() != ResourceState.DOWNLOADING) { i.remove(); continue; } /* * Multiple containers will try to download the same resource. 
So the
           * resource download should start only if:
           * 1) we can acquire a non-blocking semaphore lock on the
           *    resource, and
           * 2) the resource is still in DOWNLOADING state.
           */
          if (nRsrc.tryAcquire()) {
            if (nRsrc.getState() == ResourceState.DOWNLOADING) {
              LocalResourceRequest nextRsrc = nRsrc.getRequest();
              LocalResource next =
                  recordFactory.newRecordInstance(LocalResource.class);
              next.setResource(ConverterUtils.getYarnUrlFromPath(nextRsrc
                  .getPath()));
              next.setTimestamp(nextRsrc.getTimestamp());
              next.setType(nextRsrc.getType());
              next.setVisibility(evt.getVisibility());
              next.setPattern(evt.getPattern());
              scheduled.put(nextRsrc, evt);
              return next;
            } else {
              // Need to release the acquired lock
              nRsrc.unlock();
            }
          }
        }
        return null;
      }
    }

    LocalizerHeartbeatResponse processHeartbeat(
        List<LocalResourceStatus> remoteResourceStatuses) {
      LocalizerHeartbeatResponse response =
          recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
      String user = context.getUser();
      ApplicationId applicationId =
          context.getContainerId().getApplicationAttemptId()
              .getApplicationId();
      boolean fetchFailed = false;
      // Update resource statuses.
      for (LocalResourceStatus stat : remoteResourceStatuses) {
        LocalResource rsrc = stat.getResource();
        LocalResourceRequest req = null;
        try {
          req = new LocalResourceRequest(rsrc);
        } catch (URISyntaxException e) {
          LOG.error("Got exception in parsing URL of LocalResource:"
              + rsrc.getResource(), e);
        }
        LocalizerResourceRequestEvent assoc = scheduled.get(req);
        if (assoc == null) {
          // internal error
          LOG.error("Unknown resource reported: " + req);
          continue;
        }
        switch (stat.getStatus()) {
        case FETCH_SUCCESS:
          // notify resource
          try {
            getLocalResourcesTracker(req.getVisibility(), user, applicationId)
                .handle(new ResourceLocalizedEvent(req, ConverterUtils
                    .getPathFromYarnURL(stat.getLocalPath()),
                    stat.getLocalSize()));
          } catch (URISyntaxException e) {
            LOG.error("Got exception in parsing URL of LocalResource:"
                + stat.getLocalPath(), e);
          }
          // unlock the resource and remove it from the scheduled resource
          // list
          assoc.getResource().unlock();
          scheduled.remove(req);
          break;
        case FETCH_PENDING:
          break;
        case FETCH_FAILURE:
          final String diagnostics = stat.getException().toString();
          LOG.warn(req + " failed: " + diagnostics);
          fetchFailed = true;
          getLocalResourcesTracker(req.getVisibility(), user, applicationId)
              .handle(new ResourceFailedLocalizationEvent(req, diagnostics));
          // unlock the resource and remove it from the scheduled resource
          // list
          assoc.getResource().unlock();
          scheduled.remove(req);
          break;
        default:
          LOG.info("Unknown status: " + stat.getStatus());
          fetchFailed = true;
          getLocalResourcesTracker(req.getVisibility(), user, applicationId)
              .handle(new ResourceFailedLocalizationEvent(req,
                  stat.getException().getMessage()));
          break;
        }
      }
      if (fetchFailed || killContainerLocalizer.get()) {
        response.setLocalizerAction(LocalizerAction.DIE);
        return response;
      }
      // Give the localizer resources for remote-fetching.
      List<ResourceLocalizationSpec> rsrcs =
          new ArrayList<ResourceLocalizationSpec>();
      /*
       * TODO: multiple concurrent downloads per ContainerLocalizer are not
       * supported; we need to decide whether they should be.
       */
      LocalResource next = findNextResource();
      if (next != null) {
        try {
          ResourceLocalizationSpec resource =
              NodeManagerBuilderUtils.newResourceLocalizationSpec(next,
                  getPathForLocalization(next));
          rsrcs.add(resource);
        } catch (IOException e) {
          LOG.error("local path for PRIVATE localization could not be "
              + "found. Disks might have failed.", e);
        } catch (IllegalArgumentException e) {
          LOG.error("Incorrect path for PRIVATE localization."
+ next.getResource().getFile(), e); } catch (URISyntaxException e) { LOG.error( "Got exception in parsing URL of LocalResource:" + next.getResource(), e); } } response.setLocalizerAction(LocalizerAction.LIVE); response.setResourceSpecs(rsrcs); return response; } private Path getPathForLocalization(LocalResource rsrc) throws IOException, URISyntaxException { String user = context.getUser(); ApplicationId appId = context.getContainerId().getApplicationAttemptId().getApplicationId(); LocalResourceVisibility vis = rsrc.getVisibility(); LocalResourcesTracker tracker = getLocalResourcesTracker(vis, user, appId); String cacheDirectory = null; if (vis == LocalResourceVisibility.PRIVATE) {// PRIVATE Only cacheDirectory = getUserFileCachePath(user); } else {// APPLICATION ONLY cacheDirectory = getAppFileCachePath(user, appId.toString()); } Path dirPath = dirsHandler.getLocalPathForWrite(cacheDirectory, ContainerLocalizer.getEstimatedSize(rsrc), false); return tracker.getPathForLocalization(new LocalResourceRequest(rsrc), dirPath); } @Override @SuppressWarnings("unchecked") // dispatcher not typed public void run() { Path nmPrivateCTokensPath = null; Throwable exception = null; try { // Get nmPrivateDir nmPrivateCTokensPath = dirsHandler.getLocalPathForWrite( NM_PRIVATE_DIR + Path.SEPARATOR + String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, localizerId)); // 0) init queue, etc. // 1) write credentials to private dir writeCredentials(nmPrivateCTokensPath); // 2) exec initApplication and wait if (dirsHandler.areDisksHealthy()) { exec.startLocalizer(new LocalizerStartContext.Builder() .setNmPrivateContainerTokens(nmPrivateCTokensPath) .setNmAddr(localizationServerAddress) .setUser(context.getUser()) .setAppId(ConverterUtils.toString(context.getContainerId() .getApplicationAttemptId().getApplicationId())) .setLocId(localizerId) .setDirsHandler(dirsHandler) .build()); } else { throw new IOException("All disks failed. " + dirsHandler.getDisksHealthReport(false)); } // TODO handle ExitCodeException separately? 
} catch (FSError fe) { exception = fe; } catch (Exception e) { exception = e; } finally { if (exception != null) { LOG.info("Localizer failed", exception); // On error, report failure to Container and signal ABORT // Notify resource of failed localization ContainerId cId = context.getContainerId(); dispatcher.getEventHandler().handle(new ContainerResourceFailedEvent( cId, null, exception.getMessage())); } for (LocalizerResourceRequestEvent event : scheduled.values()) { event.getResource().unlock(); } delService.delete(null, nmPrivateCTokensPath, new Path[] {}); } } private Credentials getSystemCredentialsSentFromRM( LocalizerContext localizerContext) throws IOException { ApplicationId appId = localizerContext.getContainerId().getApplicationAttemptId() .getApplicationId(); Credentials systemCredentials = nmContext.getSystemCredentialsForApps().get(appId); if (systemCredentials == null) { return null; } if (LOG.isDebugEnabled()) { LOG.debug("Adding new framework-token for " + appId + " for localization: " + systemCredentials.getAllTokens()); } return systemCredentials; } private void writeCredentials(Path nmPrivateCTokensPath) throws IOException { DataOutputStream tokenOut = null; try { Credentials credentials = context.getCredentials(); if (UserGroupInformation.isSecurityEnabled()) { Credentials systemCredentials = getSystemCredentialsSentFromRM(context); if (systemCredentials != null) { credentials = systemCredentials; } } FileContext lfs = getLocalFileContext(getConfig()); tokenOut = lfs.create(nmPrivateCTokensPath, EnumSet.of(CREATE, OVERWRITE)); LOG.info("Writing credentials to the nmPrivate file " + nmPrivateCTokensPath.toString() + ". Credentials list: "); if (LOG.isDebugEnabled()) { for (Token<? extends TokenIdentifier> tk : credentials .getAllTokens()) { LOG.debug(tk + " : " + buildTokenFingerprint(tk)); } } if (UserGroupInformation.isSecurityEnabled()) { credentials = new Credentials(credentials); LocalizerTokenIdentifier id = secretManager.createIdentifier(); Token<LocalizerTokenIdentifier> localizerToken = new Token<LocalizerTokenIdentifier>(id, secretManager); credentials.addToken(id.getKind(), localizerToken); } credentials.writeTokenStorageToStream(tokenOut); } finally { if (tokenOut != null) { tokenOut.close(); } } } } /** * Returns a fingerprint of a token. The fingerprint is suitable for use in * logging, because it cannot be used to determine the secret. The * fingerprint is built using the first 10 bytes of a SHA-256 hash of the * string encoding of the token. The returned string contains the hex * representation of each byte, delimited by a space. * * @param tk token * @return token fingerprint * @throws IOException if there is an I/O error */ @VisibleForTesting static String buildTokenFingerprint(Token<? 
extends TokenIdentifier> tk) throws IOException { char[] digest = DigestUtils.sha256Hex(tk.encodeToUrlString()).toCharArray(); StringBuilder fingerprint = new StringBuilder(); for (int i = 0; i < 10; ++i) { if (i > 0) { fingerprint.append(' '); } fingerprint.append(digest[2 * i]); fingerprint.append(digest[2 * i + 1]); } return fingerprint.toString(); } static class CacheCleanup extends Thread { private final Dispatcher dispatcher; public CacheCleanup(Dispatcher dispatcher) { super("CacheCleanup"); this.dispatcher = dispatcher; } @Override @SuppressWarnings("unchecked") // dispatcher not typed public void run() { dispatcher.getEventHandler().handle( new LocalizationEvent(LocalizationEventType.CACHE_CLEANUP)); } } private void initializeLocalDirs(FileContext lfs) { List<String> localDirs = dirsHandler.getLocalDirs(); for (String localDir : localDirs) { initializeLocalDir(lfs, localDir); } } private void initializeLocalDir(FileContext lfs, String localDir) { Map<Path, FsPermission> pathPermissionMap = getLocalDirsPathPermissionsMap(localDir); for (Map.Entry<Path, FsPermission> entry : pathPermissionMap.entrySet()) { FileStatus status; try { status = lfs.getFileStatus(entry.getKey()); } catch(FileNotFoundException fs) { status = null; } catch(IOException ie) { String msg = "Could not get file status for local dir " + entry.getKey(); LOG.warn(msg, ie); throw new YarnRuntimeException(msg, ie); } if(status == null) { try { lfs.mkdir(entry.getKey(), entry.getValue(), true); status = lfs.getFileStatus(entry.getKey()); } catch (IOException e) { String msg = "Could not initialize local dir " + entry.getKey(); LOG.warn(msg, e); throw new YarnRuntimeException(msg, e); } } FsPermission perms = status.getPermission(); if(!perms.equals(entry.getValue())) { try { lfs.setPermission(entry.getKey(), entry.getValue()); } catch(IOException ie) { String msg = "Could not set permissions for local dir " + entry.getKey(); LOG.warn(msg, ie); throw new YarnRuntimeException(msg, ie); } } } } private void initializeLogDirs(FileContext lfs) { List<String> logDirs = dirsHandler.getLogDirs(); for (String logDir : logDirs) { initializeLogDir(lfs, logDir); } } private void initializeLogDir(FileContext lfs, String logDir) { try { lfs.mkdir(new Path(logDir), null, true); } catch (FileAlreadyExistsException fe) { // do nothing } catch (IOException e) { String msg = "Could not initialize log dir " + logDir; LOG.warn(msg, e); throw new YarnRuntimeException(msg, e); } } private void cleanUpLocalDirs(FileContext lfs, DeletionService del) { for (String localDir : dirsHandler.getLocalDirsForCleanup()) { cleanUpLocalDir(lfs, del, localDir); } } private void cleanUpLocalDir(FileContext lfs, DeletionService del, String localDir) { long currentTimeStamp = System.currentTimeMillis(); renameLocalDir(lfs, localDir, ContainerLocalizer.USERCACHE, currentTimeStamp); renameLocalDir(lfs, localDir, ContainerLocalizer.FILECACHE, currentTimeStamp); renameLocalDir(lfs, localDir, ResourceLocalizationService.NM_PRIVATE_DIR, currentTimeStamp); try { deleteLocalDir(lfs, del, localDir); } catch (IOException e) { // Do nothing, just give the warning LOG.warn("Failed to delete localDir: " + localDir); } } private void renameLocalDir(FileContext lfs, String localDir, String localSubDir, long currentTimeStamp) { try { lfs.rename(new Path(localDir, localSubDir), new Path( localDir, localSubDir + "_DEL_" + currentTimeStamp)); } catch (FileNotFoundException ex) { // No need to handle this exception // localSubDir may not be exist } catch (Exception ex) { // 
Do nothing, just give the warning LOG.warn("Failed to rename the local file under " + localDir + "/" + localSubDir); } } private void deleteLocalDir(FileContext lfs, DeletionService del, String localDir) throws IOException { RemoteIterator<FileStatus> fileStatus = lfs.listStatus(new Path(localDir)); if (fileStatus != null) { while (fileStatus.hasNext()) { FileStatus status = fileStatus.next(); try { if (status.getPath().getName().matches(".*" + ContainerLocalizer.USERCACHE + "_DEL_.*")) { LOG.info("usercache path : " + status.getPath().toString()); cleanUpFilesPerUserDir(lfs, del, status.getPath()); } else if (status.getPath().getName() .matches(".*" + NM_PRIVATE_DIR + "_DEL_.*") || status.getPath().getName() .matches(".*" + ContainerLocalizer.FILECACHE + "_DEL_.*")) { del.delete(null, status.getPath(), new Path[] {}); } } catch (IOException ex) { // Do nothing, just give the warning LOG.warn("Failed to delete this local Directory: " + status.getPath().getName()); } } } } private void cleanUpFilesPerUserDir(FileContext lfs, DeletionService del, Path userDirPath) throws IOException { RemoteIterator<FileStatus> userDirStatus = lfs.listStatus(userDirPath); FileDeletionTask dependentDeletionTask = del.createFileDeletionTask(null, userDirPath, new Path[] {}); if (userDirStatus != null && userDirStatus.hasNext()) { List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>(); while (userDirStatus.hasNext()) { FileStatus status = userDirStatus.next(); String owner = status.getOwner(); FileDeletionTask deletionTask = del.createFileDeletionTask(owner, null, new Path[] { status.getPath() }); deletionTask.addFileDeletionTaskDependency(dependentDeletionTask); deletionTasks.add(deletionTask); } for (FileDeletionTask task : deletionTasks) { del.scheduleFileDeletionTask(task); } } else { del.scheduleFileDeletionTask(dependentDeletionTask); } } /** * Check each local dir to ensure it has been setup correctly and will * attempt to fix any issues it finds. 
*/ @VisibleForTesting void checkAndInitializeLocalDirs() { List<String> dirs = dirsHandler.getLocalDirs(); List<String> checkFailedDirs = new ArrayList<String>(); for (String dir : dirs) { try { checkLocalDir(dir); } catch (YarnRuntimeException e) { checkFailedDirs.add(dir); } } for (String dir : checkFailedDirs) { LOG.info("Attempting to initialize " + dir); initializeLocalDir(lfs, dir); try { checkLocalDir(dir); } catch (YarnRuntimeException e) { String msg = "Failed to set up local dir " + dir + ", which was marked as good."; LOG.warn(msg, e); throw new YarnRuntimeException(msg, e); } } } private boolean checkLocalDir(String localDir) { Map<Path, FsPermission> pathPermissionMap = getLocalDirsPathPermissionsMap(localDir); for (Map.Entry<Path, FsPermission> entry : pathPermissionMap.entrySet()) { FileStatus status; try { status = lfs.getFileStatus(entry.getKey()); } catch (Exception e) { String msg = "Could not carry out resource dir checks for " + localDir + ", which was marked as good"; LOG.warn(msg, e); throw new YarnRuntimeException(msg, e); } if (!status.getPermission().equals(entry.getValue())) { String msg = "Permissions incorrectly set for dir " + entry.getKey() + ", should be " + entry.getValue() + ", actual value = " + status.getPermission(); LOG.warn(msg); throw new YarnRuntimeException(msg); } } return true; } private Map<Path, FsPermission> getLocalDirsPathPermissionsMap(String localDir) { Map<Path, FsPermission> localDirPathFsPermissionsMap = new HashMap<Path, FsPermission>(); FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask()); FsPermission nmPrivatePermission = NM_PRIVATE_PERM.applyUMask(lfs.getUMask()); Path userDir = new Path(localDir, ContainerLocalizer.USERCACHE); Path fileDir = new Path(localDir, ContainerLocalizer.FILECACHE); Path sysDir = new Path(localDir, NM_PRIVATE_DIR); localDirPathFsPermissionsMap.put(userDir, defaultPermission); localDirPathFsPermissionsMap.put(fileDir, defaultPermission); localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission); return localDirPathFsPermissionsMap; } }
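// Hypothetical sketch (not part of the Hadoop source): the fingerprint loop
// above renders the first ten hex-digit pairs of a SHA-256 digest separated
// by spaces. This standalone demo reproduces that format using
// java.security.MessageDigest instead of commons-codec's DigestUtils.
class TokenFingerprintSketch {
  public static void main(String[] args) throws Exception {
    byte[] raw = java.security.MessageDigest.getInstance("SHA-256")
        .digest("some-encoded-token".getBytes("UTF-8"));
    StringBuilder hex = new StringBuilder();
    for (byte b : raw) {
      hex.append(String.format("%02x", b)); // two lowercase hex digits per byte
    }
    StringBuilder fingerprint = new StringBuilder();
    for (int i = 0; i < 10; ++i) {
      if (i > 0) {
        fingerprint.append(' ');
      }
      fingerprint.append(hex.charAt(2 * i)).append(hex.charAt(2 * i + 1));
    }
    System.out.println(fingerprint); // e.g. "3f a1 0c 7e 55 90 b2 4d e8 11"
  }
}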
59,463
38.458527
143
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.LinkedList; import java.util.Queue; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceFailedLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent; import org.apache.hadoop.yarn.state.InvalidStateTransitionException; import org.apache.hadoop.yarn.state.SingleArcTransition; import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; /** * Datum representing a localized resource. Holds the statemachine of a * resource. State of the resource is one of {@link ResourceState}. 
* */ public class LocalizedResource implements EventHandler<ResourceEvent> { private static final Log LOG = LogFactory.getLog(LocalizedResource.class); volatile Path localPath; volatile long size = -1; final LocalResourceRequest rsrc; final Dispatcher dispatcher; final StateMachine<ResourceState,ResourceEventType,ResourceEvent> stateMachine; final Semaphore sem = new Semaphore(1); final Queue<ContainerId> ref; // Queue of containers using this localized // resource private final Lock readLock; private final Lock writeLock; final AtomicLong timestamp = new AtomicLong(currentTime()); private static final StateMachineFactory<LocalizedResource,ResourceState, ResourceEventType,ResourceEvent> stateMachineFactory = new StateMachineFactory<LocalizedResource,ResourceState, ResourceEventType,ResourceEvent>(ResourceState.INIT) // From INIT (ref == 0, awaiting req) .addTransition(ResourceState.INIT, ResourceState.DOWNLOADING, ResourceEventType.REQUEST, new FetchResourceTransition()) .addTransition(ResourceState.INIT, ResourceState.LOCALIZED, ResourceEventType.RECOVERED, new RecoveredTransition()) // From DOWNLOADING (ref > 0, may be localizing) .addTransition(ResourceState.DOWNLOADING, ResourceState.DOWNLOADING, ResourceEventType.REQUEST, new FetchResourceTransition()) // TODO: Duplicate addition!! .addTransition(ResourceState.DOWNLOADING, ResourceState.LOCALIZED, ResourceEventType.LOCALIZED, new FetchSuccessTransition()) .addTransition(ResourceState.DOWNLOADING,ResourceState.DOWNLOADING, ResourceEventType.RELEASE, new ReleaseTransition()) .addTransition(ResourceState.DOWNLOADING, ResourceState.FAILED, ResourceEventType.LOCALIZATION_FAILED, new FetchFailedTransition()) // From LOCALIZED (ref >= 0, on disk) .addTransition(ResourceState.LOCALIZED, ResourceState.LOCALIZED, ResourceEventType.REQUEST, new LocalizedResourceTransition()) .addTransition(ResourceState.LOCALIZED, ResourceState.LOCALIZED, ResourceEventType.RELEASE, new ReleaseTransition()) .installTopology(); public LocalizedResource(LocalResourceRequest rsrc, Dispatcher dispatcher) { this.rsrc = rsrc; this.dispatcher = dispatcher; this.ref = new LinkedList<ContainerId>(); ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); this.readLock = readWriteLock.readLock(); this.writeLock = readWriteLock.writeLock(); this.stateMachine = stateMachineFactory.make(this); } public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{ ").append(rsrc.toString()).append(",") .append(getState() == ResourceState.LOCALIZED ? getLocalPath() + "," + getSize() : "pending").append(",["); try { this.readLock.lock(); for (ContainerId c : ref) { sb.append("(").append(c.toString()).append(")"); } sb.append("],").append(getTimestamp()).append(",").append(getState()) .append("}"); return sb.toString(); } finally { this.readLock.unlock(); } } private void release(ContainerId container) { if (ref.remove(container)) { // updating the timestamp only in case of success. 
timestamp.set(currentTime()); } else { LOG.info("Container " + container + " doesn't exist in the container list of the Resource " + this + " to which it sent RELEASE event"); } } private long currentTime() { return System.nanoTime(); } public ResourceState getState() { this.readLock.lock(); try { return stateMachine.getCurrentState(); } finally { this.readLock.unlock(); } } public LocalResourceRequest getRequest() { return rsrc; } public Path getLocalPath() { return localPath; } public void setLocalPath(Path localPath) { this.localPath = Path.getPathWithoutSchemeAndAuthority(localPath); } public long getTimestamp() { return timestamp.get(); } public long getSize() { return size; } public int getRefCount() { return ref.size(); } public boolean tryAcquire() { return sem.tryAcquire(); } public void unlock() { sem.release(); } @Override public void handle(ResourceEvent event) { try { this.writeLock.lock(); Path resourcePath = event.getLocalResourceRequest().getPath(); LOG.debug("Processing " + resourcePath + " of type " + event.getType()); ResourceState oldState = this.stateMachine.getCurrentState(); ResourceState newState = null; try { newState = this.stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { LOG.warn("Can't handle this event at current state", e); } if (oldState != newState) { LOG.info("Resource " + resourcePath + (localPath != null ? "(->" + localPath + ")": "") + " transitioned from " + oldState + " to " + newState); } } finally { this.writeLock.unlock(); } } static abstract class ResourceTransition implements SingleArcTransition<LocalizedResource,ResourceEvent> { // typedef } /** * Transition from INIT to DOWNLOADING. * Sends a {@link LocalizerResourceRequestEvent} to the * {@link ResourceLocalizationService}. */ @SuppressWarnings("unchecked") // dispatcher not typed private static class FetchResourceTransition extends ResourceTransition { @Override public void transition(LocalizedResource rsrc, ResourceEvent event) { ResourceRequestEvent req = (ResourceRequestEvent) event; LocalizerContext ctxt = req.getContext(); ContainerId container = ctxt.getContainerId(); rsrc.ref.add(container); rsrc.dispatcher.getEventHandler().handle( new LocalizerResourceRequestEvent(rsrc, req.getVisibility(), ctxt, req.getLocalResourceRequest().getPattern())); } } /** * Resource localized, notify waiting containers. */ @SuppressWarnings("unchecked") // dispatcher not typed private static class FetchSuccessTransition extends ResourceTransition { @Override public void transition(LocalizedResource rsrc, ResourceEvent event) { ResourceLocalizedEvent locEvent = (ResourceLocalizedEvent) event; rsrc.localPath = Path.getPathWithoutSchemeAndAuthority(locEvent.getLocation()); rsrc.size = locEvent.getSize(); for (ContainerId container : rsrc.ref) { rsrc.dispatcher.getEventHandler().handle( new ContainerResourceLocalizedEvent( container, rsrc.rsrc, rsrc.localPath)); } } } /** * Resource localization failed, notify waiting containers. 
*/ @SuppressWarnings("unchecked") private static class FetchFailedTransition extends ResourceTransition { @Override public void transition(LocalizedResource rsrc, ResourceEvent event) { ResourceFailedLocalizationEvent failedEvent = (ResourceFailedLocalizationEvent) event; Queue<ContainerId> containers = rsrc.ref; for (ContainerId container : containers) { rsrc.dispatcher.getEventHandler().handle( new ContainerResourceFailedEvent(container, failedEvent .getLocalResourceRequest(), failedEvent.getDiagnosticMessage())); } } } /** * Resource already localized, notify immediately. */ @SuppressWarnings("unchecked") // dispatcher not typed private static class LocalizedResourceTransition extends ResourceTransition { @Override public void transition(LocalizedResource rsrc, ResourceEvent event) { // notify waiting containers ResourceRequestEvent reqEvent = (ResourceRequestEvent) event; ContainerId container = reqEvent.getContext().getContainerId(); rsrc.ref.add(container); rsrc.dispatcher.getEventHandler().handle( new ContainerResourceLocalizedEvent( container, rsrc.rsrc, rsrc.localPath)); } } /** * Decrement resource count, update timestamp. */ private static class ReleaseTransition extends ResourceTransition { @Override public void transition(LocalizedResource rsrc, ResourceEvent event) { // Note: assumes that localizing container must succeed or fail ResourceReleaseEvent relEvent = (ResourceReleaseEvent) event; rsrc.release(relEvent.getContainer()); } } private static class RecoveredTransition extends ResourceTransition { @Override public void transition(LocalizedResource rsrc, ResourceEvent event) { ResourceRecoveredEvent recoveredEvent = (ResourceRecoveredEvent) event; rsrc.localPath = recoveredEvent.getLocalPath(); rsrc.size = recoveredEvent.getSize(); } } }
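// Hypothetical sketch (not part of the Hadoop source) of the lifecycle the
// state machine factory above encodes: INIT -> DOWNLOADING on REQUEST,
// INIT -> LOCALIZED on RECOVERED, DOWNLOADING -> LOCALIZED on LOCALIZED,
// DOWNLOADING -> FAILED on LOCALIZATION_FAILED, with REQUEST and RELEASE
// self-loops while DOWNLOADING or LOCALIZED. Enum names mirror
// ResourceState and ResourceEventType.
class ResourceLifecycleSketch {
  enum State { INIT, DOWNLOADING, LOCALIZED, FAILED }
  enum Event { REQUEST, RECOVERED, LOCALIZED, RELEASE, LOCALIZATION_FAILED }
  static State next(State s, Event e) {
    switch (s) {
      case INIT:
        if (e == Event.REQUEST) { return State.DOWNLOADING; }
        if (e == Event.RECOVERED) { return State.LOCALIZED; }
        break;
      case DOWNLOADING:
        if (e == Event.REQUEST || e == Event.RELEASE) { return State.DOWNLOADING; }
        if (e == Event.LOCALIZED) { return State.LOCALIZED; }
        if (e == Event.LOCALIZATION_FAILED) { return State.FAILED; }
        break;
      case LOCALIZED:
        if (e == Event.REQUEST || e == Event.RELEASE) { return State.LOCALIZED; }
        break;
      default:
        break;
    }
    throw new IllegalStateException("invalid transition: " + s + " on " + e);
  }
  public static void main(String[] args) {
    State s = State.INIT;
    s = next(s, Event.REQUEST);   // a container asks for the resource
    s = next(s, Event.LOCALIZED); // download finished
    System.out.println(s);        // LOCALIZED
  }
}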
11,729
36.596154
114
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.io.DataInputStream; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.FSDownload; import com.google.common.util.concurrent.ThreadFactoryBuilder; public class ContainerLocalizer { static final Log LOG = LogFactory.getLog(ContainerLocalizer.class); public 
static final String FILECACHE = "filecache"; public static final String APPCACHE = "appcache"; public static final String USERCACHE = "usercache"; public static final String OUTPUTDIR = "output"; public static final String TOKEN_FILE_NAME_FMT = "%s.tokens"; public static final String WORKDIR = "work"; private static final String APPCACHE_CTXT_FMT = "%s.app.cache.dirs"; private static final String USERCACHE_CTXT_FMT = "%s.user.cache.dirs"; private static final FsPermission FILECACHE_PERMS = new FsPermission((short)0710); private final String user; private final String appId; private final List<Path> localDirs; private final String localizerId; private final FileContext lfs; private final Configuration conf; private final RecordFactory recordFactory; private final Map<LocalResource,Future<Path>> pendingResources; private final String appCacheDirContextName; public ContainerLocalizer(FileContext lfs, String user, String appId, String localizerId, List<Path> localDirs, RecordFactory recordFactory) throws IOException { if (null == user) { throw new IOException("Cannot initialize for null user"); } if (null == localizerId) { throw new IOException("Cannot initialize for null containerId"); } this.lfs = lfs; this.user = user; this.appId = appId; this.localDirs = localDirs; this.localizerId = localizerId; this.recordFactory = recordFactory; this.conf = new Configuration(); this.appCacheDirContextName = String.format(APPCACHE_CTXT_FMT, appId); this.pendingResources = new HashMap<LocalResource,Future<Path>>(); } LocalizationProtocol getProxy(final InetSocketAddress nmAddr) { YarnRPC rpc = YarnRPC.create(conf); return (LocalizationProtocol) rpc.getProxy(LocalizationProtocol.class, nmAddr, conf); } @SuppressWarnings("deprecation") public int runLocalization(final InetSocketAddress nmAddr) throws IOException, InterruptedException { // load credentials initDirs(conf, user, appId, lfs, localDirs); final Credentials creds = new Credentials(); DataInputStream credFile = null; try { // assume credentials in cwd // TODO: Fix Path tokenPath = new Path(String.format(TOKEN_FILE_NAME_FMT, localizerId)); credFile = lfs.open(tokenPath); creds.readTokenStorageStream(credFile); // Explicitly deleting token file. lfs.delete(tokenPath, false); } finally { if (credFile != null) { credFile.close(); } } // create localizer context UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(user); remoteUser.addToken(creds.getToken(LocalizerTokenIdentifier.KIND)); final LocalizationProtocol nodeManager = remoteUser.doAs(new PrivilegedAction<LocalizationProtocol>() { @Override public LocalizationProtocol run() { return getProxy(nmAddr); } }); // create user context UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user); for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) { ugi.addToken(token); } ExecutorService exec = null; try { exec = createDownloadThreadPool(); CompletionService<Path> ecs = createCompletionService(exec); localizeFiles(nodeManager, ecs, ugi); return 0; } catch (Throwable e) { // Print traces to stdout so that they can be logged by the NM address // space. 
e.printStackTrace(System.out); return -1; } finally { try { if (exec != null) { exec.shutdownNow(); } LocalDirAllocator.removeContext(appCacheDirContextName); } finally { closeFileSystems(ugi); } } } ExecutorService createDownloadThreadPool() { return Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() .setNameFormat("ContainerLocalizer Downloader").build()); } CompletionService<Path> createCompletionService(ExecutorService exec) { return new ExecutorCompletionService<Path>(exec); } Callable<Path> download(Path path, LocalResource rsrc, UserGroupInformation ugi) throws IOException { DiskChecker.checkDir(new File(path.toUri().getRawPath())); return new FSDownload(lfs, ugi, conf, path, rsrc); } static long getEstimatedSize(LocalResource rsrc) { if (rsrc.getSize() < 0) { return -1; } switch (rsrc.getType()) { case ARCHIVE: case PATTERN: return 5 * rsrc.getSize(); case FILE: default: return rsrc.getSize(); } } void sleep(int duration) throws InterruptedException { TimeUnit.SECONDS.sleep(duration); } protected void closeFileSystems(UserGroupInformation ugi) { try { FileSystem.closeAllForUGI(ugi); } catch (IOException e) { LOG.warn("Failed to close filesystems: ", e); } } protected void localizeFiles(LocalizationProtocol nodemanager, CompletionService<Path> cs, UserGroupInformation ugi) throws IOException { while (true) { try { LocalizerStatus status = createStatus(); LocalizerHeartbeatResponse response = nodemanager.heartbeat(status); switch (response.getLocalizerAction()) { case LIVE: List<ResourceLocalizationSpec> newRsrcs = response.getResourceSpecs(); for (ResourceLocalizationSpec newRsrc : newRsrcs) { if (!pendingResources.containsKey(newRsrc.getResource())) { pendingResources.put(newRsrc.getResource(), cs.submit(download( new Path(newRsrc.getDestinationDirectory().getFile()), newRsrc.getResource(), ugi))); } } break; case DIE: // killall running localizations for (Future<Path> pending : pendingResources.values()) { pending.cancel(true); } status = createStatus(); // ignore response try { nodemanager.heartbeat(status); } catch (YarnException e) { } return; } cs.poll(1000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { return; } catch (YarnException e) { // TODO cleanup return; } } } /** * Create the payload for the HeartBeat. Mainly the list of * {@link LocalResourceStatus}es * * @return a {@link LocalizerStatus} that can be sent via heartbeat. * @throws InterruptedException */ private LocalizerStatus createStatus() throws InterruptedException { final List<LocalResourceStatus> currentResources = new ArrayList<LocalResourceStatus>(); // TODO: Synchronization?? 
for (Iterator<LocalResource> i = pendingResources.keySet().iterator(); i.hasNext();) { LocalResource rsrc = i.next(); LocalResourceStatus stat = recordFactory.newRecordInstance(LocalResourceStatus.class); stat.setResource(rsrc); Future<Path> fPath = pendingResources.get(rsrc); if (fPath.isDone()) { try { Path localPath = fPath.get(); stat.setLocalPath( ConverterUtils.getYarnUrlFromPath(localPath)); stat.setLocalSize( FileUtil.getDU(new File(localPath.getParent().toUri()))); stat.setStatus(ResourceStatusType.FETCH_SUCCESS); } catch (ExecutionException e) { stat.setStatus(ResourceStatusType.FETCH_FAILURE); stat.setException(SerializedException.newInstance(e.getCause())); } catch (CancellationException e) { stat.setStatus(ResourceStatusType.FETCH_FAILURE); stat.setException(SerializedException.newInstance(e)); } // TODO shouldn't remove until ACK i.remove(); } else { stat.setStatus(ResourceStatusType.FETCH_PENDING); } currentResources.add(stat); } LocalizerStatus status = recordFactory.newRecordInstance(LocalizerStatus.class); status.setLocalizerId(localizerId); status.addAllResources(currentResources); return status; } /** * Returns the JVM options to launch the resource localizer. * @param conf the configuration properties used to launch the resource localizer. */ public static List<String> getJavaOpts(Configuration conf) { String opts = conf.get(YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY, YarnConfiguration.NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT); return Arrays.asList(opts.split(" ")); } /** * Adds the ContainerLocalizer arguments for a {@link ShellCommandExecutor}, * as expected by ContainerLocalizer.main. * @param command the current ShellCommandExecutor command line * @param user localization user * @param appId localized app id * @param locId localizer id * @param nmAddr nodemanager address * @param localDirs list of local dirs */ public static void buildMainArgs(List<String> command, String user, String appId, String locId, InetSocketAddress nmAddr, List<String> localDirs) { command.add(ContainerLocalizer.class.getName()); command.add(user); command.add(appId); command.add(locId); command.add(nmAddr.getHostName()); command.add(Integer.toString(nmAddr.getPort())); for (String dir : localDirs) { command.add(dir); } } public static void main(String[] argv) throws Throwable { Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); // usage: $0 user appId locId host port app_log_dir user_dir [user_dir]* // let $x = $x/usercache for $local.dir // MKDIR $x/$user/appcache/$appid // MKDIR $x/$user/appcache/$appid/output // MKDIR $x/$user/appcache/$appid/filecache // LOAD $x/$user/appcache/$appid/appTokens try { String user = argv[0]; String appId = argv[1]; String locId = argv[2]; InetSocketAddress nmAddr = new InetSocketAddress(argv[3], Integer.parseInt(argv[4])); String[] sLocaldirs = Arrays.copyOfRange(argv, 5, argv.length); ArrayList<Path> localDirs = new ArrayList<Path>(sLocaldirs.length); for (String sLocaldir : sLocaldirs) { localDirs.add(new Path(sLocaldir)); } final String uid = UserGroupInformation.getCurrentUser().getShortUserName(); if (!user.equals(uid)) { // TODO: fail localization LOG.warn("Localization running as " + uid + " not " + user); } ContainerLocalizer localizer = new ContainerLocalizer(FileContext.getLocalFSFileContext(), user, appId, locId, localDirs, RecordFactoryProvider.getRecordFactory(null)); int nRet = localizer.runLocalization(nmAddr); if (LOG.isDebugEnabled()) { LOG.debug(String.format("nRet: %d", nRet)); } System.exit(nRet); } 
catch (Throwable e) { // Print error to stdout so that LCE can use it. e.printStackTrace(System.out); LOG.error("Exception in main:", e); throw e; } } private static void initDirs(Configuration conf, String user, String appId, FileContext lfs, List<Path> localDirs) throws IOException { if (null == localDirs || 0 == localDirs.size()) { throw new IOException("Cannot initialize without local dirs"); } String[] appsFileCacheDirs = new String[localDirs.size()]; String[] usersFileCacheDirs = new String[localDirs.size()]; for (int i = 0, n = localDirs.size(); i < n; ++i) { // $x/usercache/$user Path base = lfs.makeQualified( new Path(new Path(localDirs.get(i), USERCACHE), user)); // $x/usercache/$user/filecache Path userFileCacheDir = new Path(base, FILECACHE); usersFileCacheDirs[i] = userFileCacheDir.toString(); createDir(lfs, userFileCacheDir, FILECACHE_PERMS, false); // $x/usercache/$user/appcache/$appId Path appBase = new Path(base, new Path(APPCACHE, appId)); // $x/usercache/$user/appcache/$appId/filecache Path appFileCacheDir = new Path(appBase, FILECACHE); appsFileCacheDirs[i] = appFileCacheDir.toString(); createDir(lfs, appFileCacheDir, FILECACHE_PERMS, false); } conf.setStrings(String.format(APPCACHE_CTXT_FMT, appId), appsFileCacheDirs); conf.setStrings(String.format(USERCACHE_CTXT_FMT, user), usersFileCacheDirs); } private static void createDir(FileContext lfs, Path dirPath, FsPermission perms, boolean createParent) throws IOException { lfs.mkdir(dirPath, perms, createParent); if (!perms.equals(perms.applyUMask(lfs.getUMask()))) { lfs.setPermission(dirPath, perms); } } }
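// Hypothetical usage sketch (not part of the Hadoop source): buildMainArgs
// produces the argv layout that main() parses -- user, appId, locId, NM host,
// NM port, then one or more local dirs. The ids, host name and path below are
// made up for illustration.
class LocalizerArgsSketch {
  public static void main(String[] args) {
    List<String> command = new ArrayList<String>();
    ContainerLocalizer.buildMainArgs(command, "alice",
        "application_1400000000000_0001",
        "container_1400000000000_0001_01_000002",
        new InetSocketAddress("nm-host.example.com", 8040),
        Arrays.asList("/tmp/nm-local-dir"));
    System.out.println(command);
    // [ ...ContainerLocalizer, alice, application_1400000000000_0001,
    //   container_1400000000000_0001_01_000002, nm-host.example.com, 8040,
    //   /tmp/nm-local-dir ]
  }
}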
16,320
36.955814
110
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.event.AbstractEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; @Private @Unstable public class SharedCacheUploadEvent extends AbstractEvent<SharedCacheUploadEventType> { private final Map<LocalResourceRequest,Path> resources; private final ContainerLaunchContext context; private final String user; public SharedCacheUploadEvent(Map<LocalResourceRequest,Path> resources, ContainerLaunchContext context, String user, SharedCacheUploadEventType eventType) { super(eventType); this.resources = resources; this.context = context; this.user = user; } public Map<LocalResourceRequest,Path> getResources() { return resources; } public ContainerLaunchContext getContainerLaunchContext() { return context; } public String getUser() { return user; } }
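// Hypothetical usage sketch (not part of the Hadoop source): constructing an
// UPLOAD event for a batch of localized resources. The user name is made up,
// and a null ContainerLaunchContext is passed only to keep the sketch minimal.
class SharedCacheUploadEventSketch {
  static SharedCacheUploadEvent sketch(Map<LocalResourceRequest, Path> resources) {
    return new SharedCacheUploadEvent(resources, null, "alice",
        SharedCacheUploadEventType.UPLOAD);
  }
}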
2,041
33.610169
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.UndeclaredThrowableException; import java.net.URISyntaxException; import java.util.concurrent.Callable; import java.util.concurrent.ThreadLocalRandom; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest; import org.apache.hadoop.yarn.server.sharedcache.SharedCacheUtil; import org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum; import org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.FSDownload; import com.google.common.annotations.VisibleForTesting; /** * The callable class that handles the actual upload to the shared cache. 
*/ class SharedCacheUploader implements Callable<Boolean> { // rwxr-xr-x static final FsPermission DIRECTORY_PERMISSION = new FsPermission((short)00755); // r-xr-xr-x static final FsPermission FILE_PERMISSION = new FsPermission((short)00555); private static final Log LOG = LogFactory.getLog(SharedCacheUploader.class); private final LocalResource resource; private final Path localPath; private final String user; private final Configuration conf; private final SCMUploaderProtocol scmClient; private final FileSystem fs; private final FileSystem localFs; private final String sharedCacheRootDir; private final int nestedLevel; private final SharedCacheChecksum checksum; private final RecordFactory recordFactory; public SharedCacheUploader(LocalResource resource, Path localPath, String user, Configuration conf, SCMUploaderProtocol scmClient) throws IOException { this(resource, localPath, user, conf, scmClient, FileSystem.get(conf), localPath.getFileSystem(conf)); } /** * @param resource the local resource that contains the original remote path * @param localPath the path in the local filesystem where the resource is * localized * @param fs the filesystem of the shared cache * @param localFs the local filesystem */ public SharedCacheUploader(LocalResource resource, Path localPath, String user, Configuration conf, SCMUploaderProtocol scmClient, FileSystem fs, FileSystem localFs) { this.resource = resource; this.localPath = localPath; this.user = user; this.conf = conf; this.scmClient = scmClient; this.fs = fs; this.sharedCacheRootDir = conf.get(YarnConfiguration.SHARED_CACHE_ROOT, YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT); this.nestedLevel = SharedCacheUtil.getCacheDepth(conf); this.checksum = SharedCacheChecksumFactory.getChecksum(conf); this.localFs = localFs; this.recordFactory = RecordFactoryProvider.getRecordFactory(null); } /** * Uploads the file under the shared cache, and notifies the shared cache * manager. If it is unable to upload the file because it already exists, it * returns false. */ @Override public Boolean call() throws Exception { Path tempPath = null; try { if (!verifyAccess()) { LOG.warn("User " + user + " is not authorized to upload file " + localPath.getName()); return false; } // first determine the actual local path that will be used for upload Path actualPath = getActualPath(); // compute the checksum String checksumVal = computeChecksum(actualPath); // create the directory (if it doesn't exist) Path directoryPath = new Path(SharedCacheUtil.getCacheEntryPath(nestedLevel, sharedCacheRootDir, checksumVal)); // let's not check if the directory already exists: in the vast majority // of the cases, the directory does not exist; as long as mkdirs does not // error out if it exists, we should be fine fs.mkdirs(directoryPath, DIRECTORY_PERMISSION); // create the temporary file tempPath = new Path(directoryPath, getTemporaryFileName(actualPath)); if (!uploadFile(actualPath, tempPath)) { LOG.warn("Could not copy the file to the shared cache at " + tempPath); return false; } // set the permission so that it is readable but not writable fs.setPermission(tempPath, FILE_PERMISSION); // rename it to the final filename Path finalPath = new Path(directoryPath, actualPath.getName()); if (!fs.rename(tempPath, finalPath)) { LOG.warn("The file already exists under " + finalPath + ". 
Ignoring this attempt."); deleteTempFile(tempPath); return false; } // notify the SCM if (!notifySharedCacheManager(checksumVal, actualPath.getName())) { // the shared cache manager rejected the upload (as it is likely // uploaded under a different name // clean up this file and exit fs.delete(finalPath, false); return false; } // set the replication factor short replication = (short)conf.getInt(YarnConfiguration.SHARED_CACHE_NM_UPLOADER_REPLICATION_FACTOR, YarnConfiguration.DEFAULT_SHARED_CACHE_NM_UPLOADER_REPLICATION_FACTOR); fs.setReplication(finalPath, replication); LOG.info("File " + actualPath.getName() + " was uploaded to the shared cache at " + finalPath); return true; } catch (IOException e) { LOG.warn("Exception while uploading the file " + localPath.getName(), e); // in case an exception is thrown, delete the temp file deleteTempFile(tempPath); throw e; } } @VisibleForTesting Path getActualPath() throws IOException { Path path = localPath; FileStatus status = localFs.getFileStatus(path); if (status != null && status.isDirectory()) { // for certain types of resources that get unpacked, the original file may // be found under the directory with the same name (see // FSDownload.unpack); check if the path is a directory and if so look // under it path = new Path(path, path.getName()); } return path; } private void deleteTempFile(Path tempPath) { try { if (tempPath != null && fs.exists(tempPath)) { fs.delete(tempPath, false); } } catch (IOException ignore) {} } /** * Checks that the (original) remote file is either owned by the user who * started the app or public. */ @VisibleForTesting boolean verifyAccess() throws IOException { // if it is in the public cache, it's trivially OK if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) { return true; } final Path remotePath; try { remotePath = ConverterUtils.getPathFromYarnURL(resource.getResource()); } catch (URISyntaxException e) { throw new IOException("Invalid resource", e); } // get the file status of the HDFS file FileSystem remoteFs = remotePath.getFileSystem(conf); FileStatus status = remoteFs.getFileStatus(remotePath); // check to see if the file has been modified in any way if (status.getModificationTime() != resource.getTimestamp()) { LOG.warn("The remote file " + remotePath + " has changed since it's localized; will not consider it for upload"); return false; } // check for the user ownership if (status.getOwner().equals(user)) { return true; // the user owns the file } // check if the file is publicly readable otherwise return fileIsPublic(remotePath, remoteFs, status); } @VisibleForTesting boolean fileIsPublic(final Path remotePath, FileSystem remoteFs, FileStatus status) throws IOException { return FSDownload.isPublic(remoteFs, remotePath, status, null); } /** * Uploads the file to the shared cache under a temporary name, and returns * the result. 
*/ @VisibleForTesting boolean uploadFile(Path sourcePath, Path tempPath) throws IOException { return FileUtil.copy(localFs, sourcePath, fs, tempPath, false, conf); } @VisibleForTesting String computeChecksum(Path path) throws IOException { InputStream is = localFs.open(path); try { return checksum.computeChecksum(is); } finally { try { is.close(); } catch (IOException ignore) {} } } private String getTemporaryFileName(Path path) { return path.getName() + "-" + ThreadLocalRandom.current().nextLong(); } @VisibleForTesting boolean notifySharedCacheManager(String checksumVal, String fileName) throws IOException { try { SCMUploaderNotifyRequest request = recordFactory.newRecordInstance(SCMUploaderNotifyRequest.class); request.setResourceKey(checksumVal); request.setFilename(fileName); return scmClient.notify(request).getAccepted(); } catch (YarnException e) { throw new IOException(e); } catch (UndeclaredThrowableException e) { // retrieve the cause of the exception and throw it as an IOException throw new IOException(e.getCause() == null ? e : e.getCause()); } } }
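// Hypothetical sketch (not part of the Hadoop source) isolating the publish
// protocol used in call() above: the file is first copied to a randomized
// temporary name, made read-only, then renamed to its final name. A failed
// rename means another uploader already published the same resource, so the
// temp file is simply discarded.
class PublishProtocolSketch {
  static boolean publish(FileSystem fs, Path tempPath, Path finalPath)
      throws IOException {
    fs.setPermission(tempPath, SharedCacheUploader.FILE_PERMISSION);
    if (!fs.rename(tempPath, finalPath)) {
      fs.delete(tempPath, false); // another uploader won the race
      return false;
    }
    return true;
  }
}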
10,682
36.749117
91
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache; import java.io.IOException; import java.net.InetSocketAddress; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; import com.google.common.util.concurrent.ThreadFactoryBuilder; @Private @Unstable /** * Service that uploads localized files to the shared cache. The upload is * considered not critical, and is done on a best-effort basis. Failure to * upload is not fatal. */ public class SharedCacheUploadService extends AbstractService implements EventHandler<SharedCacheUploadEvent> { private static final Log LOG = LogFactory.getLog(SharedCacheUploadService.class); private boolean enabled; private FileSystem fs; private FileSystem localFs; private ExecutorService uploaderPool; private SCMUploaderProtocol scmClient; public SharedCacheUploadService() { super(SharedCacheUploadService.class.getName()); } @Override protected void serviceInit(Configuration conf) throws Exception { enabled = conf.getBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, YarnConfiguration.DEFAULT_SHARED_CACHE_ENABLED); if (enabled) { int threadCount = conf.getInt(YarnConfiguration.SHARED_CACHE_NM_UPLOADER_THREAD_COUNT, YarnConfiguration.DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT); uploaderPool = Executors.newFixedThreadPool(threadCount, new ThreadFactoryBuilder(). setNameFormat("Shared cache uploader #%d"). 
build()); scmClient = createSCMClient(conf); try { fs = FileSystem.get(conf); localFs = FileSystem.getLocal(conf); } catch (IOException e) { LOG.error("Unexpected exception in getting the filesystem", e); throw new RuntimeException(e); } } super.serviceInit(conf); } private SCMUploaderProtocol createSCMClient(Configuration conf) { YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress scmAddress = conf.getSocketAddr(YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS, YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_ADDRESS, YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_PORT); return (SCMUploaderProtocol)rpc.getProxy( SCMUploaderProtocol.class, scmAddress, conf); } @Override protected void serviceStop() throws Exception { if (enabled) { uploaderPool.shutdown(); RPC.stopProxy(scmClient); } super.serviceStop(); } @Override public void handle(SharedCacheUploadEvent event) { if (enabled) { Map<LocalResourceRequest,Path> resources = event.getResources(); for (Map.Entry<LocalResourceRequest,Path> e: resources.entrySet()) { SharedCacheUploader uploader = new SharedCacheUploader(e.getKey(), e.getValue(), event.getUser(), getConfig(), scmClient, fs, localFs); // fire off an upload task uploaderPool.submit(uploader); } } } public boolean isEnabled() { return enabled; } }
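// Hypothetical sketch (not part of the Hadoop source): the two configuration
// knobs serviceInit reads above. The thread count shown is illustrative, not
// a recommended value.
class SharedCacheUploadConfigSketch {
  static Configuration sketch() {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true);
    conf.setInt(YarnConfiguration.SHARED_CACHE_NM_UPLOADER_THREAD_COUNT, 20);
    return conf;
  }
}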
4,615
35.346457
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @Private @Unstable public enum SharedCacheUploadEventType { UPLOAD }
1,091
36.655172
89
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSecretManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security; import javax.crypto.SecretKey; import org.apache.hadoop.security.token.SecretManager; public class LocalizerTokenSecretManager extends SecretManager<LocalizerTokenIdentifier> { private final SecretKey secretKey; public LocalizerTokenSecretManager() { this.secretKey = generateSecret(); } @Override protected byte[] createPassword(LocalizerTokenIdentifier identifier) { return createPassword(identifier.getBytes(), secretKey); } @Override public byte[] retrievePassword(LocalizerTokenIdentifier identifier) throws org.apache.hadoop.security.token.SecretManager.InvalidToken { return createPassword(identifier.getBytes(), secretKey); } @Override public LocalizerTokenIdentifier createIdentifier() { return new LocalizerTokenIdentifier(); } }
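// Hypothetical sketch (not part of the Hadoop source): retrievePassword
// recomputes exactly what createPassword produced from the same in-memory
// secret, so a token validates if and only if it was issued by this manager
// instance.
class LocalizerTokenRoundTripSketch {
  public static void main(String[] args) throws Exception {
    LocalizerTokenSecretManager mgr = new LocalizerTokenSecretManager();
    LocalizerTokenIdentifier id = mgr.createIdentifier();
    byte[] issued = mgr.createPassword(id);       // what the NM hands out
    byte[] recomputed = mgr.retrievePassword(id); // what the server expects
    System.out.println(java.util.Arrays.equals(issued, recomputed)); // true
  }
}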
1,682
32
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security; import java.util.Collection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; public class LocalizerTokenSelector implements TokenSelector<LocalizerTokenIdentifier> { private static final Log LOG = LogFactory .getLog(LocalizerTokenSelector.class); @SuppressWarnings("unchecked") @Override public Token<LocalizerTokenIdentifier> selectToken(Text service, Collection<Token<? extends TokenIdentifier>> tokens) { LOG.debug("Using localizerTokenSelector."); for (Token<? extends TokenIdentifier> token : tokens) { LOG.debug("Token of kind " + token.getKind() + " is found"); if (LocalizerTokenIdentifier.KIND.equals(token.getKind())) { return (Token<LocalizerTokenIdentifier>) token; } } LOG.debug("Returning null."); return null; } }
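// Hypothetical sketch (not part of the Hadoop source): selectToken matches
// purely on token kind and ignores the service argument, so any credentials
// holding a token of kind "Localizer" will yield that token.
class LocalizerTokenSelectorSketch {
  public static void main(String[] args) {
    java.util.List<Token<? extends TokenIdentifier>> tokens =
        new java.util.ArrayList<Token<? extends TokenIdentifier>>();
    tokens.add(new Token<LocalizerTokenIdentifier>(
        new LocalizerTokenIdentifier(), new LocalizerTokenSecretManager()));
    Token<LocalizerTokenIdentifier> match = new LocalizerTokenSelector()
        .selectToken(new Text("any-service"), tokens);
    System.out.println(match != null); // true: matched on kind, not service
  }
}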
1,917
35.188679
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security; import java.lang.annotation.Annotation; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; public class LocalizerSecurityInfo extends SecurityInfo { private static final Log LOG = LogFactory.getLog(LocalizerSecurityInfo.class); @Override public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) { return null; } @Override public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) { if (!protocol .equals(LocalizationProtocolPB.class)) { return null; } return new TokenInfo() { @Override public Class<? extends Annotation> annotationType() { return null; } @Override public Class<? extends TokenSelector<? extends TokenIdentifier>> value() { LOG.debug("Using localizerTokenSecurityInfo"); return LocalizerTokenSelector.class; } }; } }
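// Hypothetical sketch (not part of the Hadoop source): the SecurityInfo
// lookup is keyed by protocol class, so only LocalizationProtocolPB resolves
// to the LocalizerTokenSelector; any other protocol yields null.
class LocalizerSecurityInfoSketch {
  public static void main(String[] args) {
    LocalizerSecurityInfo info = new LocalizerSecurityInfo();
    Configuration conf = new Configuration();
    System.out.println(info.getTokenInfo(LocalizationProtocolPB.class, conf)
        .value()); // class ...LocalizerTokenSelector
    System.out.println(info.getTokenInfo(Object.class, conf)); // null
  }
}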
2,183
33.125
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenIdentifier.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.TokenIdentifier; public class LocalizerTokenIdentifier extends TokenIdentifier { public static final Text KIND = new Text("Localizer"); @Override public void write(DataOutput out) throws IOException { // the identifier carries no real state; write a fixed marker int so the // serialized form is non-empty out.writeInt(1); } @Override public void readFields(DataInput in) throws IOException { // consume the marker int written by write() in.readInt(); } @Override public Text getKind() { return KIND; } @Override public UserGroupInformation getUser() { // TODO: return a real user instead of this placeholder return UserGroupInformation.createRemoteUser("testing"); } }
1,761
29.37931
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceLocalizedEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; public class ResourceLocalizedEvent extends ResourceEvent { private final long size; private final Path location; public ResourceLocalizedEvent(LocalResourceRequest rsrc, Path location, long size) { super(rsrc, ResourceEventType.LOCALIZED); this.size = size; this.location = location; } public Path getLocation() { return location; } public long getSize() { return size; } }
1,431
31.545455
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationCleanupEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event; import java.util.Collection; import java.util.Map; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; public class ContainerLocalizationCleanupEvent extends ContainerLocalizationEvent { private final Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrc; /** * Event requesting the cleanup of the given resources. * @param c the container whose resources are to be cleaned up * @param rsrc the resources to clean up, keyed by visibility */ public ContainerLocalizationCleanupEvent(Container c, Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrc) { super(LocalizationEventType.CLEANUP_CONTAINER_RESOURCES, c); this.rsrc = rsrc; } public Map<LocalResourceVisibility, Collection<LocalResourceRequest>> getResources() { return rsrc; } }
1,799
35.734694
97
java
hadoop
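For context, a minimal sketch of how a caller might assemble the visibility-grouped map and fire the cleanup event above. This helper is not repository code: the class name, the APPLICATION-visibility grouping, and the dispatcher/container/requests parameters are all assumptions for illustration.

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;

// Hypothetical helper, not part of the repository.
public class CleanupEventExample {

  // Groups the given requests under APPLICATION visibility and posts a
  // cleanup event for the container through the YARN dispatcher.
  static void requestCleanup(Dispatcher dispatcher, Container container,
      Collection<LocalResourceRequest> requests) {
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
        new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
    rsrcs.put(LocalResourceVisibility.APPLICATION, requests);
    dispatcher.getEventHandler().handle(
        new ContainerLocalizationCleanupEvent(container, rsrcs));
  }
}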
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceRequestEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizerContext;

public class ResourceRequestEvent extends ResourceEvent {

  private final LocalizerContext context;
  private final LocalResourceVisibility vis;

  public ResourceRequestEvent(LocalResourceRequest resource,
      LocalResourceVisibility vis, LocalizerContext context) {
    super(resource, ResourceEventType.REQUEST);
    this.vis = vis;
    this.context = context;
  }

  public LocalizerContext getContext() {
    return context;
  }

  public LocalResourceVisibility getVisibility() {
    return vis;
  }
}
1,648
35.644444
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ApplicationLocalizationEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;

public class ApplicationLocalizationEvent extends LocalizationEvent {

  final Application app;

  public ApplicationLocalizationEvent(LocalizationEventType type,
      Application app) {
    super(type);
    this.app = app;
  }

  public Application getApplication() {
    return app;
  }
}
1,256
32.972973
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;

public class ResourceEvent extends AbstractEvent<ResourceEventType> {

  private final LocalResourceRequest rsrc;

  public ResourceEvent(LocalResourceRequest rsrc, ResourceEventType type) {
    super(type);
    this.rsrc = rsrc;
  }

  public LocalResourceRequest getLocalResourceRequest() {
    return rsrc;
  }
}
1,343
35.324324
97
java
hadoop
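As a quick illustration of the base class above: any subscriber to these events is simply a YARN EventHandler parameterized on ResourceEvent. The handler below is a hypothetical sketch, not repository code; the class name is invented.

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical handler, not part of the repository: logs each resource
// event's type together with the resource it refers to.
public class LoggingResourceEventHandler
    implements EventHandler<ResourceEvent> {

  @Override
  public void handle(ResourceEvent event) {
    // getType() comes from AbstractEvent; getLocalResourceRequest() is
    // defined by ResourceEvent itself.
    System.out.println(event.getType() + " for "
        + event.getLocalResourceRequest());
  }
}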
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceRecoveredEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;

public class ResourceRecoveredEvent extends ResourceEvent {

  private final Path localPath;
  private final long size;

  public ResourceRecoveredEvent(LocalResourceRequest rsrc, Path localPath,
      long size) {
    super(rsrc, ResourceEventType.RECOVERED);
    this.localPath = localPath;
    this.size = size;
  }

  public Path getLocalPath() {
    return localPath;
  }

  public long getSize() {
    return size;
  }
}
1,437
31.681818
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizationEventType.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

public enum LocalizationEventType {
  INIT_APPLICATION_RESOURCES,
  INIT_CONTAINER_RESOURCES,
  CACHE_CLEANUP,
  CLEANUP_CONTAINER_RESOURCES,
  DESTROY_APPLICATION_RESOURCES,
  CONTAINER_RESOURCES_LOCALIZED,
}
1,086
37.821429
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;

/**
 * Events delivered to the {@link ResourceLocalizationService}
 */
public class LocalizerEvent extends AbstractEvent<LocalizerEventType> {

  private final String localizerId;

  public LocalizerEvent(LocalizerEventType type, String localizerId) {
    super(type);
    this.localizerId = localizerId;
  }

  public String getLocalizerId() {
    return localizerId;
  }
}
1,409
34.25
104
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceReleaseEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;

public class ResourceReleaseEvent extends ResourceEvent {

  private final ContainerId container;

  public ResourceReleaseEvent(LocalResourceRequest rsrc,
      ContainerId container) {
    super(rsrc, ResourceEventType.RELEASE);
    this.container = container;
  }

  public ContainerId getContainer() {
    return container;
  }
}
1,366
34.973684
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ContainerLocalizationEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

public class ContainerLocalizationEvent extends LocalizationEvent {

  final Container container;

  public ContainerLocalizationEvent(LocalizationEventType event,
      Container c) {
    super(event);
    this.container = c;
  }

  public Container getContainer() {
    return container;
  }
}
1,255
33.888889
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerEventType.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

public enum LocalizerEventType {
  /** See {@link LocalizerResourceRequestEvent} */
  REQUEST_RESOURCE_LOCALIZATION
}
994
40.458333
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizerContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.hadoop.yarn.util.ConverterUtils;

/**
 * Event indicating that the {@link ResourceLocalizationService}
 * should fetch this resource.
 */
public class LocalizerResourceRequestEvent extends LocalizerEvent {

  private final LocalizerContext context;
  private final LocalizedResource resource;
  private final LocalResourceVisibility vis;
  private final String pattern;

  public LocalizerResourceRequestEvent(LocalizedResource resource,
      LocalResourceVisibility vis, LocalizerContext context, String pattern) {
    super(LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION,
        ConverterUtils.toString(context.getContainerId()));
    this.vis = vis;
    this.context = context;
    this.resource = resource;
    this.pattern = pattern;
  }

  public LocalizedResource getResource() {
    return resource;
  }

  public LocalizerContext getContext() {
    return context;
  }

  public LocalResourceVisibility getVisibility() {
    return vis;
  }

  public String getPattern() {
    return pattern;
  }
}
2,268
34.453125
104
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceEventType.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource;

/**
 * Events delivered to {@link LocalizedResource}. Each of these
 * events is a subclass of {@link ResourceEvent}.
 */
public enum ResourceEventType {
  /** See {@link ResourceRequestEvent} */
  REQUEST,
  /** See {@link ResourceLocalizedEvent} */
  LOCALIZED,
  /** See {@link ResourceReleaseEvent} */
  RELEASE,
  /** See {@link ResourceFailedLocalizationEvent} */
  LOCALIZATION_FAILED,
  /** See {@link ResourceRecoveredEvent} */
  RECOVERED
}
1,424
36.5
94
java
hadoop
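To make the type-to-subclass mapping in the enum above concrete, here is a hypothetical dispatch fragment (not repository code) that downcasts to the event class each constant's javadoc names; the wrapper class and method are invented for illustration.

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerId;

// Hypothetical example, not part of the repository.
public class ResourceEventTypeExample {

  static void onResourceEvent(ResourceEvent event) {
    switch (event.getType()) {
    case LOCALIZED:
      // The resource is now on local disk.
      Path where = ((ResourceLocalizedEvent) event).getLocation();
      break;
    case LOCALIZATION_FAILED:
      // Localization failed; the event carries a diagnostic string.
      String why =
          ((ResourceFailedLocalizationEvent) event).getDiagnosticMessage();
      break;
    case RELEASE:
      // A container no longer needs the resource.
      ContainerId who = ((ResourceReleaseEvent) event).getContainer();
      break;
    default:
      break;
    }
  }
}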
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceFailedLocalizationEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event;

import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;

/**
 * This event is sent by the localizer in case resource localization fails
 * for the requested resource.
 */
public class ResourceFailedLocalizationEvent extends ResourceEvent {

  private final String diagnosticMessage;

  public ResourceFailedLocalizationEvent(LocalResourceRequest rsrc,
      String diagnosticMessage) {
    super(rsrc, ResourceEventType.LOCALIZATION_FAILED);
    this.diagnosticMessage = diagnosticMessage;
  }

  public String getDiagnosticMessage() {
    return diagnosticMessage;
  }
}
1,501
36.55
97
java