Dataset schema (column: type, observed range):
repo: stringlengths, 1 to 191
file: stringlengths, 23 to 351
code: stringlengths, 0 to 5.32M
file_length: int64, 0 to 5.32M
avg_line_length: float64, 0 to 2.9k
max_line_length: int64, 0 to 288k
extension_type: stringclasses, 1 value
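For orientation, here is a minimal Java sketch of how the three derived numeric columns could be computed from a row's code field. The class name RowStats and the exact counting rules (character count for file_length, file_length divided by line count for avg_line_length, longest single line for max_line_length) are assumptions for illustration; the pipeline actually used to build this dataset is not documented here.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical helper: re-derives file_length, avg_line_length and
// max_line_length for one row from the raw source text. The counting
// rules below are assumptions, not the dataset's documented definitions.
public class RowStats {
  public static void main(String[] args) throws IOException {
    String code = Files.readString(Path.of(args[0])); // the "code" cell
    String[] lines = code.split("\n", -1);
    int fileLength = code.length();                   // file_length
    int maxLineLength = 0;                            // max_line_length
    for (String line : lines) {
      maxLineLength = Math.max(maxLineLength, line.length());
    }
    double avgLineLength = (double) fileLength / lines.length; // avg_line_length
    System.out.printf("file_length=%d avg_line_length=%.6f max_line_length=%d%n",
        fileLength, avgLineLength, maxLineLength);
  }
}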
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
code:
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.InvalidContainerException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import 
org.apache.hadoop.yarn.security.NMTokenIdentifier; import org.apache.hadoop.yarn.server.api.ResourceManagerConstants; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestAuxServices.ServiceA; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; public class TestContainerManager extends BaseContainerManagerTest { public TestContainerManager() throws UnsupportedFileSystemException { super(); } static { LOG = LogFactory.getLog(TestContainerManager.class); } @Override @Before public void setup() throws IOException { super.setup(); } private ContainerId createContainerId(int id) { ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId containerId = ContainerId.newContainerId(appAttemptId, id); return containerId; } @Override protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) { return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, metrics, new ApplicationACLsManager(conf), dirsHandler) { @Override public void setBlockNewContainerRequests(boolean blockNewContainerRequests) { // do nothing } @Override protected UserGroupInformation getRemoteUgi() throws YarnException { ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(appAttemptId.toString()); ugi.addTokenIdentifier(new NMTokenIdentifier(appAttemptId, context .getNodeId(), user, context.getNMTokenSecretManager().getCurrentKey() .getKeyId())); return ugi; } @Override protected void authorizeGetAndStopContainerRequest(ContainerId containerId, Container container, boolean stopRequest, NMTokenIdentifier identifier) throws YarnException { if(container == null || container.getUser().equals("Fail")){ throw new YarnException("Reject this container"); } } }; } @Test public void testContainerManagerInitialization() throws IOException { containerManager.start(); InetAddress localAddr = InetAddress.getLocalHost(); String fqdn = localAddr.getCanonicalHostName(); if (!localAddr.getHostAddress().equals(fqdn)) { // only check if fqdn is not same as ip // api returns ip in case of resolution failure Assert.assertEquals(fqdn, context.getNodeId().getHost()); } // Just do a query for a non-existing container. 
boolean throwsException = false; try { List<ContainerId> containerIds = new ArrayList<ContainerId>(); ContainerId id = createContainerId(0); containerIds.add(id); GetContainerStatusesRequest request = GetContainerStatusesRequest.newInstance(containerIds); GetContainerStatusesResponse response = containerManager.getContainerStatuses(request); if (response.getFailedRequests().containsKey(id)) { throw response.getFailedRequests().get(id).deSerialize(); } } catch (Throwable e) { throwsException = true; } Assert.assertTrue(throwsException); } @Test public void testContainerSetup() throws Exception { containerManager.start(); // ////// Create the resources for the container File dir = new File(tmpDir, "dir"); dir.mkdirs(); File file = new File(dir, "file"); PrintWriter fileWriter = new PrintWriter(file); fileWriter.write("Hello World!"); fileWriter.close(); // ////// Construct the Container-id ContainerId cId = createContainerId(0); // ////// Construct the container-spec. ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(file.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(file.lastModified()); String destinationFile = "dest_file"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); StartContainerRequest scRequest = StartContainerRequest.newInstance( containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager())); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); // Now ascertain that the resources are localised correctly.
ApplicationId appId = cId.getApplicationAttemptId().getApplicationId(); String appIDStr = ConverterUtils.toString(appId); String containerIDStr = ConverterUtils.toString(cId); File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE); File userDir = new File(userCacheDir, user); File appCache = new File(userDir, ContainerLocalizer.APPCACHE); File appDir = new File(appCache, appIDStr); File containerDir = new File(appDir, containerIDStr); File targetFile = new File(containerDir, destinationFile); File sysDir = new File(localDir, ResourceLocalizationService.NM_PRIVATE_DIR); File appSysDir = new File(sysDir, appIDStr); File containerSysDir = new File(appSysDir, containerIDStr); for (File f : new File[] { localDir, sysDir, userCacheDir, appDir, appSysDir, containerDir, containerSysDir }) { Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!", f.exists()); Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!", f.isDirectory()); } Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!", targetFile.exists()); // Now verify the contents of the file BufferedReader reader = new BufferedReader(new FileReader(targetFile)); Assert.assertEquals("Hello World!", reader.readLine()); Assert.assertEquals(null, reader.readLine()); } @Test public void testContainerLaunchAndStop() throws IOException, InterruptedException, YarnException { containerManager.start(); File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile"); PrintWriter fileWriter = new PrintWriter(scriptFile); File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile(); // ////// Construct the Container-id ContainerId cId = createContainerId(0); if (Shell.WINDOWS) { fileWriter.println("@echo Hello World!> " + processStartFile); fileWriter.println("@echo " + cId + ">> " + processStartFile); fileWriter.println("@ping -n 100 127.0.0.1 >nul"); } else { fileWriter.write("\numask 0"); // So that start file is readable by the test fileWriter.write("\necho Hello World! > " + processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); } fileWriter.close(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager())); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs = 0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists()); // Now verify the contents of the file BufferedReader reader = new BufferedReader(new FileReader(processStartFile)); Assert.assertEquals("Hello World!", reader.readLine()); // Get the pid of the process String pid = reader.readLine().trim(); // No more lines Assert.assertEquals(null, reader.readLine()); // Now test the stop functionality.
// Assert that the process is alive Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid)); // Once more Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid)); List<ContainerId> containerIds = new ArrayList<ContainerId>(); containerIds.add(cId); StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds); containerManager.stopContainers(stopRequest); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER; Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus()); // Assert that the process is not alive anymore Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid)); } private void testContainerLaunchAndExit(int exitCode) throws IOException, InterruptedException, YarnException { File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile"); PrintWriter fileWriter = new PrintWriter(scriptFile); File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile(); // ////// Construct the Container-id ContainerId cId = createContainerId(0); if (Shell.WINDOWS) { fileWriter.println("@echo Hello World!> " + processStartFile); fileWriter.println("@echo " + cId + ">> " + processStartFile); if (exitCode != 0) { fileWriter.println("@exit " + exitCode); } } else { fileWriter.write("\numask 0"); // So that start file is readable by the test fileWriter.write("\necho Hello World! > " + processStartFile); fileWriter.write("\necho $$ >> " + processStartFile); // Have script throw an exit code at the end if (exitCode != 0) { fileWriter.write("\nexit "+exitCode); } } fileWriter.close(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); StartContainerRequest scRequest = StartContainerRequest.newInstance( containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager())); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); List<ContainerId> containerIds = new ArrayList<ContainerId>(); containerIds.add(cId); GetContainerStatusesRequest gcsRequest = 
GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); // Verify exit status matches exit state of script Assert.assertEquals(exitCode, containerStatus.getExitStatus()); } @Test public void testContainerLaunchAndExitSuccess() throws IOException, InterruptedException, YarnException { containerManager.start(); int exitCode = 0; // launch context for a command that will return exit code 0 // and verify exit code returned testContainerLaunchAndExit(exitCode); } @Test public void testContainerLaunchAndExitFailure() throws IOException, InterruptedException, YarnException { containerManager.start(); int exitCode = 50; // launch context for a command that will return exit code 50 // and verify exit code returned testContainerLaunchAndExit(exitCode); } @Test public void testLocalFilesCleanup() throws InterruptedException, IOException, YarnException { // Real del service delSrvc = new DeletionService(exec); delSrvc.init(conf); containerManager = createContainerManager(delSrvc); containerManager.init(conf); containerManager.start(); // ////// Create the resources for the container File dir = new File(tmpDir, "dir"); dir.mkdirs(); File file = new File(dir, "file"); PrintWriter fileWriter = new PrintWriter(file); fileWriter.write("Hello World!"); fileWriter.close(); // ////// Construct the Container-id ContainerId cId = createContainerId(0); ApplicationId appId = cId.getApplicationAttemptId().getApplicationId(); // ////// Construct the container-spec. ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); // containerLaunchContext.resources = // new HashMap<CharSequence, LocalResource>(); URL resource_alpha = ConverterUtils.getYarnUrlFromPath(FileContext.getLocalFSFileContext() .makeQualified(new Path(file.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(file.lastModified()); String destinationFile = "dest_file"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); StartContainerRequest scRequest = StartContainerRequest.newInstance( containerLaunchContext, createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager())); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); BaseContainerManagerTest.waitForApplicationState(containerManager, cId.getApplicationAttemptId().getApplicationId(), ApplicationState.RUNNING); // Now ascertain that the resources are localised correctly.
String appIDStr = ConverterUtils.toString(appId); String containerIDStr = ConverterUtils.toString(cId); File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE); File userDir = new File(userCacheDir, user); File appCache = new File(userDir, ContainerLocalizer.APPCACHE); File appDir = new File(appCache, appIDStr); File containerDir = new File(appDir, containerIDStr); File targetFile = new File(containerDir, destinationFile); File sysDir = new File(localDir, ResourceLocalizationService.NM_PRIVATE_DIR); File appSysDir = new File(sysDir, appIDStr); File containerSysDir = new File(appSysDir, containerIDStr); // AppDir should still exist Assert.assertTrue("AppDir " + appDir.getAbsolutePath() + " doesn't exist!!", appDir.exists()); Assert.assertTrue("AppSysDir " + appSysDir.getAbsolutePath() + " doesn't exist!!", appSysDir.exists()); for (File f : new File[] { containerDir, containerSysDir }) { Assert.assertFalse(f.getAbsolutePath() + " exists!!", f.exists()); } Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!", targetFile.exists()); // Simulate RM sending an AppFinish event. containerManager.handle(new CMgrCompletedAppsEvent(Arrays .asList(new ApplicationId[] { appId }), CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN)); BaseContainerManagerTest.waitForApplicationState(containerManager, cId.getApplicationAttemptId().getApplicationId(), ApplicationState.FINISHED); // Now ascertain that the resources are localised correctly. for (File f : new File[] { appDir, containerDir, appSysDir, containerSysDir }) { // Wait for deletion. Deletion can happen long after AppFinish because of // the async DeletionService int timeout = 0; while (f.exists() && timeout++ < 15) { Thread.sleep(1000); } Assert.assertFalse(f.getAbsolutePath() + " exists!!", f.exists()); } // Wait for deletion int timeout = 0; while (targetFile.exists() && timeout++ < 15) { Thread.sleep(1000); } Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!", targetFile.exists()); } @Test public void testContainerLaunchFromPreviousRM() throws IOException, InterruptedException, YarnException { containerManager.start(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerId cId1 = createContainerId(0); ContainerId cId2 = createContainerId(0); containerLaunchContext .setLocalResources(new HashMap<String, LocalResource>()); // Construct the Container with Invalid RMIdentifier StartContainerRequest startRequest1 = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId1, ResourceManagerConstants.RM_INVALID_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager())); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(startRequest1); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); boolean catchException = false; try { StartContainersResponse response = containerManager.startContainers(allRequests); if (response.getFailedRequests().containsKey(cId1)) { throw response.getFailedRequests().get(cId1).deSerialize(); } } catch (Throwable e) { e.printStackTrace(); catchException = true; Assert.assertTrue(e.getMessage().contains( "Container " + cId1 + " rejected as it is allocated by a previous RM")); Assert.assertTrue(e.getClass().getName() .equalsIgnoreCase(InvalidContainerException.class.getName())); } // Verify that startContainer fails because of the invalid container request Assert.assertTrue(catchException);
// Construct the Container with a RMIdentifier within current RM StartContainerRequest startRequest2 = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId2, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager())); List<StartContainerRequest> list2 = new ArrayList<StartContainerRequest>(); list2.add(startRequest2); StartContainersRequest allRequests2 = StartContainersRequest.newInstance(list2); containerManager.startContainers(allRequests2); boolean noException = true; try { containerManager.startContainers(allRequests2); } catch (YarnException e) { noException = false; } // Verify that startContainer gets no YarnException Assert.assertTrue(noException); } @Test public void testMultipleContainersLaunch() throws Exception { containerManager.start(); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); for (int i = 0; i < 10; i++) { ContainerId cId = createContainerId(i); long identifier = 0; if ((i & 1) == 0) // container with even id fails identifier = ResourceManagerConstants.RM_INVALID_IDENTIFIER; else identifier = DUMMY_RM_IDENTIFIER; Token containerToken = createContainerToken(cId, identifier, context.getNodeId(), user, context.getContainerTokenSecretManager()); StartContainerRequest request = StartContainerRequest.newInstance(containerLaunchContext, containerToken); list.add(request); } StartContainersRequest requestList = StartContainersRequest.newInstance(list); StartContainersResponse response = containerManager.startContainers(requestList); Assert.assertEquals(5, response.getSuccessfullyStartedContainers().size()); for (ContainerId id : response.getSuccessfullyStartedContainers()) { // Containers with odd id should succeed. Assert.assertEquals(1, id.getContainerId() & 1); } Assert.assertEquals(5, response.getFailedRequests().size()); for (Map.Entry<ContainerId, SerializedException> entry : response .getFailedRequests().entrySet()) { // Containers with even id should fail.
Assert.assertEquals(0, entry.getKey().getContainerId() & 1); Assert.assertTrue(entry.getValue().getMessage() .contains( "Container " + entry.getKey() + " rejected as it is allocated by a previous RM")); } } @Test public void testMultipleContainersStopAndGetStatus() throws Exception { containerManager.start(); List<StartContainerRequest> startRequest = new ArrayList<StartContainerRequest>(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); List<ContainerId> containerIds = new ArrayList<ContainerId>(); for (int i = 0; i < 10; i++) { ContainerId cId = createContainerId(i); String user = null; if ((i & 1) == 0) { // container with even id fail user = "Fail"; } else { user = "Pass"; } Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()); StartContainerRequest request = StartContainerRequest.newInstance(containerLaunchContext, containerToken); startRequest.add(request); containerIds.add(cId); } // start containers StartContainersRequest requestList = StartContainersRequest.newInstance(startRequest); containerManager.startContainers(requestList); // Get container statuses GetContainerStatusesRequest statusRequest = GetContainerStatusesRequest.newInstance(containerIds); GetContainerStatusesResponse statusResponse = containerManager.getContainerStatuses(statusRequest); Assert.assertEquals(5, statusResponse.getContainerStatuses().size()); for (ContainerStatus status : statusResponse.getContainerStatuses()) { // Containers with odd id should succeed Assert.assertEquals(1, status.getContainerId().getContainerId() & 1); } Assert.assertEquals(5, statusResponse.getFailedRequests().size()); for (Map.Entry<ContainerId, SerializedException> entry : statusResponse .getFailedRequests().entrySet()) { // Containers with even id should fail. Assert.assertEquals(0, entry.getKey().getContainerId() & 1); Assert.assertTrue(entry.getValue().getMessage() .contains("Reject this container")); } // stop containers StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds); StopContainersResponse stopResponse = containerManager.stopContainers(stopRequest); Assert.assertEquals(5, stopResponse.getSuccessfullyStoppedContainers() .size()); for (ContainerId id : stopResponse.getSuccessfullyStoppedContainers()) { // Containers with odd id should succeed. Assert.assertEquals(1, id.getContainerId() & 1); } Assert.assertEquals(5, stopResponse.getFailedRequests().size()); for (Map.Entry<ContainerId, SerializedException> entry : stopResponse .getFailedRequests().entrySet()) { // Containers with even id should fail. 
Assert.assertEquals(0, entry.getKey().getContainerId() & 1); Assert.assertTrue(entry.getValue().getMessage() .contains("Reject this container")); } } @Test public void testStartContainerFailureWithUnknownAuxService() throws Exception { conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "existService" }); conf.setClass( String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "existService"), ServiceA.class, Service.class); containerManager.start(); List<StartContainerRequest> startRequest = new ArrayList<StartContainerRequest>(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>(); String serviceName = "non_exist_auxService"; serviceData.put(serviceName, ByteBuffer.wrap(serviceName.getBytes())); containerLaunchContext.setServiceData(serviceData); ContainerId cId = createContainerId(0); String user = "start_container_fail"; Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, context.getContainerTokenSecretManager()); StartContainerRequest request = StartContainerRequest.newInstance(containerLaunchContext, containerToken); // start containers startRequest.add(request); StartContainersRequest requestList = StartContainersRequest.newInstance(startRequest); StartContainersResponse response = containerManager.startContainers(requestList); Assert.assertTrue(response.getFailedRequests().size() == 1); Assert.assertTrue(response.getSuccessfullyStartedContainers().size() == 0); Assert.assertTrue(response.getFailedRequests().containsKey(cId)); Assert.assertTrue(response.getFailedRequests().get(cId).getMessage() .contains("The auxService:" + serviceName + " does not exist")); } /* Test added to verify fix in YARN-644 */ @Test public void testNullTokens() throws Exception { ContainerManagerImpl cMgrImpl = new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, metrics, new ApplicationACLsManager(conf), dirsHandler); String strExceptionMsg = ""; try { cMgrImpl.authorizeStartRequest(null, new ContainerTokenIdentifier()); } catch(YarnException ye) { strExceptionMsg = ye.getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_NMTOKEN_MSG); strExceptionMsg = ""; try { cMgrImpl.authorizeStartRequest(new NMTokenIdentifier(), null); } catch(YarnException ye) { strExceptionMsg = ye.getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_CONTAINERTOKEN_MSG); strExceptionMsg = ""; try { cMgrImpl.authorizeGetAndStopContainerRequest(null, null, true, null); } catch(YarnException ye) { strExceptionMsg = ye.getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_NMTOKEN_MSG); strExceptionMsg = ""; try { cMgrImpl.authorizeUser(null, null); } catch(YarnException ye) { strExceptionMsg = ye.getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_NMTOKEN_MSG); ContainerManagerImpl spyContainerMgr = Mockito.spy(cMgrImpl); UserGroupInformation ugInfo = UserGroupInformation.createRemoteUser("a"); Mockito.when(spyContainerMgr.getRemoteUgi()).thenReturn(ugInfo); Mockito.when(spyContainerMgr. 
selectNMTokenIdentifier(ugInfo)).thenReturn(null); strExceptionMsg = ""; try { spyContainerMgr.stopContainers(new StopContainersRequestPBImpl()); } catch(YarnException ye) { strExceptionMsg = ye.getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_NMTOKEN_MSG); strExceptionMsg = ""; try { spyContainerMgr.getContainerStatuses( new GetContainerStatusesRequestPBImpl()); } catch(YarnException ye) { strExceptionMsg = ye.getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_NMTOKEN_MSG); Mockito.doNothing().when(spyContainerMgr).authorizeUser(ugInfo, null); List<StartContainerRequest> reqList = new ArrayList<StartContainerRequest>(); reqList.add(StartContainerRequest.newInstance(null, null)); StartContainersRequest reqs = new StartContainersRequestPBImpl(); reqs.setStartContainerRequests(reqList); strExceptionMsg = ""; try { spyContainerMgr.startContainers(reqs); } catch(YarnException ye) { strExceptionMsg = ye.getCause().getMessage(); } Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_CONTAINERTOKEN_MSG); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, NMContainerTokenSecretManager containerTokenSecretManager) throws IOException { return createContainerToken(cId, rmIdentifier, nodeId, user, containerTokenSecretManager, null); } public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, NMContainerTokenSecretManager containerTokenSecretManager, LogAggregationContext logAggregationContext) throws IOException { Resource r = BuilderUtils.newResource(1024, 1); ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, nodeId.toString(), user, r, System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null); Token containerToken = BuilderUtils .newContainerToken(nodeId, containerTokenSecretManager .retrievePassword(containerTokenIdentifier), containerTokenIdentifier); return containerToken; } }
file_length: 37,414
avg_line_length: 40.205947
max_line_length: 114
extension_type: java
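Sanity check for this row, under the same assumed definitions as the sketch above: file_length divided by avg_line_length approximates the line count, and 37,414 / 40.205947 ≈ 930, so this snapshot of TestContainerManager.java is roughly 930 lines long.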
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
code:
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import java.nio.ByteBuffer; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.NMTokenIdentifier; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.junit.Test; public class TestContainerManagerRecovery { private NodeManagerMetrics metrics = NodeManagerMetrics.create(); @Test public void testApplicationRecovery() throws Exception { YarnConfiguration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true); conf.set(YarnConfiguration.NM_ADDRESS, "localhost:1234"); conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); conf.set(YarnConfiguration.YARN_ADMIN_ACL, "yarn_admin_user"); NMStateStoreService stateStore = new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); Context context = createContext(conf, stateStore); ContainerManagerImpl cm = createContainerManager(context); cm.init(conf); cm.start(); // add an application by starting a container String appUser = "app_user1"; String modUser = "modify_user1"; String viewUser = "view_user1"; String enemyUser = "enemy_user"; ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cid = ContainerId.newContainerId(attemptId, 1); Map<String, LocalResource> localResources = Collections.emptyMap(); Map<String, String> containerEnv = Collections.emptyMap(); List<String> containerCmds = Collections.emptyList(); Map<String, ByteBuffer> serviceData = Collections.emptyMap(); Credentials containerCreds = new Credentials(); DataOutputBuffer dob = new DataOutputBuffer(); containerCreds.writeTokenStorageToStream(dob); ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(); acls.put(ApplicationAccessType.MODIFY_APP, modUser); acls.put(ApplicationAccessType.VIEW_APP, viewUser); ContainerLaunchContext clc = ContainerLaunchContext.newInstance( localResources, containerEnv, containerCmds, serviceData, containerTokens, acls); // create the logAggregationContext LogAggregationContext logAggregationContext = LogAggregationContext.newInstance("includePattern", "excludePattern", "includePatternInRollingAggregation", "excludePatternInRollingAggregation"); StartContainersResponse startResponse = startContainer(context, cm, cid, clc, logAggregationContext); assertTrue(startResponse.getFailedRequests().isEmpty()); assertEquals(1, context.getApplications().size()); Application app = context.getApplications().get(appId); assertNotNull(app); waitForAppState(app, ApplicationState.INITING); assertTrue(context.getApplicationACLsManager().checkAccess( 
UserGroupInformation.createRemoteUser(modUser), ApplicationAccessType.MODIFY_APP, appUser, appId)); assertFalse(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.MODIFY_APP, appUser, appId)); assertTrue(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.VIEW_APP, appUser, appId)); assertFalse(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(enemyUser), ApplicationAccessType.VIEW_APP, appUser, appId)); // reset container manager and verify app recovered with proper acls cm.stop(); context = createContext(conf, stateStore); cm = createContainerManager(context); cm.init(conf); cm.start(); assertEquals(1, context.getApplications().size()); app = context.getApplications().get(appId); assertNotNull(app); // check whether LogAggregationContext is recovered correctly LogAggregationContext recovered = ((ApplicationImpl) app).getLogAggregationContext(); assertNotNull(recovered); assertEquals(logAggregationContext.getIncludePattern(), recovered.getIncludePattern()); assertEquals(logAggregationContext.getExcludePattern(), recovered.getExcludePattern()); assertEquals(logAggregationContext.getRolledLogsIncludePattern(), recovered.getRolledLogsIncludePattern()); assertEquals(logAggregationContext.getRolledLogsExcludePattern(), recovered.getRolledLogsExcludePattern()); waitForAppState(app, ApplicationState.INITING); assertTrue(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(modUser), ApplicationAccessType.MODIFY_APP, appUser, appId)); assertFalse(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.MODIFY_APP, appUser, appId)); assertTrue(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.VIEW_APP, appUser, appId)); assertFalse(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(enemyUser), ApplicationAccessType.VIEW_APP, appUser, appId)); // simulate application completion List<ApplicationId> finishedApps = new ArrayList<ApplicationId>(); finishedApps.add(appId); cm.handle(new CMgrCompletedAppsEvent(finishedApps, CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER)); waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); // restart and verify app is marked for finishing cm.stop(); context = createContext(conf, stateStore); cm = createContainerManager(context); cm.init(conf); cm.start(); assertEquals(1, context.getApplications().size()); app = context.getApplications().get(appId); assertNotNull(app); waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); assertTrue(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(modUser), ApplicationAccessType.MODIFY_APP, appUser, appId)); assertFalse(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.MODIFY_APP, appUser, appId)); assertTrue(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.VIEW_APP, appUser, appId)); assertFalse(context.getApplicationACLsManager().checkAccess( UserGroupInformation.createRemoteUser(enemyUser), ApplicationAccessType.VIEW_APP, appUser, appId)); // simulate log aggregation completion app.handle(new ApplicationEvent(app.getAppId(), 
ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP)); assertEquals(app.getApplicationState(), ApplicationState.FINISHED); app.handle(new ApplicationEvent(app.getAppId(), ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)); // restart and verify app is no longer present after recovery cm.stop(); context = createContext(conf, stateStore); cm = createContainerManager(context); cm.init(conf); cm.start(); assertTrue(context.getApplications().isEmpty()); cm.stop(); } @Test public void testContainerCleanupOnShutdown() throws Exception { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cid = ContainerId.newContainerId(attemptId, 1); Map<String, LocalResource> localResources = Collections.emptyMap(); Map<String, String> containerEnv = Collections.emptyMap(); List<String> containerCmds = Collections.emptyList(); Map<String, ByteBuffer> serviceData = Collections.emptyMap(); Credentials containerCreds = new Credentials(); DataOutputBuffer dob = new DataOutputBuffer(); containerCreds.writeTokenStorageToStream(dob); ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); Map<ApplicationAccessType, String> acls = Collections.emptyMap(); ContainerLaunchContext clc = ContainerLaunchContext.newInstance( localResources, containerEnv, containerCmds, serviceData, containerTokens, acls); // create the logAggregationContext LogAggregationContext logAggregationContext = LogAggregationContext.newInstance("includePattern", "excludePattern"); // verify containers are stopped on shutdown without recovery YarnConfiguration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, false); conf.set(YarnConfiguration.NM_ADDRESS, "localhost:1234"); Context context = createContext(conf, new NMNullStateStoreService()); ContainerManagerImpl cm = spy(createContainerManager(context)); cm.init(conf); cm.start(); StartContainersResponse startResponse = startContainer(context, cm, cid, clc, logAggregationContext); assertEquals(1, startResponse.getSuccessfullyStartedContainers().size()); cm.stop(); verify(cm).handle(isA(CMgrCompletedAppsEvent.class)); // verify containers are stopped on shutdown with unsupervised recovery conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, false); NMMemoryStateStoreService memStore = new NMMemoryStateStoreService(); memStore.init(conf); memStore.start(); context = createContext(conf, memStore); cm = spy(createContainerManager(context)); cm.init(conf); cm.start(); startResponse = startContainer(context, cm, cid, clc, logAggregationContext); assertEquals(1, startResponse.getSuccessfullyStartedContainers().size()); cm.stop(); memStore.close(); verify(cm).handle(isA(CMgrCompletedAppsEvent.class)); // verify containers are not stopped on shutdown with supervised recovery conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true); memStore = new NMMemoryStateStoreService(); memStore.init(conf); memStore.start(); context = createContext(conf, memStore); cm = spy(createContainerManager(context)); cm.init(conf); cm.start(); startResponse = startContainer(context, cm, cid, clc, logAggregationContext); assertEquals(1, startResponse.getSuccessfullyStartedContainers().size()); cm.stop(); memStore.close(); verify(cm, 
never()).handle(isA(CMgrCompletedAppsEvent.class)); } private NMContext createContext(YarnConfiguration conf, NMStateStoreService stateStore) { NMContext context = new NMContext(new NMContainerTokenSecretManager( conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf), stateStore); // simulate registration with RM MasterKey masterKey = new MasterKeyPBImpl(); masterKey.setKeyId(123); masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) .byteValue() })); context.getContainerTokenSecretManager().setMasterKey(masterKey); context.getNMTokenSecretManager().setMasterKey(masterKey); return context; } private StartContainersResponse startContainer(Context context, final ContainerManagerImpl cm, ContainerId cid, ContainerLaunchContext clc, LogAggregationContext logAggregationContext) throws Exception { UserGroupInformation user = UserGroupInformation.createRemoteUser( cid.getApplicationAttemptId().toString()); StartContainerRequest scReq = StartContainerRequest.newInstance( clc, TestContainerManager.createContainerToken(cid, 0, context.getNodeId(), user.getShortUserName(), context.getContainerTokenSecretManager(), logAggregationContext)); final List<StartContainerRequest> scReqList = new ArrayList<StartContainerRequest>(); scReqList.add(scReq); NMTokenIdentifier nmToken = new NMTokenIdentifier( cid.getApplicationAttemptId(), context.getNodeId(), user.getShortUserName(), context.getNMTokenSecretManager().getCurrentKey().getKeyId()); user.addTokenIdentifier(nmToken); return user.doAs(new PrivilegedExceptionAction<StartContainersResponse>() { @Override public StartContainersResponse run() throws Exception { return cm.startContainers( StartContainersRequest.newInstance(scReqList)); } }); } private void waitForAppState(Application app, ApplicationState state) throws Exception { final int msecPerSleep = 10; int msecLeft = 5000; while (app.getApplicationState() != state && msecLeft > 0) { Thread.sleep(msecPerSleep); msecLeft -= msecPerSleep; } assertEquals(state, app.getApplicationState()); } private ContainerManagerImpl createContainerManager(Context context) { final LogHandler logHandler = mock(LogHandler.class); final ResourceLocalizationService rsrcSrv = new ResourceLocalizationService(null, null, null, null, context) { @Override public void serviceInit(Configuration conf) throws Exception { } @Override public void serviceStart() throws Exception { // do nothing } @Override public void serviceStop() throws Exception { // do nothing } @Override public void handle(LocalizationEvent event) { // do nothing } }; final ContainersLauncher launcher = new ContainersLauncher(context, null, null, null, null) { @Override public void handle(ContainersLauncherEvent event) { // do nothing } }; return new ContainerManagerImpl(context, mock(ContainerExecutor.class), mock(DeletionService.class), mock(NodeStatusUpdater.class), metrics, context.getApplicationACLsManager(), null) { @Override protected LogHandler createLogHandler(Configuration conf, Context context, DeletionService deletionService) { return logHandler; } @Override protected ResourceLocalizationService createResourceLocalizationService( ContainerExecutor exec, DeletionService deletionContext, Context context) { return rsrcSrv; } @Override protected ContainersLauncher createContainersLauncher( Context context, ContainerExecutor exec) { return launcher; } @Override public void setBlockNewContainerRequests( boolean blockNewContainerRequests) { // do nothing } }; } }
file_length: 19,436
avg_line_length: 44.734118
max_line_length: 104
extension_type: java
repo: hadoop
file: hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
code:
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.refEq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerInitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEvent; 
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Test; import org.mockito.ArgumentMatcher; public class TestApplication { /** * All container start events before application running. */ @Test public void testApplicationInit1() { WrappedApplication wa = null; try { wa = new WrappedApplication(1, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); assertEquals(1, wa.app.getContainers().size()); wa.initContainer(0); wa.initContainer(2); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); assertEquals(3, wa.app.getContainers().size()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); for (int i = 0; i < wa.containers.size(); i++) { verify(wa.containerBus).handle( argThat(new ContainerInitMatcher(wa.containers.get(i) .getContainerId()))); } } finally { if (wa != null) wa.finished(); } } /** * Container start events after Application Running */ @Test public void testApplicationInit2() { WrappedApplication wa = null; try { wa = new WrappedApplication(2, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(0); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); assertEquals(1, wa.app.getContainers().size()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); verify(wa.containerBus).handle( argThat(new ContainerInitMatcher(wa.containers.get(0) .getContainerId()))); wa.initContainer(1); wa.initContainer(2); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(3, wa.app.getContainers().size()); for (int i = 1; i < wa.containers.size(); i++) { verify(wa.containerBus).handle( argThat(new ContainerInitMatcher(wa.containers.get(i) .getContainerId()))); } } finally { if (wa != null) wa.finished(); } } /** * App state RUNNING after all containers complete, before RM sends * APP_FINISHED */ @Test public void testAppRunningAfterContainersComplete() { WrappedApplication wa = null; try { wa = new WrappedApplication(3, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(2, wa.app.getContainers().size()); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); } finally { if (wa != null) wa.finished(); } } /** * Finished containers properly tracked when only container finishes in APP_INITING */ @Test public void testContainersCompleteDuringAppInit1() { WrappedApplication wa = null; try { wa = new WrappedApplication(3, 314159265358979L, "yak", 1); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); 
wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); } finally { if (wa != null) wa.finished(); } } /** * Finished containers properly tracked when 1 of several containers finishes in APP_INITING */ @Test public void testContainersCompleteDuringAppInit2() { WrappedApplication wa = null; try { wa = new WrappedApplication(3, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(2, wa.app.getContainers().size()); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); } finally { if (wa != null) wa.finished(); } } @Test @SuppressWarnings("unchecked") public void testAppFinishedOnRunningContainers() { WrappedApplication wa = null; try { wa = new WrappedApplication(4, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(2, wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT, wa.app.getApplicationState()); assertEquals(2, wa.app.getContainers().size()); for (int i = 1; i < wa.containers.size(); i++) { verify(wa.containerBus).handle( argThat(new ContainerKillMatcher(wa.containers.get(i) .getContainerId()))); } wa.containerFinished(1); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT, wa.app.getApplicationState()); assertEquals(1, wa.app.getContainers().size()); reset(wa.localizerBus); wa.containerFinished(2); // All containers finished. Cleanup should be called. 
assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); verify(wa.localizerBus).handle( refEq(new ApplicationLocalizationEvent( LocalizationEventType.DESTROY_APPLICATION_RESOURCES, wa.app))); verify(wa.auxBus).handle( refEq(new AuxServicesEvent( AuxServicesEventType.APPLICATION_STOP, wa.appId))); wa.appResourcesCleanedup(); for (Container container : wa.containers) { ContainerTokenIdentifier identifier = wa.getContainerTokenIdentifier(container.getContainerId()); waitForContainerTokenToExpire(identifier); Assert.assertTrue(wa.context.getContainerTokenSecretManager() .isValidStartContainerRequest(identifier)); } assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } } protected ContainerTokenIdentifier waitForContainerTokenToExpire( ContainerTokenIdentifier identifier) { int attempts = 5; while (System.currentTimeMillis() < identifier.getExpiryTimeStamp() && attempts-- > 0) { try { Thread.sleep(1000); } catch (Exception e) {} } return identifier; } @Test @SuppressWarnings("unchecked") public void testAppFinishedOnCompletedContainers() { WrappedApplication wa = null; try { wa = new WrappedApplication(5, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); reset(wa.localizerBus); wa.containerFinished(0); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, wa.app.getApplicationState()); verify(wa.localizerBus).handle( refEq(new ApplicationLocalizationEvent( LocalizationEventType.DESTROY_APPLICATION_RESOURCES, wa.app))); wa.appResourcesCleanedup(); for ( Container container : wa.containers) { ContainerTokenIdentifier identifier = wa.getContainerTokenIdentifier(container.getContainerId()); waitForContainerTokenToExpire(identifier); Assert.assertTrue(wa.context.getContainerTokenSecretManager() .isValidStartContainerRequest(identifier)); } assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } } //TODO Re-work after Application transitions are changed. // @Test @SuppressWarnings("unchecked") public void testStartContainerAfterAppFinished() { WrappedApplication wa = null; try { wa = new WrappedApplication(5, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); reset(wa.localizerBus); wa.containerFinished(0); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, wa.app.getApplicationState()); verify(wa.localizerBus).handle( refEq(new ApplicationLocalizationEvent( LocalizationEventType.DESTROY_APPLICATION_RESOURCES, wa.app))); wa.appResourcesCleanedup(); assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } } //TODO Re-work after Application transitions are changed. 
// @Test @SuppressWarnings("unchecked") public void testAppFinishedOnIniting() { // AM may send a startContainer() - AM APP_FINIHSED processed after // APP_FINISHED on another NM WrappedApplication wa = null; try { wa = new WrappedApplication(1, 314159265358979L, "yak", 3); wa.initApplication(); wa.initContainer(0); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); assertEquals(1, wa.app.getContainers().size()); reset(wa.localizerBus); wa.appFinished(); verify(wa.containerBus).handle( argThat(new ContainerKillMatcher(wa.containers.get(0) .getContainerId()))); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT, wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, wa.app.getApplicationState()); verify(wa.localizerBus).handle( refEq(new ApplicationLocalizationEvent( LocalizationEventType.DESTROY_APPLICATION_RESOURCES, wa.app))); wa.initContainer(1); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP, wa.app.getApplicationState()); assertEquals(0, wa.app.getContainers().size()); wa.appResourcesCleanedup(); assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } } @Test public void testNMTokenSecretManagerCleanup() { WrappedApplication wa = null; try { wa = new WrappedApplication(1, 314159265358979L, "yak", 1); wa.initApplication(); wa.initContainer(0); assertEquals(ApplicationState.INITING, wa.app.getApplicationState()); assertEquals(1, wa.app.getContainers().size()); wa.appFinished(); wa.containerFinished(0); wa.appResourcesCleanedup(); assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); verify(wa.nmTokenSecretMgr).appFinished(eq(wa.appId)); } finally { if (wa != null) { wa.finished(); } } } private class ContainerKillMatcher extends ArgumentMatcher<ContainerEvent> { private ContainerId cId; public ContainerKillMatcher(ContainerId cId) { this.cId = cId; } @Override public boolean matches(Object argument) { if (argument instanceof ContainerKillEvent) { ContainerKillEvent event = (ContainerKillEvent) argument; return event.getContainerID().equals(cId); } return false; } } private class ContainerInitMatcher extends ArgumentMatcher<ContainerEvent> { private ContainerId cId; public ContainerInitMatcher(ContainerId cId) { this.cId = cId; } @Override public boolean matches(Object argument) { if (argument instanceof ContainerInitEvent) { ContainerInitEvent event = (ContainerInitEvent) argument; return event.getContainerID().equals(cId); } return false; } } @SuppressWarnings("unchecked") private class WrappedApplication { final DrainDispatcher dispatcher; final EventHandler<LocalizationEvent> localizerBus; final EventHandler<ContainersLauncherEvent> launcherBus; final EventHandler<ContainersMonitorEvent> monitorBus; final EventHandler<AuxServicesEvent> auxBus; final EventHandler<ContainerEvent> containerBus; final EventHandler<LogHandlerEvent> logAggregationBus; final String user; final List<Container> containers; final Context context; final Map<ContainerId, ContainerTokenIdentifier> containerTokenIdentifierMap; final NMTokenSecretManagerInNM nmTokenSecretMgr; final ApplicationId appId; final Application app; WrappedApplication(int id, long timestamp, String user, int numContainers) { Configuration conf = new Configuration(); dispatcher = new DrainDispatcher(); containerTokenIdentifierMap = new HashMap<ContainerId, ContainerTokenIdentifier>(); dispatcher.init(conf); localizerBus = mock(EventHandler.class); launcherBus = 
mock(EventHandler.class); monitorBus = mock(EventHandler.class); auxBus = mock(EventHandler.class); containerBus = mock(EventHandler.class); logAggregationBus = mock(EventHandler.class); dispatcher.register(LocalizationEventType.class, localizerBus); dispatcher.register(ContainersLauncherEventType.class, launcherBus); dispatcher.register(ContainersMonitorEventType.class, monitorBus); dispatcher.register(AuxServicesEventType.class, auxBus); dispatcher.register(ContainerEventType.class, containerBus); dispatcher.register(LogHandlerEventType.class, logAggregationBus); nmTokenSecretMgr = mock(NMTokenSecretManagerInNM.class); context = mock(Context.class); when(context.getContainerTokenSecretManager()).thenReturn( new NMContainerTokenSecretManager(conf)); when(context.getApplicationACLsManager()).thenReturn( new ApplicationACLsManager(conf)); when(context.getNMTokenSecretManager()).thenReturn(nmTokenSecretMgr); // Setting master key MasterKey masterKey = new MasterKeyPBImpl(); masterKey.setKeyId(123); masterKey.setBytes(ByteBuffer.wrap(new byte[] { (new Integer(123) .byteValue()) })); context.getContainerTokenSecretManager().setMasterKey(masterKey); this.user = user; this.appId = BuilderUtils.newApplicationId(timestamp, id); app = new ApplicationImpl(dispatcher, this.user, appId, null, context); containers = new ArrayList<Container>(); for (int i = 0; i < numContainers; i++) { Container container = createMockedContainer(this.appId, i); containers.add(container); long currentTime = System.currentTimeMillis(); ContainerTokenIdentifier identifier = new ContainerTokenIdentifier(container.getContainerId(), "", "", null, currentTime + 2000, masterKey.getKeyId(), currentTime, Priority.newInstance(0), 0); containerTokenIdentifierMap .put(identifier.getContainerID(), identifier); context.getContainerTokenSecretManager().startContainerSuccessful( identifier); Assert.assertFalse(context.getContainerTokenSecretManager() .isValidStartContainerRequest(identifier)); } dispatcher.start(); } private void drainDispatcherEvents() { dispatcher.await(); } public void finished() { dispatcher.stop(); } public void initApplication() { app.handle(new ApplicationInitEvent(appId, new HashMap<ApplicationAccessType, String>())); } public void initContainer(int containerNum) { if (containerNum == -1) { for (int i = 0; i < containers.size(); i++) { app.handle(new ApplicationContainerInitEvent(containers.get(i))); } } else { app.handle(new ApplicationContainerInitEvent(containers.get(containerNum))); } drainDispatcherEvents(); } public void containerFinished(int containerNum) { app.handle(new ApplicationContainerFinishedEvent(containers.get( containerNum).getContainerId())); drainDispatcherEvents(); } public void applicationInited() { app.handle(new ApplicationInitedEvent(appId)); drainDispatcherEvents(); } public void appFinished() { app.handle(new ApplicationFinishEvent(appId, "Finish Application")); drainDispatcherEvents(); } public void appResourcesCleanedup() { app.handle(new ApplicationEvent(appId, ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP)); drainDispatcherEvents(); } public ContainerTokenIdentifier getContainerTokenIdentifier( ContainerId containerId) { return this.containerTokenIdentifierMap.get(containerId); } } private Container createMockedContainer(ApplicationId appId, int containerId) { ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1); ContainerId cId = BuilderUtils.newContainerId(appAttemptId, containerId); Container c = mock(Container.class); 
when(c.getContainerId()).thenReturn(cId); ContainerLaunchContext launchContext = mock(ContainerLaunchContext.class); when(c.getLaunchContext()).thenReturn(launchContext); when(launchContext.getApplicationACLs()).thenReturn( new HashMap<ApplicationAccessType, String>()); return c; } }
22,949
36.016129
111
java
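TestApplication above exercises ApplicationImpl's state machine through one fixed recipe: register mocked EventHandlers as the outbound event buses on a DrainDispatcher, fire an event at the state machine, drain the dispatcher, then verify the dispatched events by content with a Mockito ArgumentMatcher. Below is a minimal, self-contained sketch of that recipe, not the test itself: PingType/PingEvent are hypothetical stand-ins for the real container event classes, and the argThat/ArgumentMatcher calls assume the Mockito 1.x API these tests import.

import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.mockito.ArgumentMatcher;

public class DispatcherVerifySketch {
  // Hypothetical event type; the tests use ContainerEventType et al.
  enum PingType { PING }

  static class PingEvent extends AbstractEvent<PingType> {
    final int id;
    PingEvent(int id) { super(PingType.PING); this.id = id; }
  }

  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(new Configuration());
    EventHandler<PingEvent> bus = mock(EventHandler.class); // outbound bus
    dispatcher.register(PingType.class, bus);
    dispatcher.start();

    dispatcher.getEventHandler().handle(new PingEvent(42));
    dispatcher.await(); // same job as drainDispatcherEvents() above

    // Verify by event content, the way ContainerInitMatcher does.
    verify(bus).handle(argThat(new ArgumentMatcher<PingEvent>() {
      @Override
      public boolean matches(Object argument) {
        return argument instanceof PingEvent
            && ((PingEvent) argument).id == 42;
      }
    }));
    dispatcher.stop();
  }
}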
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.net.URISyntaxException; import java.nio.ByteBuffer; import java.util.AbstractMap.SimpleEntry; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Test; import org.mockito.ArgumentMatcher; public class TestContainer { final NodeManagerMetrics metrics = NodeManagerMetrics.create(); final Configuration conf = new YarnConfiguration(); final String FAKE_LOCALIZATION_ERROR = "Fake localization error"; /** * Verify correct container request events sent to localizer. */ @Test public void testLocalizationRequest() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(7, 314159265358979L, 4344, "yak"); assertEquals(ContainerState.NEW, wc.c.getContainerState()); wc.initContainer(); // Verify request for public/private resources to localizer ResourcesRequestedMatcher matchesReq = new ResourcesRequestedMatcher(wc.localResources, EnumSet.of( LocalResourceVisibility.PUBLIC, LocalResourceVisibility.PRIVATE, LocalResourceVisibility.APPLICATION)); verify(wc.localizerBus).handle(argThat(matchesReq)); assertEquals(ContainerState.LOCALIZING, wc.c.getContainerState()); } finally { if (wc != null) { wc.finished(); } } } /** * Verify container launch when all resources already cached. 
*/ @Test public void testLocalizationLaunch() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(8, 314159265358979L, 4344, "yak"); assertEquals(ContainerState.NEW, wc.c.getContainerState()); wc.initContainer(); Map<Path, List<String>> localPaths = wc.localizeResources(); // all resources should be localized assertEquals(ContainerState.LOCALIZED, wc.c.getContainerState()); assertNotNull(wc.c.getLocalizedResources()); for (Entry<Path, List<String>> loc : wc.c.getLocalizedResources() .entrySet()) { assertEquals(localPaths.remove(loc.getKey()), loc.getValue()); } assertTrue(localPaths.isEmpty()); final WrappedContainer wcf = wc; // verify container launch ArgumentMatcher<ContainersLauncherEvent> matchesContainerLaunch = new ArgumentMatcher<ContainersLauncherEvent>() { @Override public boolean matches(Object o) { ContainersLauncherEvent launchEvent = (ContainersLauncherEvent) o; return wcf.c == launchEvent.getContainer(); } }; verify(wc.launcherBus).handle(argThat(matchesContainerLaunch)); } finally { if (wc != null) { wc.finished(); } } } @Test @SuppressWarnings("unchecked") // mocked generic public void testExternalKill() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(13, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); int running = metrics.getRunningContainers(); wc.launchContainer(); assertEquals(running + 1, metrics.getRunningContainers()); reset(wc.localizerBus); wc.containerKilledOnRequest(); assertEquals(ContainerState.EXITED_WITH_FAILURE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); int failed = metrics.getFailedContainers(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertEquals(failed + 1, metrics.getFailedContainers()); assertEquals(running, metrics.getRunningContainers()); } finally { if (wc != null) { wc.finished(); } } } @Test @SuppressWarnings("unchecked") // mocked generic public void testCleanupOnFailure() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(10, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.containerFailed(ExitCode.FORCE_KILLED.getExitCode()); assertEquals(ContainerState.EXITED_WITH_FAILURE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } @Test @SuppressWarnings("unchecked") // mocked generic public void testCleanupOnSuccess() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(11, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); int running = metrics.getRunningContainers(); wc.launchContainer(); assertEquals(running + 1, metrics.getRunningContainers()); reset(wc.localizerBus); wc.containerSuccessful(); assertEquals(ContainerState.EXITED_WITH_SUCCESS, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); int completed = metrics.getCompletedContainers(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertEquals(completed + 1, metrics.getCompletedContainers()); assertEquals(running, metrics.getRunningContainers()); } finally { if (wc != null) { wc.finished(); } } } @Test @SuppressWarnings("unchecked") // mocked generic public void testInitWhileDone() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(6, 
314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.containerSuccessful(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); // Now in DONE, issue INIT wc.initContainer(); // Verify still in DONE assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } @Test @SuppressWarnings("unchecked") // mocked generic public void testLocalizationFailureAtDone() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(6, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.containerSuccessful(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); // Now in DONE, issue RESOURCE_FAILED as done by LocalizeRunner wc.resourceFailedContainer(); // Verify still in DONE assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } @Test @SuppressWarnings("unchecked") // mocked generic public void testCleanupOnKillRequest() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(12, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); wc.launchContainer(); reset(wc.localizerBus); wc.killContainer(); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.containerKilledOnRequest(); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } @Test public void testKillOnNew() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(13, 314159265358979L, 4344, "yak"); assertEquals(ContainerState.NEW, wc.c.getContainerState()); int killed = metrics.getKilledContainers(); wc.killContainer(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER, wc.c.cloneAndGetContainerStatus().getExitStatus()); assertTrue(wc.c.cloneAndGetContainerStatus().getDiagnostics() .contains("KillRequest")); assertEquals(killed + 1, metrics.getKilledContainers()); } finally { if (wc != null) { wc.finished(); } } } @Test public void testKillOnLocalizing() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(14, 314159265358979L, 4344, "yak"); wc.initContainer(); assertEquals(ContainerState.LOCALIZING, wc.c.getContainerState()); wc.killContainer(); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER, wc.c.cloneAndGetContainerStatus().getExitStatus()); assertTrue(wc.c.cloneAndGetContainerStatus().getDiagnostics() .contains("KillRequest")); int killed = metrics.getKilledContainers(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertEquals(killed + 1, metrics.getKilledContainers()); } finally { if (wc != null) { wc.finished(); } } } @Test public void testKillOnLocalizationFailed() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(15, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.failLocalizeResources(wc.getLocalResourceCount()); assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState()); 
assertNull(wc.c.getLocalizedResources()); wc.killContainer(); assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); int failed = metrics.getFailedContainers(); wc.containerResourcesCleanup(); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertEquals(failed + 1, metrics.getFailedContainers()); } finally { if (wc != null) { wc.finished(); } } } @Test public void testKillOnLocalizedWhenContainerNotLaunched() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(17, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); assertEquals(ContainerState.LOCALIZED, wc.c.getContainerState()); ContainerLaunch launcher = wc.launcher.running.get(wc.c.getContainerId()); wc.killContainer(); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); launcher.call(); wc.drainDispatcherEvents(); assertEquals(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); int killed = metrics.getKilledContainers(); wc.c.handle(new ContainerEvent(wc.c.getContainerId(), ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP)); assertEquals(ContainerState.DONE, wc.c.getContainerState()); assertEquals(killed + 1, metrics.getKilledContainers()); assertEquals(0, metrics.getRunningContainers()); } finally { if (wc != null) { wc.finished(); } } } @Test public void testKillOnLocalizedWhenContainerLaunched() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(17, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); assertEquals(ContainerState.LOCALIZED, wc.c.getContainerState()); ContainerLaunch launcher = wc.launcher.running.get(wc.c.getContainerId()); launcher.call(); wc.drainDispatcherEvents(); assertEquals(ContainerState.EXITED_WITH_FAILURE, wc.c.getContainerState()); wc.killContainer(); assertEquals(ContainerState.EXITED_WITH_FAILURE, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } @Test public void testResourceLocalizedOnLocalizationFailed() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(16, 314159265358979L, 4344, "yak"); wc.initContainer(); int failCount = wc.getLocalResourceCount()/2; if (failCount == 0) { failCount = 1; } wc.failLocalizeResources(failCount); assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.localizeResourcesFromInvalidState(failCount); assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); Assert.assertTrue(wc.getDiagnostics().contains(FAKE_LOCALIZATION_ERROR)); } finally { if (wc != null) { wc.finished(); } } } @Test public void testResourceFailedOnLocalizationFailed() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(16, 314159265358979L, 4344, "yak"); wc.initContainer(); Iterator<String> lRsrcKeys = wc.localResources.keySet().iterator(); String key1 = lRsrcKeys.next(); String key2 = lRsrcKeys.next(); wc.failLocalizeSpecificResource(key1); assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.failLocalizeSpecificResource(key2); assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); 
verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } @Test public void testResourceFailedOnKilling() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(16, 314159265358979L, 4344, "yak"); wc.initContainer(); Iterator<String> lRsrcKeys = wc.localResources.keySet().iterator(); String key1 = lRsrcKeys.next(); wc.killContainer(); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.failLocalizeSpecificResource(key1); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } /** * Verify serviceData correctly sent. */ @Test public void testServiceData() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(9, 314159265358979L, 4344, "yak", false, true); assertEquals(ContainerState.NEW, wc.c.getContainerState()); wc.initContainer(); for (final Map.Entry<String,ByteBuffer> e : wc.serviceData.entrySet()) { ArgumentMatcher<AuxServicesEvent> matchesServiceReq = new ArgumentMatcher<AuxServicesEvent>() { @Override public boolean matches(Object o) { AuxServicesEvent evt = (AuxServicesEvent) o; return e.getKey().equals(evt.getServiceID()) && 0 == e.getValue().compareTo(evt.getServiceData()); } }; verify(wc.auxBus).handle(argThat(matchesServiceReq)); } final WrappedContainer wcf = wc; // verify launch on empty resource request ArgumentMatcher<ContainersLauncherEvent> matchesLaunchReq = new ArgumentMatcher<ContainersLauncherEvent>() { @Override public boolean matches(Object o) { ContainersLauncherEvent evt = (ContainersLauncherEvent) o; return evt.getType() == ContainersLauncherEventType.LAUNCH_CONTAINER && wcf.cId.equals(evt.getContainer().getContainerId()); } }; verify(wc.launcherBus).handle(argThat(matchesLaunchReq)); } finally { if (wc != null) { wc.finished(); } } } @Test public void testLaunchAfterKillRequest() throws Exception { WrappedContainer wc = null; try { wc = new WrappedContainer(14, 314159265358979L, 4344, "yak"); wc.initContainer(); wc.localizeResources(); wc.killContainer(); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.launchContainer(); assertEquals(ContainerState.KILLING, wc.c.getContainerState()); assertNull(wc.c.getLocalizedResources()); wc.containerKilledOnRequest(); verifyCleanupCall(wc); } finally { if (wc != null) { wc.finished(); } } } private void verifyCleanupCall(WrappedContainer wc) throws Exception { ResourcesReleasedMatcher matchesReq = new ResourcesReleasedMatcher(wc.localResources, EnumSet.of( LocalResourceVisibility.PUBLIC, LocalResourceVisibility.PRIVATE, LocalResourceVisibility.APPLICATION)); verify(wc.localizerBus).handle(argThat(matchesReq)); } private static class ResourcesReleasedMatcher extends ArgumentMatcher<LocalizationEvent> { final HashSet<LocalResourceRequest> resources = new HashSet<LocalResourceRequest>(); ResourcesReleasedMatcher(Map<String, LocalResource> allResources, EnumSet<LocalResourceVisibility> vis) throws URISyntaxException { for (Entry<String, LocalResource> e : allResources.entrySet()) { if (vis.contains(e.getValue().getVisibility())) { resources.add(new LocalResourceRequest(e.getValue())); } } } @Override public boolean matches(Object o) { if (!(o instanceof ContainerLocalizationCleanupEvent)) { return false; } ContainerLocalizationCleanupEvent evt = (ContainerLocalizationCleanupEvent) o; final HashSet<LocalResourceRequest> expected 
= new HashSet<LocalResourceRequest>(resources); for (Collection<LocalResourceRequest> rc : evt.getResources().values()) { for (LocalResourceRequest rsrc : rc) { if (!expected.remove(rsrc)) { return false; } } } return expected.isEmpty(); } } // Accept iff the resource payload matches. private static class ResourcesRequestedMatcher extends ArgumentMatcher<LocalizationEvent> { final HashSet<LocalResourceRequest> resources = new HashSet<LocalResourceRequest>(); ResourcesRequestedMatcher(Map<String, LocalResource> allResources, EnumSet<LocalResourceVisibility> vis) throws URISyntaxException { for (Entry<String, LocalResource> e : allResources.entrySet()) { if (vis.contains(e.getValue().getVisibility())) { resources.add(new LocalResourceRequest(e.getValue())); } } } @Override public boolean matches(Object o) { ContainerLocalizationRequestEvent evt = (ContainerLocalizationRequestEvent) o; final HashSet<LocalResourceRequest> expected = new HashSet<LocalResourceRequest>(resources); for (Collection<LocalResourceRequest> rc : evt.getRequestedResources() .values()) { for (LocalResourceRequest rsrc : rc) { if (!expected.remove(rsrc)) { return false; } } } return expected.isEmpty(); } } private static Entry<String, LocalResource> getMockRsrc(Random r, LocalResourceVisibility vis) { String name = Long.toHexString(r.nextLong()); URL url = BuilderUtils.newURL("file", null, 0, "/local" + vis + "/" + name); LocalResource rsrc = BuilderUtils.newLocalResource(url, LocalResourceType.FILE, vis, r.nextInt(1024) + 1024L, r.nextInt(1024) + 2048L, false); return new SimpleEntry<String, LocalResource>(name, rsrc); } private static Map<String,LocalResource> createLocalResources(Random r) { Map<String,LocalResource> localResources = new HashMap<String,LocalResource>(); for (int i = r.nextInt(5) + 5; i >= 0; --i) { Entry<String,LocalResource> rsrc = getMockRsrc(r, LocalResourceVisibility.PUBLIC); localResources.put(rsrc.getKey(), rsrc.getValue()); } for (int i = r.nextInt(5) + 5; i >= 0; --i) { Entry<String,LocalResource> rsrc = getMockRsrc(r, LocalResourceVisibility.PRIVATE); localResources.put(rsrc.getKey(), rsrc.getValue()); } for (int i = r.nextInt(2) + 2; i >= 0; --i) { Entry<String,LocalResource> rsrc = getMockRsrc(r, LocalResourceVisibility.APPLICATION); localResources.put(rsrc.getKey(), rsrc.getValue()); } return localResources; } private static Map<String,ByteBuffer> createServiceData(Random r) { Map<String,ByteBuffer> serviceData = new HashMap<String,ByteBuffer>(); for (int i = r.nextInt(5) + 5; i >= 0; --i) { String service = Long.toHexString(r.nextLong()); byte[] b = new byte[r.nextInt(1024) + 1024]; r.nextBytes(b); serviceData.put(service, ByteBuffer.wrap(b)); } return serviceData; } @SuppressWarnings("unchecked") private class WrappedContainer { final DrainDispatcher dispatcher; final EventHandler<LocalizationEvent> localizerBus; final EventHandler<ContainersLauncherEvent> launcherBus; final EventHandler<ContainersMonitorEvent> monitorBus; final EventHandler<AuxServicesEvent> auxBus; final EventHandler<ApplicationEvent> appBus; final EventHandler<LogHandlerEvent> LogBus; final ContainersLauncher launcher; final ContainerLaunchContext ctxt; final ContainerId cId; final Container c; final Map<String, LocalResource> localResources; final Map<String, ByteBuffer> serviceData; WrappedContainer(int appId, long timestamp, int id, String user) throws IOException { this(appId, timestamp, id, user, true, false); } @SuppressWarnings("rawtypes") WrappedContainer(int appId, long timestamp, int id, String user, boolean 
withLocalRes, boolean withServiceData) throws IOException { dispatcher = new DrainDispatcher(); dispatcher.init(new Configuration()); localizerBus = mock(EventHandler.class); launcherBus = mock(EventHandler.class); monitorBus = mock(EventHandler.class); auxBus = mock(EventHandler.class); appBus = mock(EventHandler.class); LogBus = mock(EventHandler.class); dispatcher.register(LocalizationEventType.class, localizerBus); dispatcher.register(ContainersLauncherEventType.class, launcherBus); dispatcher.register(ContainersMonitorEventType.class, monitorBus); dispatcher.register(AuxServicesEventType.class, auxBus); dispatcher.register(ApplicationEventType.class, appBus); dispatcher.register(LogHandlerEventType.class, LogBus); Context context = mock(Context.class); when(context.getApplications()).thenReturn( new ConcurrentHashMap<ApplicationId, Application>()); NMNullStateStoreService stateStore = new NMNullStateStoreService(); when(context.getNMStateStore()).thenReturn(stateStore); ContainerExecutor executor = mock(ContainerExecutor.class); launcher = new ContainersLauncher(context, dispatcher, executor, null, null); // create a mock ExecutorService, which will not really launch // ContainerLaunch at all. launcher.containerLauncher = mock(ExecutorService.class); Future future = mock(Future.class); when(launcher.containerLauncher.submit (any(Callable.class))).thenReturn(future); when(future.isDone()).thenReturn(false); when(future.cancel(false)).thenReturn(true); launcher.init(new Configuration()); launcher.start(); dispatcher.register(ContainersLauncherEventType.class, launcher); ctxt = mock(ContainerLaunchContext.class); org.apache.hadoop.yarn.api.records.Container mockContainer = mock(org.apache.hadoop.yarn.api.records.Container.class); cId = BuilderUtils.newContainerId(appId, 1, timestamp, id); when(mockContainer.getId()).thenReturn(cId); Resource resource = BuilderUtils.newResource(1024, 1); when(mockContainer.getResource()).thenReturn(resource); String host = "127.0.0.1"; int port = 1234; long currentTime = System.currentTimeMillis(); ContainerTokenIdentifier identifier = new ContainerTokenIdentifier(cId, "127.0.0.1", user, resource, currentTime + 10000L, 123, currentTime, Priority.newInstance(0), 0); Token token = BuilderUtils.newContainerToken(BuilderUtils.newNodeId(host, port), "password".getBytes(), identifier); when(mockContainer.getContainerToken()).thenReturn(token); if (withLocalRes) { Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); System.out.println("WrappedContainerLocalResource seed: " + seed); localResources = createLocalResources(r); } else { localResources = Collections.<String, LocalResource> emptyMap(); } when(ctxt.getLocalResources()).thenReturn(localResources); if (withServiceData) { Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); System.out.println("ServiceData seed: " + seed); serviceData = createServiceData(r); } else { serviceData = Collections.<String, ByteBuffer> emptyMap(); } when(ctxt.getServiceData()).thenReturn(serviceData); c = new ContainerImpl(conf, dispatcher, new NMNullStateStoreService(), ctxt, null, metrics, identifier); dispatcher.register(ContainerEventType.class, new EventHandler<ContainerEvent>() { @Override public void handle(ContainerEvent event) { c.handle(event); } }); dispatcher.start(); } private void drainDispatcherEvents() { dispatcher.await(); } public void finished() { dispatcher.stop(); } public void initContainer() { c.handle(new ContainerEvent(cId, ContainerEventType.INIT_CONTAINER)); 
drainDispatcherEvents(); } public void resourceFailedContainer() { c.handle(new ContainerEvent(cId, ContainerEventType.RESOURCE_FAILED)); drainDispatcherEvents(); } // Localize resources // Skip some resources so as to consider them failed public Map<Path, List<String>> doLocalizeResources( boolean checkLocalizingState, int skipRsrcCount) throws URISyntaxException { Path cache = new Path("file:///cache"); Map<Path, List<String>> localPaths = new HashMap<Path, List<String>>(); int counter = 0; for (Entry<String, LocalResource> rsrc : localResources.entrySet()) { if (counter++ < skipRsrcCount) { continue; } if (checkLocalizingState) { assertEquals(ContainerState.LOCALIZING, c.getContainerState()); } LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue()); Path p = new Path(cache, rsrc.getKey()); localPaths.put(p, Arrays.asList(rsrc.getKey())); // rsrc copied to p c.handle(new ContainerResourceLocalizedEvent(c.getContainerId(), req, p)); } drainDispatcherEvents(); return localPaths; } public Map<Path, List<String>> localizeResources() throws URISyntaxException { return doLocalizeResources(true, 0); } public void localizeResourcesFromInvalidState(int skipRsrcCount) throws URISyntaxException { doLocalizeResources(false, skipRsrcCount); } public void failLocalizeSpecificResource(String rsrcKey) throws URISyntaxException { LocalResource rsrc = localResources.get(rsrcKey); LocalResourceRequest req = new LocalResourceRequest(rsrc); Exception e = new Exception(FAKE_LOCALIZATION_ERROR); c.handle(new ContainerResourceFailedEvent(c.getContainerId(), req, e .getMessage())); drainDispatcherEvents(); } // fail to localize some resources public void failLocalizeResources(int failRsrcCount) throws URISyntaxException { int counter = 0; for (Entry<String, LocalResource> rsrc : localResources.entrySet()) { if (counter >= failRsrcCount) { break; } ++counter; LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue()); Exception e = new Exception(FAKE_LOCALIZATION_ERROR); c.handle(new ContainerResourceFailedEvent(c.getContainerId(), req, e.getMessage())); } drainDispatcherEvents(); } public void launchContainer() { c.handle(new ContainerEvent(cId, ContainerEventType.CONTAINER_LAUNCHED)); drainDispatcherEvents(); } public void containerSuccessful() { c.handle(new ContainerEvent(cId, ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS)); drainDispatcherEvents(); } public void containerResourcesCleanup() { c.handle(new ContainerEvent(cId, ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP)); drainDispatcherEvents(); } public void containerFailed(int exitCode) { String diagnosticMsg = "Container completed with exit code " + exitCode; c.handle(new ContainerExitEvent(cId, ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, exitCode, diagnosticMsg)); ContainerStatus containerStatus = c.cloneAndGetContainerStatus(); assert containerStatus.getDiagnostics().contains(diagnosticMsg); assert containerStatus.getExitStatus() == exitCode; drainDispatcherEvents(); } public void killContainer() { c.handle(new ContainerKillEvent(cId, ContainerExitStatus.KILLED_BY_RESOURCEMANAGER, "KillRequest")); drainDispatcherEvents(); } public void containerKilledOnRequest() { int exitCode = ContainerExitStatus.KILLED_BY_RESOURCEMANAGER; String diagnosticMsg = "Container completed with exit code " + exitCode; c.handle(new ContainerExitEvent(cId, ContainerEventType.CONTAINER_KILLED_ON_REQUEST, exitCode, diagnosticMsg)); ContainerStatus containerStatus = c.cloneAndGetContainerStatus(); assert 
containerStatus.getDiagnostics().contains(diagnosticMsg); assert containerStatus.getExitStatus() == exitCode; drainDispatcherEvents(); } public int getLocalResourceCount() { return localResources.size(); } public String getDiagnostics() { return c.cloneAndGetContainerStatus().getDiagnostics(); } } }
36,082
36.586458
116
java
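The ResourcesReleasedMatcher and ResourcesRequestedMatcher in TestContainer above share one core check: copy the expected resource set, remove every resource observed in the event, reject anything unknown, and accept only if nothing expected remains. A generic sketch of that set-coverage check follows; the names are hypothetical, and like the matchers it is HashSet-based, so duplicate expectations collapse.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;

public final class ExactCoverageSketch {
  // True iff the grouped observations cover the expected set exactly.
  static <T> boolean coversExactly(Collection<T> expected,
      Collection<? extends Collection<T>> observedGroups) {
    HashSet<T> remaining = new HashSet<T>(expected);
    for (Collection<T> group : observedGroups) {
      for (T item : group) {
        if (!remaining.remove(item)) {
          return false; // observed something that was never expected
        }
      }
    }
    return remaining.isEmpty(); // every expected item was observed
  }

  public static void main(String[] args) {
    System.out.println(coversExactly(Arrays.asList("a", "b"),
        Arrays.asList(Arrays.asList("a"), Arrays.asList("b")))); // true
    System.out.println(coversExactly(Arrays.asList("a", "b"),
        Arrays.asList(Arrays.asList("a", "a")))); // false: "a" seen twice
  }
}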
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.*; public class TestDockerContainerRuntime { private Configuration conf; PrivilegedOperationExecutor mockExecutor; String containerId; Container container; ContainerId cId; ContainerLaunchContext context; HashMap<String, String> env; String image; String runAsUser; String user; String appId; String containerIdStr = containerId; Path containerWorkDir; Path nmPrivateContainerScriptPath; Path nmPrivateTokensPath; Path pidFilePath; List<String> localDirs; List<String> logDirs; String resourcesOptions; @Before public void setup() { String tmpPath = new StringBuffer(System.getProperty("test.build.data")) .append ('/').append("hadoop.tmp.dir").toString(); conf = new Configuration(); conf.set("hadoop.tmp.dir", tmpPath); mockExecutor = Mockito .mock(PrivilegedOperationExecutor.class); containerId = "container_id"; container = mock(Container.class); cId = mock(ContainerId.class); context = mock(ContainerLaunchContext.class); env = new HashMap<String, String>(); image = "busybox:latest"; env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image); when(container.getContainerId()).thenReturn(cId); 
when(cId.toString()).thenReturn(containerId); when(container.getLaunchContext()).thenReturn(context); when(context.getEnvironment()).thenReturn(env); runAsUser = "run_as_user"; user = "user"; appId = "app_id"; containerIdStr = containerId; containerWorkDir = new Path("/test_container_work_dir"); nmPrivateContainerScriptPath = new Path("/test_script_path"); nmPrivateTokensPath = new Path("/test_private_tokens_path"); pidFilePath = new Path("/test_pid_file_path"); localDirs = new ArrayList<>(); logDirs = new ArrayList<>(); resourcesOptions = "cgroups:none"; localDirs.add("/test_local_dir"); logDirs.add("/test_log_dir"); } @Test public void testSelectDockerContainerType() { Map<String, String> envDockerType = new HashMap<>(); Map<String, String> envOtherType = new HashMap<>(); envDockerType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "docker"); envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other"); Assert.assertEquals(false, DockerLinuxContainerRuntime .isDockerContainerRequested(null)); Assert.assertEquals(true, DockerLinuxContainerRuntime .isDockerContainerRequested(envDockerType)); Assert.assertEquals(false, DockerLinuxContainerRuntime .isDockerContainerRequested(envOtherType)); } @Test @SuppressWarnings("unchecked") public void testDockerContainerLaunch() throws ContainerExecutionException, PrivilegedOperationException, IOException { DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime( mockExecutor); runtime.initialize(conf); ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext .Builder(container); builder.setExecutionAttribute(RUN_AS_USER, runAsUser) .setExecutionAttribute(USER, user) .setExecutionAttribute(APPID, appId) .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr) .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir) .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, nmPrivateContainerScriptPath) .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath) .setExecutionAttribute(PID_FILE_PATH, pidFilePath) .setExecutionAttribute(LOCAL_DIRS, localDirs) .setExecutionAttribute(LOG_DIRS, logDirs) .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions); runtime.launchContainer(builder.build()); ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass( PrivilegedOperation.class); //single invocation expected //due to type erasure + mocking, this verification requires a suppress // warning annotation on the entire method verify(mockExecutor, times(1)) .executePrivilegedOperation(anyList(), opCaptor.capture(), any( File.class), any(Map.class), eq(false)); PrivilegedOperation op = opCaptor.getValue(); Assert.assertEquals(PrivilegedOperation.OperationType .LAUNCH_DOCKER_CONTAINER, op.getOperationType()); List<String> args = op.getArguments(); //This invocation of container-executor should use 13 arguments in a // specific order (sigh.) 
Assert.assertEquals(13, args.size()); //verify arguments Assert.assertEquals(runAsUser, args.get(0)); Assert.assertEquals(user, args.get(1)); Assert.assertEquals(Integer.toString(PrivilegedOperation.RunAsUserCommand .LAUNCH_DOCKER_CONTAINER.getValue()), args.get(2)); Assert.assertEquals(appId, args.get(3)); Assert.assertEquals(containerId, args.get(4)); Assert.assertEquals(containerWorkDir.toString(), args.get(5)); Assert.assertEquals(nmPrivateContainerScriptPath.toUri() .toString(), args.get(6)); Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(), args.get(7)); Assert.assertEquals(pidFilePath.toString(), args.get(8)); Assert.assertEquals(localDirs.get(0), args.get(9)); Assert.assertEquals(logDirs.get(0), args.get(10)); Assert.assertEquals(resourcesOptions, args.get(12)); String dockerCommandFile = args.get(11); //This is the expected docker invocation for this case StringBuffer expectedCommandTemplate = new StringBuffer("run --name=%1$s ") .append("--user=%2$s -d ") .append("--workdir=%3$s ") .append("--net=host -v /etc/passwd:/etc/password:ro ") .append("-v %4$s:%4$s ") .append("-v %5$s:%5$s ") .append("-v %6$s:%6$s ") .append("%7$s ") .append("bash %8$s/launch_container.sh"); String expectedCommand = String.format(expectedCommandTemplate.toString(), containerId, runAsUser, containerWorkDir, localDirs.get(0), containerWorkDir, logDirs.get(0), image, containerWorkDir); List<String> dockerCommands = Files.readAllLines(Paths.get (dockerCommandFile), Charset.forName("UTF-8")); Assert.assertEquals(1, dockerCommands.size()); Assert.assertEquals(expectedCommand, dockerCommands.get(0)); } }
8,780
38.913636
120
java
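testDockerContainerLaunch above verifies the launch by capturing the PrivilegedOperation handed to the mocked PrivilegedOperationExecutor and then asserting on its ordered argument list. Here is a stripped-down sketch of that ArgumentCaptor idiom, using a hypothetical Runner/Op pair instead of the real executor API:

import java.util.Arrays;
import java.util.List;

import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

public class CaptorIdiomSketchTest {
  // Hypothetical stand-ins for PrivilegedOperation and its executor.
  static class Op {
    final List<String> args;
    Op(List<String> args) { this.args = args; }
  }
  interface Runner { void run(Op op); }

  @Test
  public void capturesTheOperation() {
    Runner runner = mock(Runner.class);
    runner.run(new Op(Arrays.asList("run_as_user", "user", "app_id")));

    // Capture the single Op passed to the mock and assert on its args,
    // mirroring opCaptor.capture()/opCaptor.getValue() above.
    ArgumentCaptor<Op> captor = ArgumentCaptor.forClass(Op.class);
    verify(runner, times(1)).run(captor.capture());
    Assert.assertEquals("run_as_user", captor.getValue().args.get(0));
  }
}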
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
/* * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.List; public class TestPrivilegedOperationExecutor { private static final Log LOG = LogFactory .getLog(TestPrivilegedOperationExecutor.class); private String localDataDir; private String customExecutorPath; private Configuration nullConf = null; private Configuration emptyConf; private Configuration confWithExecutorPath; private String cGroupTasksNone; private String cGroupTasksInvalid; private String cGroupTasks1; private String cGroupTasks2; private String cGroupTasks3; private PrivilegedOperation opDisallowed; private PrivilegedOperation opTasksNone; private PrivilegedOperation opTasksInvalid; private PrivilegedOperation opTasks1; private PrivilegedOperation opTasks2; private PrivilegedOperation opTasks3; @Before public void setup() { localDataDir = System.getProperty("test.build.data"); customExecutorPath = localDataDir + "/bin/container-executor"; emptyConf = new YarnConfiguration(); confWithExecutorPath = new YarnConfiguration(); confWithExecutorPath.set(YarnConfiguration .NM_LINUX_CONTAINER_EXECUTOR_PATH, customExecutorPath); cGroupTasksNone = "none"; cGroupTasksInvalid = "invalid_string"; cGroupTasks1 = "cpu/hadoop_yarn/container_01/tasks"; cGroupTasks2 = "net_cls/hadoop_yarn/container_01/tasks"; cGroupTasks3 = "blkio/hadoop_yarn/container_01/tasks"; opDisallowed = new PrivilegedOperation (PrivilegedOperation.OperationType.DELETE_AS_USER, (String) null); opTasksNone = new PrivilegedOperation (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasksNone); opTasksInvalid = new PrivilegedOperation (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, cGroupTasksInvalid); opTasks1 = new PrivilegedOperation (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks1); opTasks2 = new PrivilegedOperation (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks2); opTasks3 = new PrivilegedOperation (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks3); } @Test public void testExecutorPath() { String containerExePath = PrivilegedOperationExecutor .getContainerExecutorExecutablePath(nullConf); //In case HADOOP_YARN_HOME isn't set, CWD is used. 
If conf is null or //NM_LINUX_CONTAINER_EXECUTOR_PATH is not set, then a defaultPath is //constructed. String yarnHomeEnvVar = System.getenv("HADOOP_YARN_HOME"); String yarnHome = yarnHomeEnvVar != null ? yarnHomeEnvVar : new File("").getAbsolutePath(); String expectedPath = yarnHome + "/bin/container-executor"; Assert.assertEquals(expectedPath, containerExePath); containerExePath = PrivilegedOperationExecutor .getContainerExecutorExecutablePath(emptyConf); Assert.assertEquals(expectedPath, containerExePath); //if NM_LINUX_CONTAINER_EXECUTOR_PATH is set, this must be returned expectedPath = customExecutorPath; containerExePath = PrivilegedOperationExecutor .getContainerExecutorExecutablePath(confWithExecutorPath); Assert.assertEquals(expectedPath, containerExePath); } @Test public void testExecutionCommand() { PrivilegedOperationExecutor exec = PrivilegedOperationExecutor .getInstance(confWithExecutorPath); PrivilegedOperation op = new PrivilegedOperation(PrivilegedOperation .OperationType.TC_MODIFY_STATE, (String) null); String[] cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op); //No arguments added - so the resulting array should consist of //1)full path to executor 2) cli switch Assert.assertEquals(2, cmdArray.length); Assert.assertEquals(customExecutorPath, cmdArray[0]); Assert.assertEquals(op.getOperationType().getOption(), cmdArray[1]); //other (dummy) arguments to tc modify state String[] additionalArgs = { "cmd_file_1", "cmd_file_2", "cmd_file_3"}; op.appendArgs(additionalArgs); cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op); //Resulting array should be of length 2 greater than the number of //additional arguments added. Assert.assertEquals(2 + additionalArgs.length, cmdArray.length); Assert.assertEquals(customExecutorPath, cmdArray[0]); Assert.assertEquals(op.getOperationType().getOption(), cmdArray[1]); //Rest of args should be same as additional args. for (int i = 0; i < additionalArgs.length; ++i) { Assert.assertEquals(additionalArgs[i], cmdArray[2 + i]); } //Now test prefix commands List<String> prefixCommands = Arrays.asList("nice", "-10"); cmdArray = exec.getPrivilegedOperationExecutionCommand(prefixCommands, op); int prefixLength = prefixCommands.size(); //Resulting array should be of length of prefix command args + 2 (exec // path + switch) + length of additional args. Assert.assertEquals(prefixLength + 2 + additionalArgs.length, cmdArray.length); //Prefix command array comes first for (int i = 0; i < prefixLength; ++i) { Assert.assertEquals(prefixCommands.get(i), cmdArray[i]); } //Followed by the container executor path and the cli switch Assert.assertEquals(customExecutorPath, cmdArray[prefixLength]); Assert.assertEquals(op.getOperationType().getOption(), cmdArray[prefixLength + 1]); //Followed by the rest of the args //Rest of args should be same as additional args. 
for (int i = 0; i < additionalArgs.length; ++i) { Assert.assertEquals(additionalArgs[i], cmdArray[prefixLength + 2 + i]); } } @Test public void testSquashCGroupOperationsWithInvalidOperations() { List<PrivilegedOperation> ops = new ArrayList<>(); //Ensure that disallowed ops are rejected ops.add(opTasksNone); ops.add(opDisallowed); try { PrivilegedOperationExecutor.squashCGroupOperations(ops); Assert.fail("Expected squash operation to fail with an exception!"); } catch (PrivilegedOperationException e) { LOG.info("Caught expected exception : " + e); } //Ensure that invalid strings are rejected ops.clear(); ops.add(opTasksNone); ops.add(opTasksInvalid); try { PrivilegedOperationExecutor.squashCGroupOperations(ops); Assert.fail("Expected squash operation to fail with an exception!"); } catch (PrivilegedOperationException e) { LOG.info("Caught expected exception : " + e); } } @Test public void testSquashCGroupOperationsWithValidOperations() { List<PrivilegedOperation> ops = new ArrayList<>(); //Test squashing, including 'none' ops.clear(); ops.add(opTasks1); //this is expected to be ignored ops.add(opTasksNone); ops.add(opTasks2); ops.add(opTasks3); try { PrivilegedOperation op = PrivilegedOperationExecutor .squashCGroupOperations(ops); String expected = new StringBuffer (PrivilegedOperation.CGROUP_ARG_PREFIX) .append(cGroupTasks1).append(PrivilegedOperation .LINUX_FILE_PATH_SEPARATOR) .append(cGroupTasks2).append(PrivilegedOperation .LINUX_FILE_PATH_SEPARATOR) .append(cGroupTasks3).toString(); //We expect exactly one argument Assert.assertEquals(1, op.getArguments().size()); //Squashed list of tasks files Assert.assertEquals(expected, op.getArguments().get(0)); } catch (PrivilegedOperationException e) { LOG.info("Caught unexpected exception : " + e); Assert.fail("Caught unexpected exception: " + e); } } }
9,064
37.905579
84
java
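The squashing contract pinned down by testSquashCGroupOperationsWithValidOperations above is: several ADD_PID_TO_CGROUP operations collapse into a single operation whose one argument joins the tasks-file paths, with "none" entries dropped. A minimal standalone sketch of that idea, with assumed stand-in values for the real PrivilegedOperation.CGROUP_ARG_PREFIX and LINUX_FILE_PATH_SEPARATOR constants:

import java.util.Arrays;
import java.util.List;
import java.util.StringJoiner;

public class SquashCGroupOpsSketch {
  // Stand-ins: the real values live on PrivilegedOperation and may differ.
  static final String CGROUP_ARG_PREFIX = "cgroups=";
  static final String PATH_SEPARATOR = "%";

  // Joins tasks-file paths into the single squashed argument, skipping "none".
  static String squash(List<String> tasksFiles) {
    StringJoiner joined = new StringJoiner(PATH_SEPARATOR, CGROUP_ARG_PREFIX, "");
    for (String file : tasksFiles) {
      if (!"none".equals(file)) {
        joined.add(file);
      }
    }
    return joined.toString();
  }

  public static void main(String[] args) {
    // Mirrors the three tasks files plus the ignored "none" entry used above.
    System.out.println(squash(Arrays.asList(
        "cpu/hadoop_yarn/container_01/tasks",
        "none",
        "net_cls/hadoop_yarn/container_01/tasks",
        "blkio/hadoop_yarn/container_01/tasks")));
  }
}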
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficControlBandwidthHandlerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; import java.io.File; import java.util.List; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; public class TestTrafficControlBandwidthHandlerImpl { private static final Log LOG = LogFactory.getLog(TestTrafficControlBandwidthHandlerImpl.class); private static final int ROOT_BANDWIDTH_MBIT = 100; private static final int YARN_BANDWIDTH_MBIT = 70; private static final int TEST_CLASSID = 100; private static final String TEST_CLASSID_STR = "42:100"; private static final String TEST_CONTAINER_ID_STR = "container_01"; private static final String TEST_TASKS_FILE = "testTasksFile"; private PrivilegedOperationExecutor privilegedOperationExecutorMock; private CGroupsHandler cGroupsHandlerMock; private TrafficController trafficControllerMock; private Configuration conf; private String tmpPath; private String device; ContainerId containerIdMock; Container containerMock; @Before public void setup() { privilegedOperationExecutorMock = mock(PrivilegedOperationExecutor.class); cGroupsHandlerMock = mock(CGroupsHandler.class); trafficControllerMock = mock(TrafficController.class); conf = new YarnConfiguration(); tmpPath = new StringBuffer(System.getProperty("test.build.data")).append ('/').append("hadoop.tmp.dir").toString(); device = YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_INTERFACE; containerIdMock = mock(ContainerId.class); containerMock = mock(Container.class); when(containerIdMock.toString()).thenReturn(TEST_CONTAINER_ID_STR); //mock returning a mock - an angel died somewhere.
when(containerMock.getContainerId()).thenReturn(containerIdMock); conf.setInt(YarnConfiguration .NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, ROOT_BANDWIDTH_MBIT); conf.setInt(YarnConfiguration .NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, YARN_BANDWIDTH_MBIT); conf.set("hadoop.tmp.dir", tmpPath); //In these tests, we'll only use TrafficController with recovery disabled conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); } @Test public void testBootstrap() { TrafficControlBandwidthHandlerImpl handlerImpl = new TrafficControlBandwidthHandlerImpl(privilegedOperationExecutorMock, cGroupsHandlerMock, trafficControllerMock); try { handlerImpl.bootstrap(conf); verify(cGroupsHandlerMock).mountCGroupController( eq(CGroupsHandler.CGroupController.NET_CLS)); verifyNoMoreInteractions(cGroupsHandlerMock); verify(trafficControllerMock).bootstrap(eq(device), eq(ROOT_BANDWIDTH_MBIT), eq(YARN_BANDWIDTH_MBIT)); verifyNoMoreInteractions(trafficControllerMock); } catch (ResourceHandlerException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected ResourceHandlerException!"); } } @Test public void testLifeCycle() { TrafficController trafficControllerSpy = spy(new TrafficController(conf, privilegedOperationExecutorMock)); TrafficControlBandwidthHandlerImpl handlerImpl = new TrafficControlBandwidthHandlerImpl(privilegedOperationExecutorMock, cGroupsHandlerMock, trafficControllerSpy); try { handlerImpl.bootstrap(conf); testPreStart(trafficControllerSpy, handlerImpl); testPostComplete(trafficControllerSpy, handlerImpl); } catch (ResourceHandlerException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected ResourceHandlerException!"); } } private void testPreStart(TrafficController trafficControllerSpy, TrafficControlBandwidthHandlerImpl handlerImpl) throws ResourceHandlerException { //This is not the cleanest of solutions - but since we are testing the //preStart/postComplete lifecycle, we don't have a different way of //handling this - we don't keep track of the number of invocations by //a class we are not testing here (TrafficController) //So, we'll reset this mock. This is not a problem with other mocks. 
reset(privilegedOperationExecutorMock); doReturn(TEST_CLASSID).when(trafficControllerSpy).getNextClassId(); doReturn(TEST_CLASSID_STR).when(trafficControllerSpy) .getStringForNetClsClassId(TEST_CLASSID); when(cGroupsHandlerMock.getPathForCGroupTasks(CGroupsHandler .CGroupController.NET_CLS, TEST_CONTAINER_ID_STR)).thenReturn( TEST_TASKS_FILE); List<PrivilegedOperation> ops = handlerImpl.preStart(containerMock); //Ensure that cgroups is created and updated as expected verify(cGroupsHandlerMock).createCGroup( eq(CGroupsHandler.CGroupController.NET_CLS), eq(TEST_CONTAINER_ID_STR)); verify(cGroupsHandlerMock).updateCGroupParam( eq(CGroupsHandler.CGroupController.NET_CLS), eq(TEST_CONTAINER_ID_STR), eq(CGroupsHandler.CGROUP_PARAM_CLASSID), eq(TEST_CLASSID_STR)); //Now check the privileged operations being returned //We expect two operations - one for adding pid to tasks file and another //for a tc modify operation Assert.assertEquals(2, ops.size()); //Verify that the add pid op is correct PrivilegedOperation addPidOp = ops.get(0); String expectedAddPidOpArg = PrivilegedOperation.CGROUP_ARG_PREFIX + TEST_TASKS_FILE; List<String> addPidOpArgs = addPidOp.getArguments(); Assert.assertEquals(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, addPidOp.getOperationType()); Assert.assertEquals(1, addPidOpArgs.size()); Assert.assertEquals(expectedAddPidOpArg, addPidOpArgs.get(0)); //Verify that the tc modify op is correct PrivilegedOperation tcModifyOp = ops.get(1); List<String> tcModifyOpArgs = tcModifyOp.getArguments(); Assert.assertEquals(PrivilegedOperation.OperationType.TC_MODIFY_STATE, tcModifyOp.getOperationType()); Assert.assertEquals(1, tcModifyOpArgs.size()); //verify that the tc command file exists Assert.assertTrue(new File(tcModifyOpArgs.get(0)).exists()); } private void testPostComplete(TrafficController trafficControllerSpy, TrafficControlBandwidthHandlerImpl handlerImpl) throws ResourceHandlerException { //This is not the cleanest of solutions - but since we are testing the //preStart/postComplete lifecycle, we don't have a different way of //handling this - we don't keep track of the number of invocations by //a class we are not testing here (TrafficController) //So, we'll reset this mock. This is not a problem with other mocks. reset(privilegedOperationExecutorMock); List<PrivilegedOperation> ops = handlerImpl.postComplete(containerIdMock); verify(cGroupsHandlerMock).deleteCGroup( eq(CGroupsHandler.CGroupController.NET_CLS), eq(TEST_CONTAINER_ID_STR)); try { //capture privileged op argument and ensure it is correct ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass (PrivilegedOperation.class); verify(privilegedOperationExecutorMock) .executePrivilegedOperation(opCaptor.capture(), eq(false)); List<String> args = opCaptor.getValue().getArguments(); Assert.assertEquals(PrivilegedOperation.OperationType.TC_MODIFY_STATE, opCaptor.getValue().getOperationType()); Assert.assertEquals(1, args.size()); //ensure that tc command file exists Assert.assertTrue(new File(args.get(0)).exists()); verify(trafficControllerSpy).releaseClassId(TEST_CLASSID); } catch (PrivilegedOperationException e) { LOG.error("Caught exception: " + e); Assert.fail("Unexpected PrivilegedOperationException from mock!"); } //We don't expect any operations to be returned here Assert.assertNull(ops); } @After public void teardown() { FileUtil.fullyDelete(new File(tmpPath)); } }
9,876
41.573276
112
java
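testPostComplete above leans on Mockito's ArgumentCaptor to inspect the PrivilegedOperation that the handler passes to its executor mock. A self-contained sketch of that capture-and-verify pattern, using a hypothetical Executor interface in place of PrivilegedOperationExecutor:

import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.mockito.ArgumentCaptor;

public class ArgumentCaptorSketch {
  // Hypothetical stand-in for PrivilegedOperationExecutor.
  interface Executor {
    void execute(String operation, boolean grabOutput);
  }

  public static void main(String[] args) {
    Executor executorMock = mock(Executor.class);
    // The code under test would make this call internally.
    executorMock.execute("tc-modify-state", false);
    // Capture the first argument while matching the second exactly.
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    verify(executorMock).execute(captor.capture(), eq(false));
    System.out.println(captor.getValue()); // prints: tc-modify-state
  }
}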
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsBlkioResourceHandlerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import java.util.List; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; /** * Tests for the cgroups disk handler implementation. */ public class TestCGroupsBlkioResourceHandlerImpl { private CGroupsHandler mockCGroupsHandler; private CGroupsBlkioResourceHandlerImpl cGroupsBlkioResourceHandlerImpl; @Before public void setup() { mockCGroupsHandler = mock(CGroupsHandler.class); cGroupsBlkioResourceHandlerImpl = new CGroupsBlkioResourceHandlerImpl(mockCGroupsHandler); } @Test public void testBootstrap() throws Exception { Configuration conf = new YarnConfiguration(); List<PrivilegedOperation> ret = cGroupsBlkioResourceHandlerImpl.bootstrap(conf); verify(mockCGroupsHandler, times(1)).mountCGroupController( CGroupsHandler.CGroupController.BLKIO); Assert.assertNull(ret); } @Test public void testPreStart() throws Exception { String id = "container_01_01"; String path = "test-path/" + id; ContainerId mockContainerId = mock(ContainerId.class); when(mockContainerId.toString()).thenReturn(id); Container mockContainer = mock(Container.class); when(mockContainer.getContainerId()).thenReturn(mockContainerId); when( mockCGroupsHandler.getPathForCGroupTasks( CGroupsHandler.CGroupController.BLKIO, id)).thenReturn(path); List<PrivilegedOperation> ret = cGroupsBlkioResourceHandlerImpl.preStart(mockContainer); verify(mockCGroupsHandler, times(1)).createCGroup( CGroupsHandler.CGroupController.BLKIO, id); verify(mockCGroupsHandler, times(1)).updateCGroupParam( CGroupsHandler.CGroupController.BLKIO, id, CGroupsHandler.CGROUP_PARAM_BLKIO_WEIGHT, CGroupsBlkioResourceHandlerImpl.DEFAULT_WEIGHT); Assert.assertNotNull(ret); Assert.assertEquals(1, ret.size()); PrivilegedOperation op = ret.get(0); Assert.assertEquals(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, op.getOperationType()); List<String> args = op.getArguments(); Assert.assertEquals(1, args.size()); Assert.assertEquals(PrivilegedOperation.CGROUP_ARG_PREFIX + path, args.get(0)); } @Test public void testReacquireContainer() throws Exception { ContainerId containerIdMock = mock(ContainerId.class); Assert.assertNull(cGroupsBlkioResourceHandlerImpl 
.reacquireContainer(containerIdMock)); } @Test public void testPostComplete() throws Exception { String id = "container_01_01"; ContainerId mockContainerId = mock(ContainerId.class); when(mockContainerId.toString()).thenReturn(id); Assert.assertNull(cGroupsBlkioResourceHandlerImpl .postComplete(mockContainerId)); verify(mockCGroupsHandler, times(1)).deleteCGroup( CGroupsHandler.CGroupController.BLKIO, id); } @Test public void testTeardown() throws Exception { Assert.assertNull(cGroupsBlkioResourceHandlerImpl.teardown()); } }
4,370
36.358974
103
java
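testPreStart above asserts that the handler applies the default disk weight via updateCGroupParam(BLKIO, id, CGROUP_PARAM_BLKIO_WEIGHT, DEFAULT_WEIGHT). Mechanically, updating a cgroup parameter comes down to composing <controller-mount>/<cgroup-id>/<controller>.<param> and writing the value. A hedged sketch with a temp directory standing in for the mount and an illustrative weight value:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class CGroupParamSketch {
  // Builds <controllerRoot>/<cgroupId>/<controller>.<param>, mirroring the
  // path shape asserted in the cgroups handler tests.
  static Path paramPath(Path controllerRoot, String cgroupId,
      String controller, String param) {
    return controllerRoot.resolve(cgroupId).resolve(controller + "." + param);
  }

  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("blkio-demo"); // stand-in mount
    Path weightFile = paramPath(root, "container_01_01", "blkio", "weight");
    Files.createDirectories(weightFile.getParent());
    // "500" is illustrative; the real DEFAULT_WEIGHT value is defined in
    // CGroupsBlkioResourceHandlerImpl.
    Files.write(weightFile, "500".getBytes(StandardCharsets.UTF_8));
    System.out.println(new String(
        Files.readAllBytes(weightFile), StandardCharsets.UTF_8));
  }
}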
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestResourceHandlerModule.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import java.util.List; public class TestResourceHandlerModule { private static final Log LOG = LogFactory. getLog(TestResourceHandlerModule.class); Configuration emptyConf; Configuration networkEnabledConf; @Before public void setup() throws Exception { emptyConf = new YarnConfiguration(); networkEnabledConf = new YarnConfiguration(); networkEnabledConf.setBoolean(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED, true); //We need to bypass mtab parsing for figuring out cgroups mount locations networkEnabledConf.setBoolean(YarnConfiguration .NM_LINUX_CONTAINER_CGROUPS_MOUNT, true); ResourceHandlerModule.nullifyResourceHandlerChain(); } @Test public void testOutboundBandwidthHandler() { try { //This resourceHandler should be non-null only if network as a resource //is explicitly enabled OutboundBandwidthResourceHandler resourceHandler = ResourceHandlerModule .getOutboundBandwidthResourceHandler(emptyConf); Assert.assertNull(resourceHandler); //When network as a resource is enabled this should be non-null resourceHandler = ResourceHandlerModule .getOutboundBandwidthResourceHandler(networkEnabledConf); Assert.assertNotNull(resourceHandler); //Ensure that outbound bandwidth resource handler is present in the chain ResourceHandlerChain resourceHandlerChain = ResourceHandlerModule .getConfiguredResourceHandlerChain(networkEnabledConf); List<ResourceHandler> resourceHandlers = resourceHandlerChain .getResourceHandlerList(); //Exactly one resource handler in chain Assert.assertEquals(1, resourceHandlers.size()); //Same instance is expected to be in the chain.
Assert.assertTrue(resourceHandlers.get(0) == resourceHandler); } catch (ResourceHandlerException e) { Assert.fail("Unexpected ResourceHandlerException: " + e); } } @Test public void testDiskResourceHandler() throws Exception { DiskResourceHandler handler = ResourceHandlerModule.getDiskResourceHandler(emptyConf); Assert.assertNull(handler); Configuration diskConf = new YarnConfiguration(); diskConf.setBoolean(YarnConfiguration.NM_DISK_RESOURCE_ENABLED, true); handler = ResourceHandlerModule.getDiskResourceHandler(diskConf); Assert.assertNotNull(handler); ResourceHandlerChain resourceHandlerChain = ResourceHandlerModule.getConfiguredResourceHandlerChain(diskConf); List<ResourceHandler> resourceHandlers = resourceHandlerChain.getResourceHandlerList(); // Exactly one resource handler in chain Assert.assertEquals(1, resourceHandlers.size()); // Same instance is expected to be in the chain. Assert.assertTrue(resourceHandlers.get(0) == handler); } }
3,991
38.137255
83
java
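The module tests above pin down a design choice: each get*ResourceHandler(conf) returns null unless the matching feature flag is enabled, so a null handler simply means "feature off". A minimal sketch of that config-gated factory shape, with a stand-in flag name rather than the real YarnConfiguration key:

import java.util.HashMap;
import java.util.Map;

public class GatedFactorySketch {
  // Returns a handler only when the feature flag is set; null otherwise.
  static Object getHandler(Map<String, String> conf, String flagKey) {
    boolean enabled = Boolean.parseBoolean(conf.getOrDefault(flagKey, "false"));
    return enabled ? new Object() /* stand-in for the real handler */ : null;
  }

  public static void main(String[] args) {
    String flag = "nm.network.resource.enabled"; // stand-in key
    Map<String, String> conf = new HashMap<>();
    System.out.println(getHandler(conf, flag));          // null: feature disabled
    conf.put(flag, "true");
    System.out.println(getHandler(conf, flag) != null);  // true: handler built
  }
}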
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import java.nio.file.Files; import java.util.Arrays; import java.util.List; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class TestTrafficController { private static final Log LOG = LogFactory.getLog(TestTrafficController.class); private static final int ROOT_BANDWIDTH_MBIT = 100; private static final int YARN_BANDWIDTH_MBIT = 70; private static final int CONTAINER_BANDWIDTH_MBIT = 10; //These constants are closely tied to the implementation of TrafficController //and will have to be modified in tandem with any related TrafficController //changes.
private static final String DEVICE = "eth0"; private static final String WIPE_STATE_CMD = "qdisc del dev eth0 parent root"; private static final String ADD_ROOT_QDISC_CMD = "qdisc add dev eth0 root handle 42: htb default 2"; private static final String ADD_CGROUP_FILTER_CMD = "filter add dev eth0 parent 42: protocol ip prio 10 handle 1: cgroup"; private static final String ADD_ROOT_CLASS_CMD = "class add dev eth0 parent 42:0 classid 42:1 htb rate 100mbit ceil 100mbit"; private static final String ADD_DEFAULT_CLASS_CMD = "class add dev eth0 parent 42:1 classid 42:2 htb rate 30mbit ceil 100mbit"; private static final String ADD_YARN_CLASS_CMD = "class add dev eth0 parent 42:1 classid 42:3 htb rate 70mbit ceil 70mbit"; private static final String DEFAULT_TC_STATE_EXAMPLE = "qdisc pfifo_fast 0: root refcnt 2 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1"; private static final String READ_QDISC_CMD = "qdisc show dev eth0"; private static final String READ_FILTER_CMD = "filter show dev eth0"; private static final String READ_CLASS_CMD = "class show dev eth0"; private static final int MIN_CONTAINER_CLASS_ID = 4; private static final String FORMAT_CONTAINER_CLASS_STR = "0x0042%04d"; private static final String FORMAT_ADD_CONTAINER_CLASS_TO_DEVICE = "class add dev eth0 parent 42:3 classid 42:%d htb rate 10mbit ceil %dmbit"; private static final String FORMAT_DELETE_CONTAINER_CLASS_FROM_DEVICE = "class del dev eth0 classid 42:%d"; private static final int TEST_CLASS_ID = 97; //decimal form of 0x00420097 - when reading a classid file, it is read out //as decimal private static final String TEST_CLASS_ID_DECIMAL_STR = "4325527"; private Configuration conf; private String tmpPath; private PrivilegedOperationExecutor privilegedOperationExecutorMock; @Before public void setup() { privilegedOperationExecutorMock = mock(PrivilegedOperationExecutor.class); conf = new YarnConfiguration(); tmpPath = new StringBuffer(System.getProperty("test.build.data")).append ('/').append("hadoop.tmp.dir").toString(); conf.set("hadoop.tmp.dir", tmpPath); } private void verifyTrafficControlOperation(PrivilegedOperation op, PrivilegedOperation.OperationType expectedOpType, List<String> expectedTcCmds) throws IOException { //Verify that the optype matches Assert.assertEquals(expectedOpType, op.getOperationType()); List<String> args = op.getArguments(); //Verify that arg count is always 1 (tc command file) for a tc operation Assert.assertEquals(1, args.size()); File tcCmdsFile = new File(args.get(0)); //Verify that command file exists Assert.assertTrue(tcCmdsFile.exists()); List<String> tcCmds = Files.readAllLines(tcCmdsFile.toPath(), Charset.forName("UTF-8")); //Verify that the number of commands is the same as expected and verify //that each command is the same, in sequence Assert.assertEquals(expectedTcCmds.size(), tcCmds.size()); for (int i = 0; i < tcCmds.size(); ++i) { Assert.assertEquals(expectedTcCmds.get(i), tcCmds.get(i)); } } @Test public void testBootstrapRecoveryDisabled() { conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock); try { trafficController .bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT); ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass (PrivilegedOperation.class); //NM_RECOVERY_DISABLED - so we expect two privileged operation executions //one for wiping tc state - a second for initializing state verify(privilegedOperationExecutorMock, times(2))
.executePrivilegedOperation(opCaptor.capture(), eq(false)); //Now verify that the two operations were correct List<PrivilegedOperation> ops = opCaptor.getAllValues(); verifyTrafficControlOperation(ops.get(0), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(WIPE_STATE_CMD)); verifyTrafficControlOperation(ops.get(1), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(ADD_ROOT_QDISC_CMD, ADD_CGROUP_FILTER_CMD, ADD_ROOT_CLASS_CMD, ADD_DEFAULT_CLASS_CMD, ADD_YARN_CLASS_CMD)); } catch (ResourceHandlerException | PrivilegedOperationException | IOException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName()); } } @Test public void testBootstrapRecoveryEnabled() { conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock); try { //Return a default tc state when attempting to read state when(privilegedOperationExecutorMock.executePrivilegedOperation( any(PrivilegedOperation.class), eq(true))) .thenReturn(DEFAULT_TC_STATE_EXAMPLE); trafficController .bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT); ArgumentCaptor<PrivilegedOperation> readOpCaptor = ArgumentCaptor.forClass (PrivilegedOperation.class); //NM_RECOVERY_ENABLED - so we expect three privileged operation executions //1) read tc state 2) wipe tc state 3) init tc state //First, verify read op verify(privilegedOperationExecutorMock, times(1)) .executePrivilegedOperation(readOpCaptor.capture(), eq(true)); List<PrivilegedOperation> readOps = readOpCaptor.getAllValues(); verifyTrafficControlOperation(readOps.get(0), PrivilegedOperation.OperationType.TC_READ_STATE, Arrays.asList(READ_QDISC_CMD, READ_FILTER_CMD, READ_CLASS_CMD)); ArgumentCaptor<PrivilegedOperation> writeOpCaptor = ArgumentCaptor .forClass(PrivilegedOperation.class); verify(privilegedOperationExecutorMock, times(2)) .executePrivilegedOperation(writeOpCaptor.capture(), eq(false)); //Now verify that the two write operations were correct List<PrivilegedOperation> writeOps = writeOpCaptor.getAllValues(); verifyTrafficControlOperation(writeOps.get(0), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(WIPE_STATE_CMD)); verifyTrafficControlOperation(writeOps.get(1), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(ADD_ROOT_QDISC_CMD, ADD_CGROUP_FILTER_CMD, ADD_ROOT_CLASS_CMD, ADD_DEFAULT_CLASS_CMD, ADD_YARN_CLASS_CMD)); } catch (ResourceHandlerException | PrivilegedOperationException | IOException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName()); } } @Test public void testInvalidBuilder() { conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock); try { trafficController .bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT); try { //Invalid op type for TC batch builder TrafficController.BatchBuilder invalidBuilder = trafficController.
new BatchBuilder( PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP); Assert.fail("Invalid builder check failed!"); } catch (ResourceHandlerException e) { //expected } } catch (ResourceHandlerException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName()); } } @Test public void testClassIdFileContentParsing() { conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock); //Verify that classid file contents are parsed correctly //This call strips the QDISC prefix and returns the classid associated with //the container int parsedClassId = trafficController.getClassIdFromFileContents (TEST_CLASS_ID_DECIMAL_STR); Assert.assertEquals(TEST_CLASS_ID, parsedClassId); } @Test public void testContainerOperations() { conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock); try { trafficController .bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT); int classId = trafficController.getNextClassId(); Assert.assertTrue(classId >= MIN_CONTAINER_CLASS_ID); Assert.assertEquals(String.format(FORMAT_CONTAINER_CLASS_STR, classId), trafficController.getStringForNetClsClassId(classId)); //Verify that the operation is setup correctly with strictMode = false TrafficController.BatchBuilder builder = trafficController. new BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE) .addContainerClass(classId, CONTAINER_BANDWIDTH_MBIT, false); PrivilegedOperation addClassOp = builder.commitBatchToTempFile(); String expectedAddClassCmd = String.format (FORMAT_ADD_CONTAINER_CLASS_TO_DEVICE, classId, YARN_BANDWIDTH_MBIT); verifyTrafficControlOperation(addClassOp, PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(expectedAddClassCmd)); //Verify that the operation is setup correctly with strictMode = true TrafficController.BatchBuilder strictModeBuilder = trafficController. new BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE) .addContainerClass(classId, CONTAINER_BANDWIDTH_MBIT, true); PrivilegedOperation addClassStrictModeOp = strictModeBuilder .commitBatchToTempFile(); String expectedAddClassStrictModeCmd = String.format (FORMAT_ADD_CONTAINER_CLASS_TO_DEVICE, classId, CONTAINER_BANDWIDTH_MBIT); verifyTrafficControlOperation(addClassStrictModeOp, PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(expectedAddClassStrictModeCmd)); TrafficController.BatchBuilder deleteBuilder = trafficController.new BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE) .deleteContainerClass(classId); PrivilegedOperation deleteClassOp = deleteBuilder.commitBatchToTempFile(); String expectedDeleteClassCmd = String.format (FORMAT_DELETE_CONTAINER_CLASS_FROM_DEVICE, classId); verifyTrafficControlOperation(deleteClassOp, PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(expectedDeleteClassCmd)); } catch (ResourceHandlerException | IOException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName()); } } @After public void teardown() { FileUtil.fullyDelete(new File(tmpPath)); } }
13,796
41.064024
112
java
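verifyTrafficControlOperation above shows the shape of every tc operation: a single argument naming a command file, whose lines are the batched tc commands. A standalone sketch of the commit-batch-to-temp-file idea, reusing two of the command strings asserted in the tests:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;

public class TcBatchFileSketch {
  public static void main(String[] args) throws IOException {
    // Commands accumulated by a builder; written one per line, in order.
    List<String> batch = Arrays.asList(
        "qdisc del dev eth0 parent root",
        "qdisc add dev eth0 root handle 42: htb default 2");
    Path cmdFile = Files.createTempFile("tc-commands", ".cmds");
    Files.write(cmdFile, batch, StandardCharsets.UTF_8);
    // A privileged helper would later replay this file; here we just read it
    // back the same way verifyTrafficControlOperation does.
    System.out.println(Files.readAllLines(cmdFile, StandardCharsets.UTF_8));
  }
}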
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.nio.file.Files; import java.util.Map; import java.util.UUID; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.verifyZeroInteractions; /** * Tests for the CGroups handler implementation.
*/ public class TestCGroupsHandlerImpl { private static final Log LOG = LogFactory.getLog(TestCGroupsHandlerImpl.class); private PrivilegedOperationExecutor privilegedOperationExecutorMock; private Configuration conf; private String tmpPath; private String hierarchy; private CGroupsHandler.CGroupController controller; private String controllerPath; @Before public void setup() { privilegedOperationExecutorMock = mock(PrivilegedOperationExecutor.class); conf = new YarnConfiguration(); tmpPath = System.getProperty("test.build.data") + "/cgroups"; //no leading or trailing slashes here hierarchy = "test-hadoop-yarn"; conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, hierarchy); conf.setBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT, true); conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, tmpPath); controller = CGroupsHandler.CGroupController.NET_CLS; controllerPath = new StringBuffer(tmpPath).append('/') .append(controller.getName()).append('/').append(hierarchy).toString(); } @Test public void testMountController() { CGroupsHandler cGroupsHandler = null; //Since we enabled (deferred) cgroup controller mounting, no interactions //should have occurred, with this mock verifyZeroInteractions(privilegedOperationExecutorMock); try { cGroupsHandler = new CGroupsHandlerImpl(conf, privilegedOperationExecutorMock); PrivilegedOperation expectedOp = new PrivilegedOperation( PrivilegedOperation.OperationType.MOUNT_CGROUPS, (String) null); //This is expected to be of the form : //net_cls=<mount_path>/net_cls StringBuffer controllerKV = new StringBuffer(controller.getName()) .append('=').append(tmpPath).append('/').append(controller.getName()); expectedOp.appendArgs(hierarchy, controllerKV.toString()); cGroupsHandler.mountCGroupController(controller); try { ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass( PrivilegedOperation.class); verify(privilegedOperationExecutorMock) .executePrivilegedOperation(opCaptor.capture(), eq(false)); //we'll explicitly capture and assert that the //captured op and the expected op are identical. Assert.assertEquals(expectedOp, opCaptor.getValue()); verifyNoMoreInteractions(privilegedOperationExecutorMock); //Try mounting the same controller again - this should be a no-op cGroupsHandler.mountCGroupController(controller); verifyNoMoreInteractions(privilegedOperationExecutorMock); } catch (PrivilegedOperationException e) { LOG.error("Caught exception: " + e); Assert.assertTrue("Unexpected PrivilegedOperationException from mock!", false); } } catch (ResourceHandlerException e) { LOG.error("Caught exception: " + e); Assert.assertTrue("Unexpected ResourceHandler Exception!", false); } } @Test public void testCGroupPaths() { //As per junit behavior, we expect a new mock object to be available //in this test. 
verifyZeroInteractions(privilegedOperationExecutorMock); CGroupsHandler cGroupsHandler = null; try { cGroupsHandler = new CGroupsHandlerImpl(conf, privilegedOperationExecutorMock); cGroupsHandler.mountCGroupController(controller); } catch (ResourceHandlerException e) { LOG.error("Caught exception: " + e); Assert.assertTrue( "Unexpected ResourceHandlerException when mounting controller!", false); } String testCGroup = "container_01"; String expectedPath = new StringBuffer(controllerPath).append('/') .append(testCGroup).toString(); String path = cGroupsHandler.getPathForCGroup(controller, testCGroup); Assert.assertEquals(expectedPath, path); String expectedPathTasks = new StringBuffer(expectedPath).append('/') .append(CGroupsHandler.CGROUP_FILE_TASKS).toString(); path = cGroupsHandler.getPathForCGroupTasks(controller, testCGroup); Assert.assertEquals(expectedPathTasks, path); String param = CGroupsHandler.CGROUP_PARAM_CLASSID; String expectedPathParam = new StringBuffer(expectedPath).append('/') .append(controller.getName()).append('.').append(param).toString(); path = cGroupsHandler.getPathForCGroupParam(controller, testCGroup, param); Assert.assertEquals(expectedPathParam, path); } @Test public void testCGroupOperations() { //As per junit behavior, we expect a new mock object to be available //in this test. verifyZeroInteractions(privilegedOperationExecutorMock); CGroupsHandler cGroupsHandler = null; try { cGroupsHandler = new CGroupsHandlerImpl(conf, privilegedOperationExecutorMock); cGroupsHandler.mountCGroupController(controller); } catch (ResourceHandlerException e) { LOG.error("Caught exception: " + e); Assert.assertTrue( "Unexpected ResourceHandlerException when mounting controller!", false); } //Let's manually create a path to (partially) simulate a mounted controller //this is required because the handler uses a mocked privileged operation //executor new File(controllerPath).mkdirs(); String testCGroup = "container_01"; String expectedPath = new StringBuffer(controllerPath).append('/') .append(testCGroup).toString(); try { String path = cGroupsHandler.createCGroup(controller, testCGroup); Assert.assertTrue(new File(expectedPath).exists()); Assert.assertEquals(expectedPath, path); //update param and read param tests. //We don't use net_cls.classid as the test param here because //cgroups provides very specific read/write semantics for classid (only //numbers can be written - potentially as hex but can be read out only //as decimal) String param = "test_param"; String paramValue = "test_param_value"; cGroupsHandler .updateCGroupParam(controller, testCGroup, param, paramValue); String paramPath = new StringBuffer(expectedPath).append('/') .append(controller.getName()).append('.').append(param).toString(); File paramFile = new File(paramPath); Assert.assertTrue(paramFile.exists()); try { Assert.assertEquals(paramValue, new String(Files.readAllBytes( paramFile.toPath()))); } catch (IOException e) { LOG.error("Caught exception: " + e); Assert.fail("Unexpected IOException trying to read cgroup param!"); } Assert.assertEquals(paramValue, cGroupsHandler.getCGroupParam(controller, testCGroup, param)); //We can't really do a delete test here. Linux cgroups //implementation provides additional semantics - the cgroup cannot be //deleted if there are any tasks still running in the cgroup even if //the user attempting the delete has the file permissions to do so - we //cannot simulate that here.
Even if we create a dummy 'tasks' file, we //wouldn't be able to simulate the delete behavior we need, since a cgroup //can be deleted using 'rmdir' if the tasks file is empty. Such a //delete is not possible with a regular non-empty directory. } catch (ResourceHandlerException e) { LOG.error("Caught exception: " + e); Assert .fail("Unexpected ResourceHandlerException during cgroup operations!"); } } public static File createMockCgroupMount(File parentDir, String type) throws IOException { return createMockCgroupMount(parentDir, type, "hadoop-yarn"); } public static File createMockCgroupMount(File parentDir, String type, String hierarchy) throws IOException { File cgroupMountDir = new File(parentDir.getAbsolutePath(), type + "/" + hierarchy); FileUtils.deleteQuietly(cgroupMountDir); if (!cgroupMountDir.mkdirs()) { String message = "Could not create dir " + cgroupMountDir.getAbsolutePath(); throw new IOException(message); } return cgroupMountDir; } public static File createMockMTab(File parentDir) throws IOException { String cpuMtabContent = "none " + parentDir.getAbsolutePath() + "/cpu cgroup rw,relatime,cpu 0 0\n"; String blkioMtabContent = "none " + parentDir.getAbsolutePath() + "/blkio cgroup rw,relatime,blkio 0 0\n"; File mockMtab = new File(parentDir, UUID.randomUUID().toString()); if (!mockMtab.exists()) { if (!mockMtab.createNewFile()) { String message = "Could not create file " + mockMtab.getAbsolutePath(); throw new IOException(message); } } FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile()); mtabWriter.write(cpuMtabContent); mtabWriter.write(blkioMtabContent); mtabWriter.close(); mockMtab.deleteOnExit(); return mockMtab; } @Test public void testMtabParsing() throws Exception { File parentDir = new File(tmpPath); // create mock cgroup File cpuCgroupMountDir = createMockCgroupMount(parentDir, "cpu", hierarchy); Assert.assertTrue(cpuCgroupMountDir.exists()); File blkioCgroupMountDir = createMockCgroupMount(parentDir, "blkio", hierarchy); Assert.assertTrue(blkioCgroupMountDir.exists()); File mockMtabFile = createMockMTab(parentDir); Map<CGroupsHandler.CGroupController, String> controllerPaths = CGroupsHandlerImpl.initializeControllerPathsFromMtab( mockMtabFile.getAbsolutePath(), hierarchy); Assert.assertEquals(2, controllerPaths.size()); Assert.assertTrue(controllerPaths .containsKey(CGroupsHandler.CGroupController.CPU)); Assert.assertTrue(controllerPaths .containsKey(CGroupsHandler.CGroupController.BLKIO)); String cpuDir = controllerPaths.get(CGroupsHandler.CGroupController.CPU); String blkioDir = controllerPaths.get(CGroupsHandler.CGroupController.BLKIO); Assert.assertEquals(parentDir.getAbsolutePath() + "/cpu", cpuDir); Assert.assertEquals(parentDir.getAbsolutePath() + "/blkio", blkioDir); } @After public void teardown() { FileUtil.fullyDelete(new File(tmpPath)); } }
12,426
39.878289
112
java
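testMtabParsing above feeds initializeControllerPathsFromMtab a synthetic mtab whose cgroup lines look like "none <path>/cpu cgroup rw,relatime,cpu 0 0" and expects a controller-to-mount-point map back. A hedged sketch of that parse; the option filtering here is deliberately simplified relative to the real implementation:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MtabParseSketch {
  // Controller names we care about; a simplification of the real set.
  static final List<String> CONTROLLERS = Arrays.asList("cpu", "blkio", "net_cls");

  static Map<String, String> parse(List<String> mtabLines) {
    Map<String, String> controllerPaths = new HashMap<>();
    for (String line : mtabLines) {
      // mtab fields: device, mount point, fs type, options, dump, pass
      String[] fields = line.trim().split("\\s+");
      if (fields.length >= 4 && "cgroup".equals(fields[2])) {
        for (String option : fields[3].split(",")) {
          if (CONTROLLERS.contains(option)) {
            controllerPaths.put(option, fields[1]);
          }
        }
      }
    }
    return controllerPaths;
  }

  public static void main(String[] args) {
    System.out.println(parse(Arrays.asList(
        "none /tmp/cgroups/cpu cgroup rw,relatime,cpu 0 0",
        "none /tmp/cgroups/blkio cgroup rw,relatime,blkio 0 0")));
  }
}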
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; import static org.junit.matchers.JUnitMatchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.BufferedReader; import java.io.File; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.StringTokenizer; import java.util.jar.JarFile; import java.util.jar.Manifest; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; import 
org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.ShellScriptBuilder; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.junit.Assert; import org.junit.Assume; import org.junit.Before; import org.junit.Test; public class TestContainerLaunch extends BaseContainerManagerTest { protected Context distContext = new NMContext(new NMContainerTokenSecretManager( conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf), new NMNullStateStoreService()) { public int getHttpPort() { return HTTP_PORT; }; public NodeId getNodeId() { return NodeId.newInstance("ahost", 1234); }; }; public TestContainerLaunch() throws UnsupportedFileSystemException { super(); } @Before public void setup() throws IOException { conf.setClass( YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, LinuxResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class); super.setup(); } @Test public void testSpecialCharSymlinks() throws IOException { File shellFile = null; File tempFile = null; String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" : "foo@zz%_#*&!-+= bar()"; File symLinkFile = null; try { shellFile = Shell.appendScriptExtension(tmpDir, "hello"); tempFile = Shell.appendScriptExtension(tmpDir, "temp"); String timeoutCommand = Shell.WINDOWS ? 
"@echo \"hello\"" : "echo \"hello\""; PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile, true); writer.println(timeoutCommand); writer.close(); Map<Path, List<String>> resources = new HashMap<Path, List<String>>(); Path path = new Path(shellFile.getAbsolutePath()); resources.put(path, Arrays.asList(badSymlink)); FileOutputStream fos = new FileOutputStream(tempFile); Map<String, String> env = new HashMap<String, String>(); List<String> commands = new ArrayList<String>(); if (Shell.WINDOWS) { commands.add("cmd"); commands.add("/c"); commands.add("\"" + badSymlink + "\""); } else { commands.add("/bin/sh ./\\\"" + badSymlink + "\\\""); } new DefaultContainerExecutor().writeLaunchEnv(fos, env, resources, commands); fos.flush(); fos.close(); FileUtil.setExecutable(tempFile, true); Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()}, tmpDir); shexc.execute(); assertEquals(shexc.getExitCode(), 0); assert(shexc.getOutput().contains("hello")); symLinkFile = new File(tmpDir, badSymlink); } finally { // cleanup if (shellFile != null && shellFile.exists()) { shellFile.delete(); } if (tempFile != null && tempFile.exists()) { tempFile.delete(); } if (symLinkFile != null && symLinkFile.exists()) { symLinkFile.delete(); } } } // test the diagnostics are generated @Test (timeout = 20000) public void testInvalidSymlinkDiagnostics() throws IOException { File shellFile = null; File tempFile = null; String symLink = Shell.WINDOWS ? "test.cmd" : "test"; File symLinkFile = null; try { shellFile = Shell.appendScriptExtension(tmpDir, "hello"); tempFile = Shell.appendScriptExtension(tmpDir, "temp"); String timeoutCommand = Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\""; PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile, true); writer.println(timeoutCommand); writer.close(); Map<Path, List<String>> resources = new HashMap<Path, List<String>>(); //This is an invalid path and should throw exception because of No such file. 
Path invalidPath = new Path(shellFile.getAbsolutePath()+"randomPath"); resources.put(invalidPath, Arrays.asList(symLink)); FileOutputStream fos = new FileOutputStream(tempFile); Map<String, String> env = new HashMap<String, String>(); List<String> commands = new ArrayList<String>(); if (Shell.WINDOWS) { commands.add("cmd"); commands.add("/c"); commands.add("\"" + symLink + "\""); } else { commands.add("/bin/sh ./\\\"" + symLink + "\\\""); } new DefaultContainerExecutor().writeLaunchEnv(fos, env, resources, commands); fos.flush(); fos.close(); FileUtil.setExecutable(tempFile, true); Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()}, tmpDir); String diagnostics = null; try { shexc.execute(); Assert.fail("Should catch exception"); } catch(ExitCodeException e){ diagnostics = e.getMessage(); } Assert.assertNotNull(diagnostics); Assert.assertTrue(shexc.getExitCode() != 0); symLinkFile = new File(tmpDir, symLink); } finally { // cleanup if (shellFile != null && shellFile.exists()) { shellFile.delete(); } if (tempFile != null && tempFile.exists()) { tempFile.delete(); } if (symLinkFile != null && symLinkFile.exists()) { symLinkFile.delete(); } } } @Test (timeout = 20000) public void testInvalidEnvSyntaxDiagnostics() throws IOException { File shellFile = null; try { shellFile = Shell.appendScriptExtension(tmpDir, "hello"); Map<Path, List<String>> resources = new HashMap<Path, List<String>>(); FileOutputStream fos = new FileOutputStream(shellFile); FileUtil.setExecutable(shellFile, true); Map<String, String> env = new HashMap<String, String>(); // invalid env env.put( "APPLICATION_WORKFLOW_CONTEXT", "{\"workflowId\":\"609f91c5cd83\"," + "\"workflowName\":\"\n\ninsert table " + "\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, " ); List<String> commands = new ArrayList<String>(); new DefaultContainerExecutor().writeLaunchEnv(fos, env, resources, commands); fos.flush(); fos.close(); // It is assumed that LANG is set to C. Map<String, String> cmdEnv = new HashMap<String, String>(); cmdEnv.put("LANG", "C"); Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()}, tmpDir, cmdEnv); String diagnostics = null; try { shexc.execute(); Assert.fail("Should catch exception"); } catch(ExitCodeException e){ diagnostics = e.getMessage(); } Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ?
"is not recognized as an internal or external command" : "command not found")); Assert.assertTrue(shexc.getExitCode() != 0); } finally { // cleanup if (shellFile != null && shellFile.exists()) { shellFile.delete(); } } } @Test(timeout = 10000) public void testEnvExpansion() throws IOException { Path logPath = new Path("/nm/container/logs"); String input = Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/*" + ApplicationConstants.CLASS_PATH_SEPARATOR + Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/lib/*" + ApplicationConstants.CLASS_PATH_SEPARATOR + Apps.crossPlatformify("HADOOP_LOG_HOME") + ApplicationConstants.LOG_DIR_EXPANSION_VAR; String res = ContainerLaunch.expandEnvironment(input, logPath); if (Shell.WINDOWS) { Assert.assertEquals("%HADOOP_HOME%/share/hadoop/common/*;" + "%HADOOP_HOME%/share/hadoop/common/lib/*;" + "%HADOOP_LOG_HOME%/nm/container/logs", res); } else { Assert.assertEquals("$HADOOP_HOME/share/hadoop/common/*:" + "$HADOOP_HOME/share/hadoop/common/lib/*:" + "$HADOOP_LOG_HOME/nm/container/logs", res); } System.out.println(res); } @Test (timeout = 20000) public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException { File shellFile = null; try { shellFile = Shell.appendScriptExtension(tmpDir, "hello"); // echo "hello" to stdout and "error" to stderr and exit code with 2; String command = Shell.WINDOWS ? "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" : "echo \"hello\"; echo \"error\" 1>&2; exit 2;"; PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile, true); writer.println(command); writer.close(); Map<Path, List<String>> resources = new HashMap<Path, List<String>>(); FileOutputStream fos = new FileOutputStream(shellFile, true); Map<String, String> env = new HashMap<String, String>(); List<String> commands = new ArrayList<String>(); commands.add(command); ContainerExecutor exec = new DefaultContainerExecutor(); exec.writeLaunchEnv(fos, env, resources, commands); fos.flush(); fos.close(); Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()}, tmpDir); String diagnostics = null; try { shexc.execute(); Assert.fail("Should catch exception"); } catch(ExitCodeException e){ diagnostics = e.getMessage(); } // test stderr Assert.assertTrue(diagnostics.contains("error")); // test stdout Assert.assertTrue(shexc.getOutput().contains("hello")); Assert.assertTrue(shexc.getExitCode() == 2); } finally { // cleanup if (shellFile != null && shellFile.exists()) { shellFile.delete(); } } } @Test public void testPrependDistcache() throws Exception { // Test is only relevant on Windows Assume.assumeTrue(Shell.WINDOWS); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); Map<String, String> userSetEnv = new HashMap<String, String>(); userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id"); userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST"); userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT"); userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT"); userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR"); userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key()); userSetEnv.put(Environment.LOGNAME.name(), 
"user_set_LOGNAME"); userSetEnv.put(Environment.PWD.name(), "user_set_PWD"); userSetEnv.put(Environment.HOME.name(), "user_set_HOME"); userSetEnv.put(Environment.CLASSPATH.name(), "APATH"); containerLaunchContext.setEnvironment(userSetEnv); Container container = mock(Container.class); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(containerLaunchContext); when(container.getLocalizedResources()).thenReturn(null); Dispatcher dispatcher = mock(Dispatcher.class); EventHandler eventHandler = new EventHandler() { public void handle(Event event) { Assert.assertTrue(event instanceof ContainerExitEvent); ContainerExitEvent exitEvent = (ContainerExitEvent) event; Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, exitEvent.getType()); } }; when(dispatcher.getEventHandler()).thenReturn(eventHandler); Configuration conf = new Configuration(); ContainerLaunch launch = new ContainerLaunch(distContext, conf, dispatcher, exec, null, container, dirsHandler, containerManager); String testDir = System.getProperty("test.build.data", "target/test-dir"); Path pwd = new Path(testDir); List<Path> appDirs = new ArrayList<Path>(); List<String> containerLogs = new ArrayList<String>(); Map<Path, List<String>> resources = new HashMap<Path, List<String>>(); Path userjar = new Path("user.jar"); List<String> lpaths = new ArrayList<String>(); lpaths.add("userjarlink.jar"); resources.put(userjar, lpaths); Path nmp = new Path(testDir); launch.sanitizeEnv( userSetEnv, pwd, appDirs, containerLogs, resources, nmp); List<String> result = getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name())); Assert.assertTrue(result.size() > 1); Assert.assertTrue( result.get(result.size() - 1).endsWith("userjarlink.jar")); //Then, with user classpath first userSetEnv.put(Environment.CLASSPATH_PREPEND_DISTCACHE.name(), "true"); cId = ContainerId.newContainerId(appAttemptId, 1); when(container.getContainerId()).thenReturn(cId); launch = new ContainerLaunch(distContext, conf, dispatcher, exec, null, container, dirsHandler, containerManager); launch.sanitizeEnv( userSetEnv, pwd, appDirs, containerLogs, resources, nmp); result = getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name())); Assert.assertTrue(result.size() > 1); Assert.assertTrue( result.get(0).endsWith("userjarlink.jar")); } private static List<String> getJarManifestClasspath(String path) throws Exception { List<String> classpath = new ArrayList<String>(); JarFile jarFile = new JarFile(path); Manifest manifest = jarFile.getManifest(); String cps = manifest.getMainAttributes().getValue("Class-Path"); StringTokenizer cptok = new StringTokenizer(cps); while (cptok.hasMoreTokens()) { String cpentry = cptok.nextToken(); classpath.add(cpentry); } return classpath; } /** * See if environment variable is forwarded using sanitizeEnv. 
* @throws Exception */ @Test (timeout = 60000) public void testContainerEnvVariables() throws Exception { containerManager.start(); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); // ////// Construct the Container-id ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); Map<String, String> userSetEnv = new HashMap<String, String>(); userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id"); userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST"); userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT"); userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT"); userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR"); userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key()); userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME"); userSetEnv.put(Environment.PWD.name(), "user_set_PWD"); userSetEnv.put(Environment.HOME.name(), "user_set_HOME"); containerLaunchContext.setEnvironment(userSetEnv); File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile"); PrintWriter fileWriter = new PrintWriter(scriptFile); File processStartFile = new File(tmpDir, "env_vars.txt").getAbsoluteFile(); if (Shell.WINDOWS) { fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> " + processStartFile); fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.USER.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.PWD.$() + ">> " + processStartFile); fileWriter.println("@echo " + Environment.HOME.$() + ">> " + processStartFile); for (String serviceName : containerManager.getAuxServiceMetaData() .keySet()) { fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + "%>> " + processStartFile); } fileWriter.println("@echo " + cId + ">> " + processStartFile); fileWriter.println("@ping -n 100 127.0.0.1 >nul"); } else { fileWriter.write("\numask 0"); // So that start file is readable by the test fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > " + processStartFile); fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.USER.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.PWD.name() + " >> " + processStartFile); fileWriter.write("\necho $" + Environment.HOME.name() + " >> " + processStartFile); for (String serviceName : containerManager.getAuxServiceMetaData() .keySet()) { fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + " >> " + processStartFile); } 
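// Record the shell's own PID ($$) so the test can check the process is alive and later confirm it was killed.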
fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); } fileWriter.close(); // upload the script file so that the container can run it URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); // set up the rest of the container List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, Priority.newInstance(0), 0)); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs = 0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists()); // Now verify the contents of the file List<String> localDirs = dirsHandler.getLocalDirs(); List<String> logDirs = dirsHandler.getLogDirs(); List<Path> appDirs = new ArrayList<Path>(localDirs.size()); for (String localDir : localDirs) { Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, user); Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE); appDirs.add(new Path(appsdir, appId.toString())); } List<String> containerLogDirs = new ArrayList<String>(); String relativeContainerLogDir = ContainerLaunch .getRelativeContainerLogDir(appId.toString(), cId.toString()); for(String logDir : logDirs){ containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir); } BufferedReader reader = new BufferedReader(new FileReader(processStartFile)); Assert.assertEquals(cId.toString(), reader.readLine()); Assert.assertEquals(context.getNodeId().getHost(), reader.readLine()); Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), reader.readLine()); Assert.assertEquals(String.valueOf(HTTP_PORT), reader.readLine()); Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine()); Assert.assertEquals(user, reader.readLine()); Assert.assertEquals(user, reader.readLine()); String obtainedPWD = reader.readLine(); boolean found = false; for (Path localDir : appDirs) { if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) { found = true; break; } } Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found); Assert.assertEquals( conf.get( YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), reader.readLine()); for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) { Assert.assertEquals( containerManager.getAuxServiceMetaData().get(serviceName), ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes()))); } Assert.assertEquals(cId.toString(), containerLaunchContext 
.getEnvironment().get(Environment.CONTAINER_ID.name())); Assert.assertEquals(context.getNodeId().getHost(), containerLaunchContext .getEnvironment().get(Environment.NM_HOST.name())); Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name())); Assert.assertEquals(String.valueOf(HTTP_PORT), containerLaunchContext .getEnvironment().get(Environment.NM_HTTP_PORT.name())); Assert.assertEquals(StringUtils.join(",", appDirs), containerLaunchContext .getEnvironment().get(Environment.LOCAL_DIRS.name())); Assert.assertEquals(StringUtils.join(",", containerLogDirs), containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name())); Assert.assertEquals(user, containerLaunchContext.getEnvironment() .get(Environment.USER.name())); Assert.assertEquals(user, containerLaunchContext.getEnvironment() .get(Environment.LOGNAME.name())); found = false; obtainedPWD = containerLaunchContext.getEnvironment().get(Environment.PWD.name()); for (Path localDir : appDirs) { if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) { found = true; break; } } Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found); Assert.assertEquals( conf.get( YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), containerLaunchContext.getEnvironment() .get(Environment.HOME.name())); // Get the pid of the process String pid = reader.readLine().trim(); // No more lines Assert.assertEquals(null, reader.readLine()); // Now test the stop functionality. // Assert that the process is alive Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid)); // Once more Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid)); // Now test the stop functionality. List<ContainerId> containerIds = new ArrayList<ContainerId>(); containerIds.add(cId); StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds); containerManager.stopContainers(stopRequest); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER; Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus()); // Assert that the process is not alive anymore Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid)); } @Test (timeout = 5000) public void testAuxiliaryServiceHelper() throws Exception { Map<String, String> env = new HashMap<String, String>(); String serviceName = "testAuxiliaryService"; ByteBuffer bb = ByteBuffer.wrap("testAuxiliaryService".getBytes()); AuxiliaryServiceHelper.setServiceDataIntoEnv(serviceName, bb, env); Assert.assertEquals(bb, AuxiliaryServiceHelper.getServiceDataFromEnv(serviceName, env)); } private void internalKillTest(boolean delayed) throws Exception { conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, delayed ? 
1000 : 0); containerManager.start(); // ////// Construct the Container-id ApplicationId appId = ApplicationId.newInstance(1, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); File processStartFile = new File(tmpDir, "pid.txt").getAbsoluteFile(); // setup a script that can handle sigterm gracefully File scriptFile = Shell.appendScriptExtension(tmpDir, "testscript"); PrintWriter writer = new PrintWriter(new FileOutputStream(scriptFile)); if (Shell.WINDOWS) { writer.println("@echo \"Running testscript for delayed kill\""); writer.println("@echo \"Writing pid to start file\""); writer.println("@echo " + cId + "> " + processStartFile); writer.println("@ping -n 100 127.0.0.1 >nul"); } else { writer.println("#!/bin/bash\n\n"); writer.println("echo \"Running testscript for delayed kill\""); writer.println("hello=\"Got SIGTERM\""); writer.println("umask 0"); writer.println("trap \"echo $hello >> " + processStartFile + "\" SIGTERM"); writer.println("echo \"Writing pid to start file\""); writer.println("echo $$ >> " + processStartFile); writer.println("while true; do\nsleep 1s;\ndone"); } writer.close(); FileUtil.setExecutable(scriptFile, true); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); // upload the script file so that the container can run it URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file.sh"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); // set up the rest of the container List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); Priority priority = Priority.newInstance(10); long createTime = 1234; Token containerToken = createContainerToken(cId, priority, createTime); StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs = 0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists()); NMContainerStatus nmContainerStatus = containerManager.getContext().getContainers().get(cId) .getNMContainerStatus(); Assert.assertEquals(priority, nmContainerStatus.getPriority()); // Now test the stop functionality. 
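// The stop goes through the ContainerManager API rather than signalling the container process directly.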
List<ContainerId> containerIds = new ArrayList<ContainerId>(); containerIds.add(cId); StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds); containerManager.stopContainers(stopRequest); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); // If delayed, container stop sends a SIGTERM followed by a SIGKILL; // otherwise a SIGKILL is sent immediately GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest) .getContainerStatuses().get(0); Assert.assertEquals(ContainerExitStatus.KILLED_BY_APPMASTER, containerStatus.getExitStatus()); // Now verify the contents of the file. The script generates a message when // it receives a SIGTERM, so we look for that. We cannot perform this check // on Windows, because the process is not notified when killed by winutils. // There is no way for the process to trap and respond. Instead, we can // verify that the job object with an ID matching the container ID no longer exists. if (Shell.WINDOWS || !delayed) { Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(cId.toString())); } else { BufferedReader reader = new BufferedReader(new FileReader(processStartFile)); boolean foundSigTermMessage = false; while (true) { String line = reader.readLine(); if (line == null) { break; } if (line.contains("SIGTERM")) { foundSigTermMessage = true; break; } } Assert.assertTrue("Did not find sigterm message", foundSigTermMessage); reader.close(); } } @Test (timeout = 30000) public void testDelayedKill() throws Exception { internalKillTest(true); } @Test (timeout = 30000) public void testImmediateKill() throws Exception { internalKillTest(false); } @SuppressWarnings("rawtypes") @Test (timeout = 10000) public void testCallFailureWithNullLocalizedResources() { Container container = mock(Container.class); when(container.getContainerId()).thenReturn(ContainerId.newContainerId( ApplicationAttemptId.newInstance(ApplicationId.newInstance( System.currentTimeMillis(), 1), 1), 1)); ContainerLaunchContext clc = mock(ContainerLaunchContext.class); when(clc.getCommands()).thenReturn(Collections.<String>emptyList()); when(container.getLaunchContext()).thenReturn(clc); when(container.getLocalizedResources()).thenReturn(null); Dispatcher dispatcher = mock(Dispatcher.class); EventHandler eventHandler = new EventHandler() { public void handle(Event event) { Assert.assertTrue(event instanceof ContainerExitEvent); ContainerExitEvent exitEvent = (ContainerExitEvent) event; Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, exitEvent.getType()); } }; when(dispatcher.getEventHandler()).thenReturn(eventHandler); ContainerLaunch launch = new ContainerLaunch(context, new Configuration(), dispatcher, exec, null, container, dirsHandler, containerManager); launch.call(); } protected Token createContainerToken(ContainerId cId, Priority priority, long createTime) throws InvalidToken { Resource r = BuilderUtils.newResource(1024, 1); ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 10000L, 123, DUMMY_RM_IDENTIFIER, priority, createTime); Token containerToken = BuilderUtils.newContainerToken( context.getNodeId(), context.getContainerTokenSecretManager().retrievePassword( containerTokenIdentifier), containerTokenIdentifier); return containerToken; } /** * Test that the script
exits with a non-zero exit code when the command fails. * @throws IOException */ @Test (timeout = 10000) public void testShellScriptBuilderNonZeroExitCode() throws IOException { ShellScriptBuilder builder = ShellScriptBuilder.create(); builder.command(Arrays.asList(new String[] {"unknownCommand"})); File shellFile = Shell.appendScriptExtension(tmpDir, "testShellScriptBuilderError"); PrintStream writer = new PrintStream(new FileOutputStream(shellFile)); builder.write(writer); writer.close(); try { FileUtil.setExecutable(shellFile, true); Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor( new String[]{shellFile.getAbsolutePath()}, tmpDir); try { shexc.execute(); fail("builder shell command was expected to throw"); } catch(IOException e) { // expected System.out.println("Received an expected exception: " + e.getMessage()); } } finally { FileUtil.fullyDelete(shellFile); } } private static final String expectedMessage = "The command line has a length of"; @Test (timeout = 10000) public void testWindowsShellScriptBuilderCommand() throws IOException { String callCmd = "@call "; // Test is only relevant on Windows Assume.assumeTrue(Shell.WINDOWS); // The tests assume a maximum command line length of 8191 characters assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder = ShellScriptBuilder.create(); // Basic tests: below the max, exactly the max, and max+1 lengths builder.command(Arrays.asList( org.apache.commons.lang.StringUtils.repeat("A", 1024))); builder.command(Arrays.asList( org.apache.commons.lang.StringUtils.repeat( "E", Shell.WINDOWS_MAX_SHELL_LENGHT - callCmd.length()))); try { builder.command(Arrays.asList( org.apache.commons.lang.StringUtils.repeat( "X", Shell.WINDOWS_MAX_SHELL_LENGHT - callCmd.length() + 1))); fail("longCommand was expected to throw"); } catch(IOException e) { assertThat(e.getMessage(), containsString(expectedMessage)); } // Composite tests, from parts: below, exactly at, and over the max builder.command(Arrays.asList( org.apache.commons.lang.StringUtils.repeat("A", 1024), org.apache.commons.lang.StringUtils.repeat("A", 1024), org.apache.commons.lang.StringUtils.repeat("A", 1024))); // builder.command joins the command parts with an extra space builder.command(Arrays.asList( org.apache.commons.lang.StringUtils.repeat("E", 4095), org.apache.commons.lang.StringUtils.repeat("E", 2047), org.apache.commons.lang.StringUtils.repeat("E", 2047 - callCmd.length()))); try { builder.command(Arrays.asList( org.apache.commons.lang.StringUtils.repeat("X", 4095), org.apache.commons.lang.StringUtils.repeat("X", 2047), org.apache.commons.lang.StringUtils.repeat("X", 2048 - callCmd.length()))); fail("long command was expected to throw"); } catch(IOException e) { assertThat(e.getMessage(), containsString(expectedMessage)); } } @Test (timeout = 10000) public void testWindowsShellScriptBuilderEnv() throws IOException { // Test is only relevant on Windows Assume.assumeTrue(Shell.WINDOWS); // The tests assume a maximum command line length of 8191 characters assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder = ShellScriptBuilder.create(); // test env builder.env("somekey", org.apache.commons.lang.StringUtils.repeat("A", 1024)); builder.env("somekey", org.apache.commons.lang.StringUtils.repeat( "A", Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length())); try { builder.env("somekey", org.apache.commons.lang.StringUtils.repeat( "A", Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length()) + 1); fail("long env was expected to throw"); } catch(IOException e) {
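// expected: the env line would exceed the maximum command line length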
assertThat(e.getMessage(), containsString(expectedMessage)); } } @Test (timeout = 10000) public void testWindowsShellScriptBuilderMkdir() throws IOException { String mkDirCmd = "@if not exist \"\" mkdir \"\""; // Test is only relevant on Windows Assume.assumeTrue(Shell.WINDOWS); // The tests are built on assuming 8191 max command line length assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder = ShellScriptBuilder.create(); // test mkdir builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024))); builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat( "E", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length())/2))); try { builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat( "X", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length())/2 +1))); fail("long mkdir was expected to throw"); } catch(IOException e) { assertThat(e.getMessage(), containsString(expectedMessage)); } } @Test (timeout = 10000) public void testWindowsShellScriptBuilderLink() throws IOException { // Test is only relevant on Windows Assume.assumeTrue(Shell.WINDOWS); String linkCmd = "@" +Shell.WINUTILS + " symlink \"\" \"\""; // The tests are built on assuming 8191 max command line length assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT); ShellScriptBuilder builder = ShellScriptBuilder.create(); // test link builder.link(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024)), new Path(org.apache.commons.lang.StringUtils.repeat("B", 1024))); builder.link( new Path(org.apache.commons.lang.StringUtils.repeat( "E", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length())/2)), new Path(org.apache.commons.lang.StringUtils.repeat( "F", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length())/2))); try { builder.link( new Path(org.apache.commons.lang.StringUtils.repeat( "X", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length())/2 + 1)), new Path(org.apache.commons.lang.StringUtils.repeat( "Y", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length())/2) + 1)); fail("long link was expected to throw"); } catch(IOException e) { assertThat(e.getMessage(), containsString(expectedMessage)); } } @Test public void testKillProcessGroup() throws Exception { Assume.assumeTrue(Shell.isSetsidAvailable); containerManager.start(); // Construct the Container-id ApplicationId appId = ApplicationId.newInstance(2, 2); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); File processStartFile = new File(tmpDir, "pid.txt").getAbsoluteFile(); File childProcessStartFile = new File(tmpDir, "child_pid.txt").getAbsoluteFile(); // setup a script that can handle sigterm gracefully File scriptFile = Shell.appendScriptExtension(tmpDir, "testscript"); PrintWriter writer = new PrintWriter(new FileOutputStream(scriptFile)); writer.println("#!/bin/bash\n\n"); writer.println("echo \"Running testscript for forked process\""); writer.println("umask 0"); writer.println("echo $$ >> " + processStartFile); writer.println("while true;\ndo sleep 1s;\ndone > /dev/null 2>&1 &"); writer.println("echo $! 
>> " + childProcessStartFile); writer.println("while true;\ndo sleep 1s;\ndone"); writer.close(); FileUtil.setExecutable(scriptFile, true); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); // upload the script file so that the container can run it URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file.sh"; Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put(destinationFile, rsrc_alpha); containerLaunchContext.setLocalResources(localResources); // set up the rest of the container List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); Priority priority = Priority.newInstance(10); long createTime = 1234; Token containerToken = createContainerToken(cId, priority, createTime); StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(scRequest); StartContainersRequest allRequests = StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs = 0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists()); BufferedReader reader = new BufferedReader(new FileReader(processStartFile)); // Get the pid of the process String pid = reader.readLine().trim(); // No more lines Assert.assertEquals(null, reader.readLine()); reader.close(); reader = new BufferedReader(new FileReader(childProcessStartFile)); // Get the pid of the child process String child = reader.readLine().trim(); // No more lines Assert.assertEquals(null, reader.readLine()); reader.close(); LOG.info("Manually killing pid " + pid + ", but not child pid " + child); Shell.execCommand(new String[]{"kill", "-9", pid}); BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid)); List<ContainerId> containerIds = new ArrayList<ContainerId>(); containerIds.add(cId); GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest) .getContainerStatuses().get(0); Assert.assertEquals(ExitCode.FORCE_KILLED.getExitCode(), containerStatus.getExitStatus()); } }
49,561
39.859027
110
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerHeartbeatResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; public class MockLocalizerHeartbeatResponse implements LocalizerHeartbeatResponse { LocalizerAction action; List<ResourceLocalizationSpec> resourceSpecs; MockLocalizerHeartbeatResponse() { resourceSpecs = new ArrayList<ResourceLocalizationSpec>(); } MockLocalizerHeartbeatResponse( LocalizerAction action, List<ResourceLocalizationSpec> resources) { this.action = action; this.resourceSpecs = resources; } public LocalizerAction getLocalizerAction() { return action; } public void setLocalizerAction(LocalizerAction action) { this.action = action; } @Override public List<ResourceLocalizationSpec> getResourceSpecs() { return resourceSpecs; } @Override public void setResourceSpecs(List<ResourceLocalizationSpec> resourceSpecs) { this.resourceSpecs = resourceSpecs; } }
2,023
33.896552
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.junit.Test; import static org.junit.Assert.*; import org.mockito.ArgumentCaptor; import static org.mockito.Mockito.*; public class TestResourceRetention { @Test public void testRsrcUnused() { DeletionService delService = mock(DeletionService.class); long TARGET_MB = 10 << 20; ResourceRetentionSet rss = new ResourceRetentionSet(delService, TARGET_MB); // 3MB files @{10, 15} LocalResourcesTracker pubTracker = createMockTracker(null, 3 * 1024 * 1024, 2, 10, 5); // 1MB files @{3, 6, 9, 12} LocalResourcesTracker trackerA = createMockTracker("A", 1 * 1024 * 1024, 4, 3, 3); // 4MB file @{1} LocalResourcesTracker trackerB = createMockTracker("B", 4 * 1024 * 1024, 1, 10, 5); // 2MB files @{7, 9, 11} LocalResourcesTracker trackerC = createMockTracker("C", 2 * 1024 * 1024, 3, 7, 2); // Total cache: 20MB; verify removed at least 10MB rss.addResources(pubTracker); rss.addResources(trackerA); rss.addResources(trackerB); rss.addResources(trackerC); long deleted = 0L; ArgumentCaptor<LocalizedResource> captor = ArgumentCaptor.forClass(LocalizedResource.class); verify(pubTracker, atMost(2)) .remove(captor.capture(), isA(DeletionService.class)); verify(trackerA, atMost(4)) .remove(captor.capture(), isA(DeletionService.class)); verify(trackerB, atMost(1)) .remove(captor.capture(), isA(DeletionService.class)); verify(trackerC, atMost(3)) .remove(captor.capture(), isA(DeletionService.class)); for (LocalizedResource rem : captor.getAllValues()) { deleted += rem.getSize(); } assertTrue(deleted >= 10 * 1024 * 1024); assertTrue(deleted < 15 * 1024 * 1024); } LocalResourcesTracker createMockTracker(String user, final long rsrcSize, long nRsrcs, long timestamp, long tsstep) { Configuration conf = new Configuration(); ConcurrentMap<LocalResourceRequest,LocalizedResource> trackerResources = new ConcurrentHashMap<LocalResourceRequest,LocalizedResource>(); LocalResourcesTracker ret = spy(new LocalResourcesTrackerImpl(user, null, null, trackerResources, false, conf, new NMNullStateStoreService())); for (int i = 0; i < nRsrcs; ++i) { final LocalResourceRequest req = new LocalResourceRequest( new Path("file:///" + user + "/rsrc" + i), timestamp + i * tsstep, LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, null); final long 
ts = timestamp + i * tsstep; final Path p = new Path("file:///local/" + user + "/rsrc" + i); LocalizedResource rsrc = new LocalizedResource(req, null) { @Override public int getRefCount() { return 0; } @Override public long getSize() { return rsrcSize; } @Override public Path getLocalPath() { return p; } @Override public long getTimestamp() { return ts; } @Override public ResourceState getState() { return ResourceState.LOCALIZED; } }; trackerResources.put(req, rsrc); } return ret; } }
4,398
40.11215
82
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.net.URISyntaxException; import java.util.Random; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; import org.apache.hadoop.yarn.util.ConverterUtils; import static org.apache.hadoop.yarn.api.records.LocalResourceType.*; import static org.apache.hadoop.yarn.api.records.LocalResourceVisibility.*; import org.junit.Test; import static org.junit.Assert.*; public class TestLocalResource { static org.apache.hadoop.yarn.api.records.LocalResource getYarnResource(Path p, long size, long timestamp, LocalResourceType type, LocalResourceVisibility state, String pattern) throws URISyntaxException { org.apache.hadoop.yarn.api.records.LocalResource ret = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(org.apache.hadoop.yarn.api.records.LocalResource.class); ret.setResource(ConverterUtils.getYarnUrlFromURI(p.toUri())); ret.setSize(size); ret.setTimestamp(timestamp); ret.setType(type); ret.setVisibility(state); ret.setPattern(pattern); return ret; } static void checkEqual(LocalResourceRequest a, LocalResourceRequest b) { assertEquals(a, b); assertEquals(a.hashCode(), b.hashCode()); assertEquals(0, a.compareTo(b)); assertEquals(0, b.compareTo(a)); } static void checkNotEqual(LocalResourceRequest a, LocalResourceRequest b) { assertFalse(a.equals(b)); assertFalse(b.equals(a)); assertFalse(a.hashCode() == b.hashCode()); assertFalse(0 == a.compareTo(b)); assertFalse(0 == b.compareTo(a)); } @Test public void testResourceEquality() throws URISyntaxException { Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); System.out.println("SEED: " + seed); long basetime = r.nextLong() >>> 2; org.apache.hadoop.yarn.api.records.LocalResource yA = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, null); org.apache.hadoop.yarn.api.records.LocalResource yB = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, null); final LocalResourceRequest a = new LocalResourceRequest(yA); LocalResourceRequest b = new LocalResourceRequest(yA); checkEqual(a, b); b = new LocalResourceRequest(yB); checkEqual(a, b); // ignore visibility yB = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PRIVATE, null); b = new LocalResourceRequest(yB); checkEqual(a, b); // ignore size yB = getYarnResource( new Path("http://yak.org:80/foobar"), 0, basetime, FILE, PRIVATE, null); b = new 
LocalResourceRequest(yB); checkEqual(a, b); // note path yB = getYarnResource( new Path("hdfs://dingo.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC, null); b = new LocalResourceRequest(yB); checkNotEqual(a, b); // note type yB = getYarnResource( new Path("http://yak.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC, null); b = new LocalResourceRequest(yB); checkNotEqual(a, b); // note timestamp yB = getYarnResource( new Path("http://yak.org:80/foobar"), 0, basetime + 1, FILE, PUBLIC, null); b = new LocalResourceRequest(yB); checkNotEqual(a, b); // note pattern yB = getYarnResource( new Path("http://yak.org:80/foobar"), 0, basetime + 1, FILE, PUBLIC, "^/foo/.*"); b = new LocalResourceRequest(yB); checkNotEqual(a, b); } @Test public void testResourceOrder() throws URISyntaxException { Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); System.out.println("SEED: " + seed); long basetime = r.nextLong() >>> 2; org.apache.hadoop.yarn.api.records.LocalResource yA = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, "^/foo/.*"); final LocalResourceRequest a = new LocalResourceRequest(yA); // Path primary org.apache.hadoop.yarn.api.records.LocalResource yB = getYarnResource( new Path("http://yak.org:80/foobaz"), -1, basetime, FILE, PUBLIC, "^/foo/.*"); LocalResourceRequest b = new LocalResourceRequest(yB); assertTrue(0 > a.compareTo(b)); // timestamp secondary yB = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime + 1, FILE, PUBLIC, "^/foo/.*"); b = new LocalResourceRequest(yB); assertTrue(0 > a.compareTo(b)); // type tertiary yB = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, "^/foo/.*"); b = new LocalResourceRequest(yB); assertTrue(0 != a.compareTo(b)); // don't care about order, just ne // path 4th yB = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, "^/food/.*"); b = new LocalResourceRequest(yB); assertTrue(0 != a.compareTo(b)); // don't care about order, just ne yB = getYarnResource( new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, null); b = new LocalResourceRequest(yB); assertTrue(0 != a.compareTo(b)); // don't care about order, just ne } }
6,269
37.231707
178
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalResourceStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType; public class MockLocalResourceStatus implements LocalResourceStatus { private LocalResource rsrc = null; private ResourceStatusType tag = null; private URL localPath = null; private long size = -1L; private SerializedException ex = null; MockLocalResourceStatus() { } MockLocalResourceStatus(LocalResource rsrc, ResourceStatusType tag, URL localPath, SerializedException ex) { this.rsrc = rsrc; this.tag = tag; this.localPath = localPath; this.ex = ex; } @Override public LocalResource getResource() { return rsrc; } @Override public ResourceStatusType getStatus() { return tag; } @Override public long getLocalSize() { return size; } @Override public URL getLocalPath() { return localPath; } @Override public SerializedException getException() { return ex; } @Override public void setResource(LocalResource rsrc) { this.rsrc = rsrc; } @Override public void setStatus(ResourceStatusType tag) { this.tag = tag; } @Override public void setLocalPath(URL localPath) { this.localPath = localPath; } @Override public void setLocalSize(long size) { this.size = size; } @Override public void setException(SerializedException ex) { this.ex = ex; } @Override public boolean equals(Object o) { if (!(o instanceof MockLocalResourceStatus)) { return false; } MockLocalResourceStatus other = (MockLocalResourceStatus) o; return getResource().equals(other.getResource()) && getStatus().equals(other.getStatus()) && (null != getLocalPath() && getLocalPath().equals(other.getLocalPath())) && (null != getException() && getException().equals(other.getException())); } @Override public int hashCode() { return 4344; } }
2,977
34.879518
89
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.isA; import static org.mockito.Matchers.same; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.Test; import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; public class 
TestContainerLocalizer { static final Log LOG = LogFactory.getLog(TestContainerLocalizer.class); static final Path basedir = new Path("target", TestContainerLocalizer.class.getName()); static final FsPermission CACHE_DIR_PERM = new FsPermission((short)0710); static final String appUser = "yak"; static final String appId = "app_RM_0"; static final String containerId = "container_0"; static final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 8040); private AbstractFileSystem spylfs; private Random random; private List<Path> localDirs; private Path tokenPath; private LocalizationProtocol nmProxy; @Test public void testContainerLocalizerMain() throws Exception { FileContext fs = FileContext.getLocalFSFileContext(); spylfs = spy(fs.getDefaultFileSystem()); ContainerLocalizer localizer = setupContainerLocalizerForTest(); // verify created cache List<Path> privCacheList = new ArrayList<Path>(); List<Path> appCacheList = new ArrayList<Path>(); for (Path p : localDirs) { Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), appUser); Path privcache = new Path(base, ContainerLocalizer.FILECACHE); privCacheList.add(privcache); Path appDir = new Path(base, new Path(ContainerLocalizer.APPCACHE, appId)); Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE); appCacheList.add(appcache); } // mock heartbeat responses from NM ResourceLocalizationSpec rsrcA = getMockRsrc(random, LocalResourceVisibility.PRIVATE, privCacheList.get(0)); ResourceLocalizationSpec rsrcB = getMockRsrc(random, LocalResourceVisibility.PRIVATE, privCacheList.get(0)); ResourceLocalizationSpec rsrcC = getMockRsrc(random, LocalResourceVisibility.APPLICATION, appCacheList.get(0)); ResourceLocalizationSpec rsrcD = getMockRsrc(random, LocalResourceVisibility.PRIVATE, privCacheList.get(0)); when(nmProxy.heartbeat(isA(LocalizerStatus.class))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcA))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcB))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcC))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.singletonList(rsrcD))) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE, Collections.<ResourceLocalizationSpec>emptyList())) .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE, null)); LocalResource tRsrcA = rsrcA.getResource(); LocalResource tRsrcB = rsrcB.getResource(); LocalResource tRsrcC = rsrcC.getResource(); LocalResource tRsrcD = rsrcD.getResource(); doReturn( new FakeDownload(rsrcA.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcA), isA(UserGroupInformation.class)); doReturn( new FakeDownload(rsrcB.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcB), isA(UserGroupInformation.class)); doReturn( new FakeDownload(rsrcC.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcC), isA(UserGroupInformation.class)); doReturn( new FakeDownload(rsrcD.getResource().getResource().getFile(), true)) .when(localizer).download(isA(Path.class), eq(tRsrcD), isA(UserGroupInformation.class)); // run localization assertEquals(0, localizer.runLocalization(nmAddr)); for (Path p : localDirs) { Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), appUser); Path privcache = new Path(base, ContainerLocalizer.FILECACHE); // 
$x/usercache/$user/filecache verify(spylfs).mkdir(eq(privcache), eq(CACHE_DIR_PERM), eq(false)); Path appDir = new Path(base, new Path(ContainerLocalizer.APPCACHE, appId)); // $x/usercache/$user/appcache/$appId/filecache Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE); verify(spylfs).mkdir(eq(appcache), eq(CACHE_DIR_PERM), eq(false)); } // verify tokens read at expected location verify(spylfs).open(tokenPath); // verify downloaded resources reported to NM verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcA.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcB.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcC.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcD.getResource()))); // verify all HB use localizerID provided verify(nmProxy, never()).heartbeat(argThat( new ArgumentMatcher<LocalizerStatus>() { @Override public boolean matches(Object o) { LocalizerStatus status = (LocalizerStatus) o; return !containerId.equals(status.getLocalizerId()); } })); } @Test @SuppressWarnings("unchecked") public void testLocalizerTokenIsGettingRemoved() throws Exception { FileContext fs = FileContext.getLocalFSFileContext(); spylfs = spy(fs.getDefaultFileSystem()); ContainerLocalizer localizer = setupContainerLocalizerForTest(); doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class), any(CompletionService.class), any(UserGroupInformation.class)); localizer.runLocalization(nmAddr); verify(spylfs, times(1)).delete(tokenPath, false); } @Test @SuppressWarnings("unchecked") // mocked generics public void testContainerLocalizerClosesFilesystems() throws Exception { // verify filesystems are closed when localizer doesn't fail FileContext fs = FileContext.getLocalFSFileContext(); spylfs = spy(fs.getDefaultFileSystem()); ContainerLocalizer localizer = setupContainerLocalizerForTest(); doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class), any(CompletionService.class), any(UserGroupInformation.class)); verify(localizer, never()).closeFileSystems( any(UserGroupInformation.class)); localizer.runLocalization(nmAddr); verify(localizer).closeFileSystems(any(UserGroupInformation.class)); spylfs = spy(fs.getDefaultFileSystem()); // verify filesystems are closed when localizer fails localizer = setupContainerLocalizerForTest(); doThrow(new YarnRuntimeException("Forced Failure")).when(localizer).localizeFiles( any(LocalizationProtocol.class), any(CompletionService.class), any(UserGroupInformation.class)); verify(localizer, never()).closeFileSystems( any(UserGroupInformation.class)); localizer.runLocalization(nmAddr); verify(localizer).closeFileSystems(any(UserGroupInformation.class)); } @SuppressWarnings("unchecked") // mocked generics private ContainerLocalizer setupContainerLocalizerForTest() throws Exception { // don't actually create dirs doNothing().when(spylfs).mkdir( isA(Path.class), isA(FsPermission.class), anyBoolean()); Configuration conf = new Configuration(); FileContext lfs = FileContext.getFileContext(spylfs, conf); localDirs = new ArrayList<Path>(); for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); } RecordFactory mockRF = getMockLocalizerRecordFactory(); ContainerLocalizer concreteLoc = new ContainerLocalizer(lfs, appUser, appId, containerId, localDirs, mockRF); ContainerLocalizer localizer = spy(concreteLoc); // return credential stream instead of opening local file random = new Random(); long seed = random.nextLong(); System.out.println("SEED: " + seed); 
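// Log the seed so a failing run can be reproduced with the same random data.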
random.setSeed(seed); DataInputBuffer appTokens = createFakeCredentials(random, 10); tokenPath = lfs.makeQualified(new Path( String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, containerId))); doReturn(new FSDataInputStream(new FakeFSDataInputStream(appTokens)) ).when(spylfs).open(tokenPath); nmProxy = mock(LocalizationProtocol.class); doReturn(nmProxy).when(localizer).getProxy(nmAddr); doNothing().when(localizer).sleep(anyInt()); // return result instantly for deterministic test ExecutorService syncExec = mock(ExecutorService.class); CompletionService<Path> cs = mock(CompletionService.class); when(cs.submit(isA(Callable.class))) .thenAnswer(new Answer<Future<Path>>() { @Override public Future<Path> answer(InvocationOnMock invoc) throws Throwable { Future<Path> done = mock(Future.class); when(done.isDone()).thenReturn(true); FakeDownload d = (FakeDownload) invoc.getArguments()[0]; when(done.get()).thenReturn(d.call()); return done; } }); doReturn(syncExec).when(localizer).createDownloadThreadPool(); doReturn(cs).when(localizer).createCompletionService(syncExec); return localizer; } static class HBMatches extends ArgumentMatcher<LocalizerStatus> { final LocalResource rsrc; HBMatches(LocalResource rsrc) { this.rsrc = rsrc; } @Override public boolean matches(Object o) { LocalizerStatus status = (LocalizerStatus) o; for (LocalResourceStatus localized : status.getResources()) { switch (localized.getStatus()) { case FETCH_SUCCESS: if (localized.getLocalPath().getFile().contains( rsrc.getResource().getFile())) { return true; } break; default: fail("Unexpected: " + localized.getStatus()); break; } } return false; } } static class FakeDownload implements Callable<Path> { private final Path localPath; private final boolean succeed; FakeDownload(String absPath, boolean succeed) { this.localPath = new Path("file:///localcache" + absPath); this.succeed = succeed; } @Override public Path call() throws IOException { if (!succeed) { throw new IOException("FAIL " + localPath); } return localPath; } } static RecordFactory getMockLocalizerRecordFactory() { RecordFactory mockRF = mock(RecordFactory.class); when(mockRF.newRecordInstance(same(LocalResourceStatus.class))) .thenAnswer(new Answer<LocalResourceStatus>() { @Override public LocalResourceStatus answer(InvocationOnMock invoc) throws Throwable { return new MockLocalResourceStatus(); } }); when(mockRF.newRecordInstance(same(LocalizerStatus.class))) .thenAnswer(new Answer<LocalizerStatus>() { @Override public LocalizerStatus answer(InvocationOnMock invoc) throws Throwable { return new MockLocalizerStatus(); } }); return mockRF; } static ResourceLocalizationSpec getMockRsrc(Random r, LocalResourceVisibility vis, Path p) { ResourceLocalizationSpec resourceLocalizationSpec = mock(ResourceLocalizationSpec.class); LocalResource rsrc = mock(LocalResource.class); String name = Long.toHexString(r.nextLong()); URL uri = mock(org.apache.hadoop.yarn.api.records.URL.class); when(uri.getScheme()).thenReturn("file"); when(uri.getHost()).thenReturn(null); when(uri.getFile()).thenReturn("/local/" + vis + "/" + name); when(rsrc.getResource()).thenReturn(uri); when(rsrc.getSize()).thenReturn(r.nextInt(1024) + 1024L); when(rsrc.getTimestamp()).thenReturn(r.nextInt(1024) + 2048L); when(rsrc.getType()).thenReturn(LocalResourceType.FILE); when(rsrc.getVisibility()).thenReturn(vis); when(resourceLocalizationSpec.getResource()).thenReturn(rsrc); when(resourceLocalizationSpec.getDestinationDirectory()). 
thenReturn(ConverterUtils.getYarnUrlFromPath(p)); return resourceLocalizationSpec; } @SuppressWarnings({ "rawtypes", "unchecked" }) static DataInputBuffer createFakeCredentials(Random r, int nTok) throws IOException { Credentials creds = new Credentials(); byte[] password = new byte[20]; Text kind = new Text(); Text service = new Text(); Text alias = new Text(); for (int i = 0; i < nTok; ++i) { byte[] identifier = ("idef" + i).getBytes(); r.nextBytes(password); kind.set("kind" + i); service.set("service" + i); alias.set("token" + i); Token token = new Token(identifier, password, kind, service); creds.addToken(alias, token); } DataOutputBuffer buf = new DataOutputBuffer(); creds.writeTokenStorageToStream(buf); DataInputBuffer ret = new DataInputBuffer(); ret.reset(buf.getData(), 0, buf.getLength()); return ret; } }
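// Note on the technique above: createFakeCredentials() serializes a Credentials bundle via writeTokenStorageToStream() into a DataOutputBuffer and rewinds it into a DataInputBuffer, so the spied file system can serve the container token file entirely from memory (see the stubbed spylfs.open(tokenPath)) without touching disk.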
16,669
39.958231
89
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyShort; import static org.mockito.Matchers.any; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.isA; import static org.mockito.Matchers.isNull; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.File; import java.io.IOException; import java.lang.reflect.Constructor; import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Future; import org.apache.hadoop.fs.Options; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; import org.junit.Assert; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSError; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import 
org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalResourceStatusPBImpl; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.LocalizerRunner; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.LocalizerTracker; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.PublicLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceFailedLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; public class TestResourceLocalizationService { static final Path basedir = new Path("target", TestResourceLocalizationService.class.getName()); static Server mockServer; private Configuration conf; private AbstractFileSystem spylfs; private FileContext lfs; private NMContext nmContext; @BeforeClass public static void setupClass() { mockServer = mock(Server.class); doReturn(new InetSocketAddress(123)).when(mockServer).getListenerAddress(); } @Before public void setup() throws IOException { conf = new Configuration(); spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem()); lfs = FileContext.getFileContext(spylfs, conf); String logDir = lfs.makeQualified(new Path(basedir, "logdir ")).toString(); conf.set(YarnConfiguration.NM_LOG_DIRS, logDir); nmContext = new NMContext(new NMContainerTokenSecretManager( conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf), new NMNullStateStoreService()); } @After public void cleanup() throws IOException { conf = null; try { FileUtils.deleteDirectory(new File(basedir.toString())); } catch (IOException e) { // ignore } } @Test public void testLocalizationInit() throws Exception { conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077"); AsyncDispatcher dispatcher = new AsyncDispatcher(); dispatcher.init(new Configuration()); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = spy(new DeletionService(exec)); delService.init(conf); delService.start(); List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); LocalDirsHandlerService diskhandler = new LocalDirsHandlerService(); diskhandler.init(conf); ResourceLocalizationService locService = spy(new ResourceLocalizationService(dispatcher, exec, delService, diskhandler, nmContext)); doReturn(lfs) 
.when(locService).getLocalFileContext(isA(Configuration.class)); try { dispatcher.start(); // initialize ResourceLocalizationService locService.init(conf); final FsPermission defaultPerm = new FsPermission((short)0755); // verify directory creation for (Path p : localDirs) { p = new Path((new URI(p.toString())).getPath()); Path usercache = new Path(p, ContainerLocalizer.USERCACHE); verify(spylfs) .mkdir(eq(usercache), eq(defaultPerm), eq(true)); Path publicCache = new Path(p, ContainerLocalizer.FILECACHE); verify(spylfs) .mkdir(eq(publicCache), eq(defaultPerm), eq(true)); Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR); verify(spylfs).mkdir(eq(nmPriv), eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true)); } } finally { dispatcher.stop(); delService.stop(); } } @Test public void testDirectoryCleanupOnNewlyCreatedStateStore() throws IOException, URISyntaxException { conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077"); AsyncDispatcher dispatcher = new AsyncDispatcher(); dispatcher.init(new Configuration()); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = spy(new DeletionService(exec)); delService.init(conf); delService.start(); List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); LocalDirsHandlerService diskhandler = new LocalDirsHandlerService(); diskhandler.init(conf); NMStateStoreService nmStateStoreService = mock(NMStateStoreService.class); when(nmStateStoreService.canRecover()).thenReturn(true); when(nmStateStoreService.isNewlyCreated()).thenReturn(true); ResourceLocalizationService locService = spy(new ResourceLocalizationService(dispatcher, exec, delService, diskhandler, nmContext)); doReturn(lfs) .when(locService).getLocalFileContext(isA(Configuration.class)); try { dispatcher.start(); // initialize ResourceLocalizationService locService.init(conf); final FsPermission defaultPerm = new FsPermission((short)0755); // verify that the old directories are renamed for deletion and that new // directories are created for (Path p : localDirs) { p = new Path((new URI(p.toString())).getPath()); Path usercache = new Path(p, ContainerLocalizer.USERCACHE); verify(spylfs) .rename(eq(usercache), any(Path.class), any(Options.Rename.class)); verify(spylfs) .mkdir(eq(usercache), eq(defaultPerm), eq(true)); Path publicCache = new Path(p, ContainerLocalizer.FILECACHE); verify(spylfs) .rename(eq(publicCache), any(Path.class), any(Options.Rename.class)); verify(spylfs) .mkdir(eq(publicCache), eq(defaultPerm), eq(true)); Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR); verify(spylfs) .rename(eq(nmPriv), any(Path.class), any(Options.Rename.class)); verify(spylfs).mkdir(eq(nmPriv), eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true)); } } finally { dispatcher.stop(); delService.stop(); } } @Test @SuppressWarnings("unchecked") // mocked generics public void testResourceRelease() throws Exception { List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); LocalizerTracker mockLocallilzerTracker = mock(LocalizerTracker.class); DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler<ApplicationEvent> 
applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); //Ignore actual localization EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerBus); ContainerExecutor exec = mock(ContainerExecutor.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); DeletionService delService = new DeletionService(exec); delService.init(new Configuration()); delService.start(); ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(mockLocallilzerTracker).when(spyService).createLocalizerTracker( isA(Configuration.class)); doReturn(lfs).when(spyService) .getLocalFileContext(isA(Configuration.class)); try { spyService.init(conf); spyService.start(); final String user = "user0"; // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); dispatcher.await(); //Get a handle on the trackers after they're setup with INIT_APP_RESOURCES LocalResourcesTracker appTracker = spyService.getLocalResourcesTracker( LocalResourceVisibility.APPLICATION, user, appId); LocalResourcesTracker privTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user, appId); LocalResourcesTracker pubTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, appId); // init container. final Container c = getMockContainer(appId, 42, user); // init resources Random r = new Random(); long seed = r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); // Send localization requests for one resource of each type. 
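// The request maps below are keyed by LocalResourceVisibility. Note that req and req2 share privReq, so after both request events the PRIVATE resource is expected to carry a reference count of 2, while each PUBLIC and APPLICATION resource carries a count of 1.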
final LocalResource privResource = getPrivateMockedResource(r); final LocalResourceRequest privReq = new LocalResourceRequest(privResource); final LocalResource pubResource = getPublicMockedResource(r); final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource); final LocalResource pubResource2 = getPublicMockedResource(r); final LocalResourceRequest pubReq2 = new LocalResourceRequest(pubResource2); final LocalResource appResource = getAppMockedResource(r); final LocalResourceRequest appReq = new LocalResourceRequest(appResource); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req.put(LocalResourceVisibility.PRIVATE, Collections.singletonList(privReq)); req.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq)); req.put(LocalResourceVisibility.APPLICATION, Collections.singletonList(appReq)); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req2 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req2.put(LocalResourceVisibility.PRIVATE, Collections.singletonList(privReq)); req2.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq2)); Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>(); pubRsrcs.add(pubReq); pubRsrcs.add(pubReq2); // Send Request event spyService.handle(new ContainerLocalizationRequestEvent(c, req)); spyService.handle(new ContainerLocalizationRequestEvent(c, req2)); dispatcher.await(); int privRsrcCount = 0; for (LocalizedResource lr : privTracker) { privRsrcCount++; Assert.assertEquals("Incorrect reference count", 2, lr.getRefCount()); Assert.assertEquals(privReq, lr.getRequest()); } Assert.assertEquals(1, privRsrcCount); int pubRsrcCount = 0; for (LocalizedResource lr : pubTracker) { pubRsrcCount++; Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount()); pubRsrcs.remove(lr.getRequest()); } Assert.assertEquals(0, pubRsrcs.size()); Assert.assertEquals(2, pubRsrcCount); int appRsrcCount = 0; for (LocalizedResource lr : appTracker) { appRsrcCount++; Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount()); Assert.assertEquals(appReq, lr.getRequest()); } Assert.assertEquals(1, appRsrcCount); //Send Cleanup Event spyService.handle(new ContainerLocalizationCleanupEvent(c, req)); verify(mockLocallilzerTracker) .cleanupPrivLocalizers("container_314159265358979_0003_01_000042"); req2.remove(LocalResourceVisibility.PRIVATE); spyService.handle(new ContainerLocalizationCleanupEvent(c, req2)); dispatcher.await(); pubRsrcs.add(pubReq); pubRsrcs.add(pubReq2); privRsrcCount = 0; for (LocalizedResource lr : privTracker) { privRsrcCount++; Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount()); Assert.assertEquals(privReq, lr.getRequest()); } Assert.assertEquals(1, privRsrcCount); pubRsrcCount = 0; for (LocalizedResource lr : pubTracker) { pubRsrcCount++; Assert.assertEquals("Incorrect reference count", 0, lr.getRefCount()); pubRsrcs.remove(lr.getRequest()); } Assert.assertEquals(0, pubRsrcs.size()); Assert.assertEquals(2, pubRsrcCount); appRsrcCount = 0; for (LocalizedResource lr : appTracker) { appRsrcCount++; Assert.assertEquals("Incorrect reference count", 0, lr.getRefCount()); Assert.assertEquals(appReq, lr.getRequest()); } Assert.assertEquals(1, appRsrcCount); } finally { dispatcher.stop(); delService.stop(); } } @Test @SuppressWarnings("unchecked") // mocked generics public void testRecovery() throws Exception { final String user1 = "user1"; 
final String user2 = "user2"; final ApplicationId appId1 = ApplicationId.newInstance(1, 1); final ApplicationId appId2 = ApplicationId.newInstance(1, 2); List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); //Ignore actual localization EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerBus); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); ResourceLocalizationService spyService = createSpyService(dispatcher, dirsHandler, stateStore); try { spyService.init(conf); spyService.start(); final Application app1 = mock(Application.class); when(app1.getUser()).thenReturn(user1); when(app1.getAppId()).thenReturn(appId1); final Application app2 = mock(Application.class); when(app2.getUser()).thenReturn(user2); when(app2.getAppId()).thenReturn(appId2); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app1)); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app2)); dispatcher.await(); //Get a handle on the trackers after they're setup with INIT_APP_RESOURCES LocalResourcesTracker appTracker1 = spyService.getLocalResourcesTracker( LocalResourceVisibility.APPLICATION, user1, appId1); LocalResourcesTracker privTracker1 = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user1, null); LocalResourcesTracker appTracker2 = spyService.getLocalResourcesTracker( LocalResourceVisibility.APPLICATION, user2, appId2); LocalResourcesTracker pubTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, null, null); // init containers final Container c1 = getMockContainer(appId1, 1, user1); final Container c2 = getMockContainer(appId2, 2, user2); // init resources Random r = new Random(); long seed = r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); // Send localization requests of each type. 
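// Outline of this test: localize resources for two users and two apps, then build a fresh ResourceLocalizationService over the same NMMemoryStateStoreService and call recoverLocalizedResources(). Only resources that reached LOCALIZED should survive recovery; the one left in DOWNLOADING (appReq2) should be absent afterwards.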
final LocalResource privResource1 = getPrivateMockedResource(r); final LocalResourceRequest privReq1 = new LocalResourceRequest(privResource1); final LocalResource privResource2 = getPrivateMockedResource(r); final LocalResourceRequest privReq2 = new LocalResourceRequest(privResource2); final LocalResource pubResource1 = getPublicMockedResource(r); final LocalResourceRequest pubReq1 = new LocalResourceRequest(pubResource1); final LocalResource pubResource2 = getPublicMockedResource(r); final LocalResourceRequest pubReq2 = new LocalResourceRequest(pubResource2); final LocalResource appResource1 = getAppMockedResource(r); final LocalResourceRequest appReq1 = new LocalResourceRequest(appResource1); final LocalResource appResource2 = getAppMockedResource(r); final LocalResourceRequest appReq2 = new LocalResourceRequest(appResource2); final LocalResource appResource3 = getAppMockedResource(r); final LocalResourceRequest appReq3 = new LocalResourceRequest(appResource3); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req1 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req1.put(LocalResourceVisibility.PRIVATE, Arrays.asList(new LocalResourceRequest[] { privReq1, privReq2 })); req1.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq1)); req1.put(LocalResourceVisibility.APPLICATION, Collections.singletonList(appReq1)); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req2 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req2.put(LocalResourceVisibility.APPLICATION, Arrays.asList(new LocalResourceRequest[] { appReq2, appReq3 })); req2.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq2)); // Send Request event spyService.handle(new ContainerLocalizationRequestEvent(c1, req1)); spyService.handle(new ContainerLocalizationRequestEvent(c2, req2)); dispatcher.await(); // Simulate start of localization for all resources privTracker1.getPathForLocalization(privReq1, dirsHandler.getLocalPathForWrite( ContainerLocalizer.USERCACHE + user1)); privTracker1.getPathForLocalization(privReq2, dirsHandler.getLocalPathForWrite( ContainerLocalizer.USERCACHE + user1)); LocalizedResource privLr1 = privTracker1.getLocalizedResource(privReq1); LocalizedResource privLr2 = privTracker1.getLocalizedResource(privReq2); appTracker1.getPathForLocalization(appReq1, dirsHandler.getLocalPathForWrite( ContainerLocalizer.APPCACHE + appId1)); LocalizedResource appLr1 = appTracker1.getLocalizedResource(appReq1); appTracker2.getPathForLocalization(appReq2, dirsHandler.getLocalPathForWrite( ContainerLocalizer.APPCACHE + appId2)); LocalizedResource appLr2 = appTracker2.getLocalizedResource(appReq2); appTracker2.getPathForLocalization(appReq3, dirsHandler.getLocalPathForWrite( ContainerLocalizer.APPCACHE + appId2)); LocalizedResource appLr3 = appTracker2.getLocalizedResource(appReq3); pubTracker.getPathForLocalization(pubReq1, dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE)); LocalizedResource pubLr1 = pubTracker.getLocalizedResource(pubReq1); pubTracker.getPathForLocalization(pubReq2, dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE)); LocalizedResource pubLr2 = pubTracker.getLocalizedResource(pubReq2); // Simulate completion of localization for most resources with // possibly different sizes than in the request assertNotNull("Localization not started", privLr1.getLocalPath()); privTracker1.handle(new ResourceLocalizedEvent(privReq1, privLr1.getLocalPath(), privLr1.getSize() + 5)); 
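// The "+ 5" above (and the other offsets below) deliberately report a completion size that differs from the requested size: the tracker should record the actual localized size, which the post-recovery getSize() assertions rely on.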
assertNotNull("Localization not started", privLr2.getLocalPath()); privTracker1.handle(new ResourceLocalizedEvent(privReq2, privLr2.getLocalPath(), privLr2.getSize() + 10)); assertNotNull("Localization not started", appLr1.getLocalPath()); appTracker1.handle(new ResourceLocalizedEvent(appReq1, appLr1.getLocalPath(), appLr1.getSize())); assertNotNull("Localization not started", appLr3.getLocalPath()); appTracker2.handle(new ResourceLocalizedEvent(appReq3, appLr3.getLocalPath(), appLr3.getSize() + 7)); assertNotNull("Localization not started", pubLr1.getLocalPath()); pubTracker.handle(new ResourceLocalizedEvent(pubReq1, pubLr1.getLocalPath(), pubLr1.getSize() + 1000)); assertNotNull("Localization not started", pubLr2.getLocalPath()); pubTracker.handle(new ResourceLocalizedEvent(pubReq2, pubLr2.getLocalPath(), pubLr2.getSize() + 99999)); dispatcher.await(); assertEquals(ResourceState.LOCALIZED, privLr1.getState()); assertEquals(ResourceState.LOCALIZED, privLr2.getState()); assertEquals(ResourceState.LOCALIZED, appLr1.getState()); assertEquals(ResourceState.DOWNLOADING, appLr2.getState()); assertEquals(ResourceState.LOCALIZED, appLr3.getState()); assertEquals(ResourceState.LOCALIZED, pubLr1.getState()); assertEquals(ResourceState.LOCALIZED, pubLr2.getState()); // restart and recover spyService = createSpyService(dispatcher, dirsHandler, stateStore); spyService.init(conf); spyService.recoverLocalizedResources( stateStore.loadLocalizationState()); dispatcher.await(); appTracker1 = spyService.getLocalResourcesTracker( LocalResourceVisibility.APPLICATION, user1, appId1); privTracker1 = spyService.getLocalResourcesTracker( LocalResourceVisibility.PRIVATE, user1, null); appTracker2 = spyService.getLocalResourcesTracker( LocalResourceVisibility.APPLICATION, user2, appId2); pubTracker = spyService.getLocalResourcesTracker( LocalResourceVisibility.PUBLIC, null, null); LocalizedResource recoveredRsrc = privTracker1.getLocalizedResource(privReq1); assertEquals(privReq1, recoveredRsrc.getRequest()); assertEquals(privLr1.getLocalPath(), recoveredRsrc.getLocalPath()); assertEquals(privLr1.getSize(), recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState()); recoveredRsrc = privTracker1.getLocalizedResource(privReq2); assertEquals(privReq2, recoveredRsrc.getRequest()); assertEquals(privLr2.getLocalPath(), recoveredRsrc.getLocalPath()); assertEquals(privLr2.getSize(), recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState()); recoveredRsrc = appTracker1.getLocalizedResource(appReq1); assertEquals(appReq1, recoveredRsrc.getRequest()); assertEquals(appLr1.getLocalPath(), recoveredRsrc.getLocalPath()); assertEquals(appLr1.getSize(), recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState()); recoveredRsrc = appTracker2.getLocalizedResource(appReq2); assertNull("in-progress resource should not be present", recoveredRsrc); recoveredRsrc = appTracker2.getLocalizedResource(appReq3); assertEquals(appReq3, recoveredRsrc.getRequest()); assertEquals(appLr3.getLocalPath(), recoveredRsrc.getLocalPath()); assertEquals(appLr3.getSize(), recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState()); } finally { dispatcher.stop(); stateStore.close(); } } @Test( timeout = 10000) @SuppressWarnings("unchecked") // mocked generics public void testLocalizerRunnerException() throws Exception { DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); 
EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); LocalDirsHandlerService dirsHandlerSpy = spy(dirsHandler); dirsHandlerSpy.init(conf); DeletionService delServiceReal = new DeletionService(exec); DeletionService delService = spy(delServiceReal); delService.init(new Configuration()); delService.start(); ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandlerSpy, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); try { spyService.init(conf); spyService.start(); // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn("user0"); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); dispatcher.await(); Random r = new Random(); long seed = r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final Container c = getMockContainer(appId, 42, "user0"); final LocalResource resource1 = getPrivateMockedResource(r); System.out.println("Here 4"); final LocalResourceRequest req1 = new LocalResourceRequest(resource1); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); List<LocalResourceRequest> privateResourceList = new ArrayList<LocalResourceRequest>(); privateResourceList.add(req1); rsrcs.put(LocalResourceVisibility.PRIVATE, privateResourceList); final Constructor<?>[] constructors = FSError.class.getDeclaredConstructors(); constructors[0].setAccessible(true); FSError fsError = (FSError) constructors[0].newInstance(new IOException("Disk Error")); Mockito .doThrow(fsError) .when(dirsHandlerSpy) .getLocalPathForWrite(isA(String.class)); spyService.handle(new ContainerLocalizationRequestEvent(c, rsrcs)); Thread.sleep(1000); dispatcher.await(); // Verify if ContainerResourceFailedEvent is invoked on FSError verify(containerBus).handle(isA(ContainerResourceFailedEvent.class)); } finally { spyService.stop(); dispatcher.stop(); delService.stop(); } } @Test( timeout = 10000) @SuppressWarnings("unchecked") // mocked generics public void testLocalizationHeartbeat() throws Exception { List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[1]; // Making sure that we have only one local disk so that it will only be // selected for consecutive resource localization calls. This is required // to test LocalCacheDirectoryManager. 
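// A per-directory limit of 37 appears to be the minimum LocalCacheDirectoryManager accepts (36 possible single-character sub-directories plus one file), so each directory holds exactly one localized file and consecutive resources spill into fresh sub-directories; see the ".../10", ".../0/11" and ".../1/12" paths asserted below.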
localDirs.add(lfs.makeQualified(new Path(basedir, 0 + ""))); sDirs[0] = localDirs.get(0).toString(); conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); // Adding configuration to make sure there is only one file per // directory conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37"); DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); DeletionService delServiceReal = new DeletionService(exec); DeletionService delService = spy(delServiceReal); delService.init(new Configuration()); delService.start(); ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class)); FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask()); FsPermission nmPermission = ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask()); final Path userDir = new Path(sDirs[0].substring("file:".length()), ContainerLocalizer.USERCACHE); final Path fileDir = new Path(sDirs[0].substring("file:".length()), ContainerLocalizer.FILECACHE); final Path sysDir = new Path(sDirs[0].substring("file:".length()), ResourceLocalizationService.NM_PRIVATE_DIR); final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", new Path(sDirs[0])); final FileStatus nmFs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, nmPermission, "", "", sysDir); doAnswer(new Answer<FileStatus>() { @Override public FileStatus answer(InvocationOnMock invocation) throws Throwable { Object[] args = invocation.getArguments(); if (args.length > 0) { if (args[0].equals(userDir) || args[0].equals(fileDir)) { return fs; } } return nmFs; } }).when(spylfs).getFileStatus(isA(Path.class)); try { spyService.init(conf); spyService.start(); // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn("user0"); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); ArgumentMatcher<ApplicationEvent> matchesAppInit = new ArgumentMatcher<ApplicationEvent>() { @Override public boolean matches(Object o) { ApplicationEvent evt = (ApplicationEvent) o; return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID(); } }; dispatcher.await(); verify(applicationBus).handle(argThat(matchesAppInit)); // init container rsrc, localizer Random r = new Random(); long seed = r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final Container c = getMockContainer(appId, 42, "user0"); FSDataOutputStream out = new FSDataOutputStream(new DataOutputBuffer(), null); doReturn(out).when(spylfs).createInternal(isA(Path.class), isA(EnumSet.class), isA(FsPermission.class), anyInt(), anyShort(), anyLong(), 
isA(Progressable.class), isA(ChecksumOpt.class), anyBoolean()); final LocalResource resource1 = getPrivateMockedResource(r); LocalResource resource2 = null; do { resource2 = getPrivateMockedResource(r); } while (resource2 == null || resource2.equals(resource1)); LocalResource resource3 = null; do { resource3 = getPrivateMockedResource(r); } while (resource3 == null || resource3.equals(resource1) || resource3.equals(resource2)); // above call to make sure we don't get identical resources. final LocalResourceRequest req1 = new LocalResourceRequest(resource1); final LocalResourceRequest req2 = new LocalResourceRequest(resource2); final LocalResourceRequest req3 = new LocalResourceRequest(resource3); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); List<LocalResourceRequest> privateResourceList = new ArrayList<LocalResourceRequest>(); privateResourceList.add(req1); privateResourceList.add(req2); privateResourceList.add(req3); rsrcs.put(LocalResourceVisibility.PRIVATE, privateResourceList); spyService.handle(new ContainerLocalizationRequestEvent(c, rsrcs)); // Sigh. Thread init of private localizer not accessible Thread.sleep(1000); dispatcher.await(); String appStr = ConverterUtils.toString(appId); String ctnrStr = c.getContainerId().toString(); ArgumentCaptor<LocalizerStartContext> contextCaptor = ArgumentCaptor .forClass(LocalizerStartContext.class); verify(exec).startLocalizer(contextCaptor.capture()); LocalizerStartContext context = contextCaptor.getValue(); Path localizationTokenPath = context.getNmPrivateContainerTokens(); assertEquals("user0", context.getUser()); assertEquals(appStr, context.getAppId()); assertEquals(ctnrStr, context.getLocId()); // heartbeat from localizer LocalResourceStatus rsrc1success = mock(LocalResourceStatus.class); LocalResourceStatus rsrc2pending = mock(LocalResourceStatus.class); LocalResourceStatus rsrc2success = mock(LocalResourceStatus.class); LocalResourceStatus rsrc3success = mock(LocalResourceStatus.class); LocalizerStatus stat = mock(LocalizerStatus.class); when(stat.getLocalizerId()).thenReturn(ctnrStr); when(rsrc1success.getResource()).thenReturn(resource1); when(rsrc2pending.getResource()).thenReturn(resource2); when(rsrc2success.getResource()).thenReturn(resource2); when(rsrc3success.getResource()).thenReturn(resource3); when(rsrc1success.getLocalSize()).thenReturn(4344L); when(rsrc2success.getLocalSize()).thenReturn(2342L); when(rsrc3success.getLocalSize()).thenReturn(5345L); URL locPath = getPath("/cache/private/blah"); when(rsrc1success.getLocalPath()).thenReturn(locPath); when(rsrc2success.getLocalPath()).thenReturn(locPath); when(rsrc3success.getLocalPath()).thenReturn(locPath); when(rsrc1success.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); when(rsrc2pending.getStatus()).thenReturn(ResourceStatusType.FETCH_PENDING); when(rsrc2success.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); when(rsrc3success.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); // Four heartbeats with sending: // 1 - empty // 2 - resource1 FETCH_SUCCESS // 3 - resource2 FETCH_PENDING // 4 - resource2 FETCH_SUCCESS, resource3 FETCH_SUCCESS List<LocalResourceStatus> rsrcs4 = new ArrayList<LocalResourceStatus>(); rsrcs4.add(rsrc2success); rsrcs4.add(rsrc3success); when(stat.getResources()) .thenReturn(Collections.<LocalResourceStatus>emptyList()) .thenReturn(Collections.singletonList(rsrc1success)) 
.thenReturn(Collections.singletonList(rsrc2pending)) .thenReturn(rsrcs4) .thenReturn(Collections.<LocalResourceStatus>emptyList()); String localPath = Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR + "user0" + Path.SEPARATOR + ContainerLocalizer.FILECACHE; // First heartbeat LocalizerHeartbeatResponse response = spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE, response.getLocalizerAction()); assertEquals(1, response.getResourceSpecs().size()); assertEquals(req1, new LocalResourceRequest(response.getResourceSpecs().get(0).getResource())); URL localizedPath = response.getResourceSpecs().get(0).getDestinationDirectory(); // A unique number (10), generated by the LocalResourcesTracker, is appended // to the local path assertTrue(localizedPath.getFile().endsWith( localPath + Path.SEPARATOR + "10")); // Second heartbeat response = spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE, response.getLocalizerAction()); assertEquals(1, response.getResourceSpecs().size()); assertEquals(req2, new LocalResourceRequest(response.getResourceSpecs() .get(0).getResource())); localizedPath = response.getResourceSpecs().get(0).getDestinationDirectory(); // The resource's destination path should now be inside sub-directory 0, as // LocalCacheDirectoryManager is in use and the number of files per // directory is restricted to 1. assertTrue(localizedPath.getFile().endsWith( localPath + Path.SEPARATOR + "0" + Path.SEPARATOR + "11")); // Third heartbeat response = spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE, response.getLocalizerAction()); assertEquals(1, response.getResourceSpecs().size()); assertEquals(req3, new LocalResourceRequest(response.getResourceSpecs() .get(0).getResource())); localizedPath = response.getResourceSpecs().get(0).getDestinationDirectory(); assertTrue(localizedPath.getFile().endsWith( localPath + Path.SEPARATOR + "1" + Path.SEPARATOR + "12")); response = spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE, response.getLocalizerAction()); spyService.handle(new ContainerLocalizationEvent( LocalizationEventType.CONTAINER_RESOURCES_LOCALIZED, c)); // The localizer is told to shut down after the // CONTAINER_RESOURCES_LOCALIZED event has been received response = spyService.heartbeat(stat); assertEquals(LocalizerAction.DIE, response.getLocalizerAction()); dispatcher.await(); // verify container notification ArgumentMatcher<ContainerEvent> matchesContainerLoc = new ArgumentMatcher<ContainerEvent>() { @Override public boolean matches(Object o) { ContainerEvent evt = (ContainerEvent) o; return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c.getContainerId() == evt.getContainerID(); } }; // Three resource localization calls in total, one for each resource. verify(containerBus, times(3)).handle(argThat(matchesContainerLoc)); // Verify deletion of localization token. 
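// The container tokens file sits under the NM-private directory, so it is removed through the DeletionService as the NM user (hence the null user argument below) once localization is finished.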
verify(delService).delete((String)isNull(), eq(localizationTokenPath)); } finally { spyService.stop(); dispatcher.stop(); delService.stop(); } } @Test @SuppressWarnings("unchecked") public void testPublicResourceInitializesLocalDir() throws Exception { // Setup state to simulate restart NM with existing state meaning no // directory creation during initialization NMStateStoreService spyStateStore = spy(nmContext.getNMStateStore()); when(spyStateStore.canRecover()).thenReturn(true); NMContext spyContext = spy(nmContext); when(spyContext.getNMStateStore()).thenReturn(spyStateStore); List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); DrainDispatcher dispatcher = new DrainDispatcher(); EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = mock(DeletionService.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher.init(conf); dispatcher.start(); try { ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, spyContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext( isA(Configuration.class)); spyService.init(conf); final FsPermission defaultPerm = new FsPermission((short)0755); // verify directory is not created at initialization for (Path p : localDirs) { p = new Path((new URI(p.toString())).getPath()); Path publicCache = new Path(p, ContainerLocalizer.FILECACHE); verify(spylfs, never()) .mkdir(eq(publicCache),eq(defaultPerm), eq(true)); } spyService.start(); final String user = "user0"; // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); dispatcher.await(); // init container. final Container c = getMockContainer(appId, 42, user); // init resources Random r = new Random(); long seed = r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); // Queue up public resource localization final LocalResource pubResource1 = getPublicMockedResource(r); final LocalResourceRequest pubReq1 = new LocalResourceRequest(pubResource1); LocalResource pubResource2 = null; do { pubResource2 = getPublicMockedResource(r); } while (pubResource2 == null || pubResource2.equals(pubResource1)); // above call to make sure we don't get identical resources. 
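// (The mocked resources are built from random names and timestamps, so a collision is possible in principle; the loop above retries until the two resources differ.)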
final LocalResourceRequest pubReq2 = new LocalResourceRequest(pubResource2); Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>(); pubRsrcs.add(pubReq1); pubRsrcs.add(pubReq2); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req.put(LocalResourceVisibility.PUBLIC, pubRsrcs); spyService.handle(new ContainerLocalizationRequestEvent(c, req)); dispatcher.await(); verify(spyService, times(1)).checkAndInitializeLocalDirs(); // verify directory creation for (Path p : localDirs) { p = new Path((new URI(p.toString())).getPath()); Path publicCache = new Path(p, ContainerLocalizer.FILECACHE); verify(spylfs).mkdir(eq(publicCache),eq(defaultPerm), eq(true)); } } finally { dispatcher.stop(); } } @Test(timeout=20000) @SuppressWarnings("unchecked") // mocked generics public void testFailedPublicResource() throws Exception { List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); DrainDispatcher dispatcher = new DrainDispatcher(); EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = mock(DeletionService.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher.init(conf); dispatcher.start(); try { ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext( isA(Configuration.class)); spyService.init(conf); spyService.start(); final String user = "user0"; // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); dispatcher.await(); // init container. 
final Container c = getMockContainer(appId, 42, user); // init resources Random r = new Random(); long seed = r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); // cause chmod to fail after a delay final CyclicBarrier barrier = new CyclicBarrier(2); doAnswer(new Answer<Void>() { public Void answer(InvocationOnMock invocation) throws IOException { try { barrier.await(); } catch (InterruptedException e) { } catch (BrokenBarrierException e) { } throw new IOException("forced failure"); } }).when(spylfs) .setPermission(isA(Path.class), isA(FsPermission.class)); // Queue up two localization requests for the same public resource final LocalResource pubResource = getPublicMockedResource(r); final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq)); Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>(); pubRsrcs.add(pubReq); spyService.handle(new ContainerLocalizationRequestEvent(c, req)); spyService.handle(new ContainerLocalizationRequestEvent(c, req)); dispatcher.await(); // allow the chmod to fail now that both requests have been queued barrier.await(); verify(containerBus, timeout(5000).times(2)) .handle(isA(ContainerResourceFailedEvent.class)); } finally { dispatcher.stop(); } } /* * Test case for handling RejectedExecutionException and IOException which can * be thrown when adding public resources to the pending queue. * RejectedExecutionException can be thrown either due to the incoming queue * being full or if the ExecutorCompletionService threadpool is shutdown. * Since it's hard to simulate the queue being full, this test just shuts down * the threadpool and makes sure the exception is handled. If anything is * messed up the async dispatcher thread will cause a system exit causing the * test to fail. 
*/ @Test @SuppressWarnings("unchecked") public void testPublicResourceAddResourceExceptions() throws Exception { List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[4]; for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); DrainDispatcher dispatcher = new DrainDispatcher(); EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = mock(DeletionService.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); LocalDirsHandlerService dirsHandlerSpy = spy(dirsHandler); dirsHandlerSpy.init(conf); dispatcher.init(conf); dispatcher.start(); try { ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandlerSpy, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext( isA(Configuration.class)); spyService.init(conf); spyService.start(); final String user = "user0"; // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); dispatcher.await(); // init resources Random r = new Random(); r.setSeed(r.nextLong()); // Queue localization request for the public resource final LocalResource pubResource = getPublicMockedResource(r); final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req .put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq)); // init container. 
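// Three failure modes are exercised in turn below: an IOException injected into getLocalPathForWrite(), an IllegalArgumentException caused by a resource URL with a trailing slash, and a RejectedExecutionException triggered by shutting down the public localizer's thread pool. In each case the resource must not show up in the PUBLIC tracker.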
final Container c = getMockContainer(appId, 42, user); // first test ioexception Mockito .doThrow(new IOException()) .when(dirsHandlerSpy) .getLocalPathForWrite(isA(String.class), Mockito.anyLong(), Mockito.anyBoolean()); // send request spyService.handle(new ContainerLocalizationRequestEvent(c, req)); dispatcher.await(); LocalResourcesTracker tracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, appId); Assert.assertNull(tracker.getLocalizedResource(pubReq)); // test IllegalArgumentException String name = Long.toHexString(r.nextLong()); URL url = getPath("/local/PRIVATE/" + name + "/"); final LocalResource rsrc = BuilderUtils.newLocalResource(url, LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, r.nextInt(1024) + 1024L, r.nextInt(1024) + 2048L, false); final LocalResourceRequest pubReq1 = new LocalResourceRequest(rsrc); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req1 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req1.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq1)); Mockito .doCallRealMethod() .when(dirsHandlerSpy) .getLocalPathForWrite(isA(String.class), Mockito.anyLong(), Mockito.anyBoolean()); // send request spyService.handle(new ContainerLocalizationRequestEvent(c, req1)); dispatcher.await(); tracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, appId); Assert.assertNull(tracker.getLocalizedResource(pubReq)); // test RejectedExecutionException by shutting down the thread pool PublicLocalizer publicLocalizer = spyService.getPublicLocalizer(); publicLocalizer.threadPool.shutdown(); spyService.handle(new ContainerLocalizationRequestEvent(c, req)); dispatcher.await(); tracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, appId); Assert.assertNull(tracker.getLocalizedResource(pubReq)); } finally { // if we call stop with events in the queue, an InterruptedException gets // thrown resulting in the dispatcher thread causing a system exit dispatcher.await(); dispatcher.stop(); } } @Test(timeout = 100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPrivateResource() throws Exception { DrainDispatcher dispatcher1 = null; try { dispatcher1 = new DrainDispatcher(); String user = "testuser"; ApplicationId appId = BuilderUtils.newApplicationId(1, 1); // creating one local directory List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[1]; for (int i = 0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); LocalDirsHandlerService localDirHandler = new LocalDirsHandlerService(); localDirHandler.init(conf); // Registering event handlers EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher1.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher1.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = mock(DeletionService.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); // initializing directory handler. 
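// Scenario: two containers of the same user request the same PRIVATE resource. The LocalizedResource carries a single semaphore permit, so only the first localizer to heartbeat is handed the download, while the second receives an empty response; once the first reports a failure the resource moves to FAILED and is never handed out again.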
dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rls = new ResourceLocalizationService(dispatcher1, exec, delService, localDirHandler, nmContext); dispatcher1.register(LocalizationEventType.class, rls); rls.init(conf); rls.handle(createApplicationLocalizationEvent(user, appId)); LocalResourceRequest req = new LocalResourceRequest(new Path("file:///tmp"), 123L, LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, ""); // We pre-populate the LocalizerRunners here because the // ResourceLocalizationService would otherwise start real localizer threads // internally, which we do not want in this test. // creating new containers and populating corresponding localizer runners // Container - 1 ContainerImpl container1 = createMockContainer(user, 1); String localizerId1 = container1.getContainerId().toString(); rls.getPrivateLocalizers().put( localizerId1, rls.new LocalizerRunner(new LocalizerContext(user, container1 .getContainerId(), null), localizerId1)); LocalizerRunner localizerRunner1 = rls.getLocalizerRunner(localizerId1); dispatcher1.getEventHandler().handle( createContainerLocalizationEvent(container1, LocalResourceVisibility.PRIVATE, req)); Assert .assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 1, 200)); // Container - 2 now makes the request. ContainerImpl container2 = createMockContainer(user, 2); String localizerId2 = container2.getContainerId().toString(); rls.getPrivateLocalizers().put( localizerId2, rls.new LocalizerRunner(new LocalizerContext(user, container2 .getContainerId(), null), localizerId2)); LocalizerRunner localizerRunner2 = rls.getLocalizerRunner(localizerId2); dispatcher1.getEventHandler().handle( createContainerLocalizationEvent(container2, LocalResourceVisibility.PRIVATE, req)); Assert .assertTrue(waitForPrivateDownloadToStart(rls, localizerId2, 1, 200)); // Retrieving localized resource. LocalResourcesTracker tracker = rls.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user, appId); LocalizedResource lr = tracker.getLocalizedResource(req); // Resource would now have moved into DOWNLOADING state Assert.assertEquals(ResourceState.DOWNLOADING, lr.getState()); // Resource should have one permit Assert.assertEquals(1, lr.sem.availablePermits()); // Resource Localization Service receives first heartbeat from // ContainerLocalizer for container1 LocalizerHeartbeatResponse response1 = rls.heartbeat(createLocalizerStatus(localizerId1)); // Resource must have been added to scheduled map Assert.assertEquals(1, localizerRunner1.scheduled.size()); // Checking resource in the response and also available permits for it. Assert.assertEquals(req.getResource(), response1.getResourceSpecs() .get(0).getResource().getResource()); Assert.assertEquals(0, lr.sem.availablePermits()); // Resource Localization Service now receives first heartbeat from // ContainerLocalizer for container2 LocalizerHeartbeatResponse response2 = rls.heartbeat(createLocalizerStatus(localizerId2)); // Resource must not have been added to scheduled map Assert.assertEquals(0, localizerRunner2.scheduled.size()); // No resource is returned in response Assert.assertEquals(0, response2.getResourceSpecs().size()); // ContainerLocalizer - 1 now sends failed resource heartbeat. rls.heartbeat(createLocalizerStatusForFailedResource(localizerId1, req)); // Resource localization should fail and the state is modified accordingly. // Also the lock on the LocalizedResource should be released. 
Assert .assertTrue(waitForResourceState(lr, rls, req, LocalResourceVisibility.PRIVATE, user, appId, ResourceState.FAILED, 200)); Assert.assertTrue(lr.getState().equals(ResourceState.FAILED)); Assert.assertEquals(0, localizerRunner1.scheduled.size()); // Container-2 now once again sends a heartbeat to the resource // localization service, trying to download the resource again. It should // still not get the resource, as the resource is no longer in the // DOWNLOADING state. response2 = rls.heartbeat(createLocalizerStatus(localizerId2)); // Resource must not have been added to the scheduled map. // Also, as the resource failed to download, it will be removed from the // pending list. Assert.assertEquals(0, localizerRunner2.scheduled.size()); Assert.assertEquals(0, localizerRunner2.pending.size()); Assert.assertEquals(0, response2.getResourceSpecs().size()); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } } @Test(timeout = 10000) @SuppressWarnings("unchecked") public void testLocalResourcePath() throws Exception { // test the local path where application and user cache files will be // localized. DrainDispatcher dispatcher1 = null; try { dispatcher1 = new DrainDispatcher(); String user = "testuser"; ApplicationId appId = BuilderUtils.newApplicationId(1, 1); // creating one local directory List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[1]; for (int i = 0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); LocalDirsHandlerService localDirHandler = new LocalDirsHandlerService(); localDirHandler.init(conf); // Registering event handlers EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher1.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher1.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = mock(DeletionService.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); // initializing directory handler. dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rls = new ResourceLocalizationService(dispatcher1, exec, delService, localDirHandler, nmContext); dispatcher1.register(LocalizationEventType.class, rls); rls.init(conf); rls.handle(createApplicationLocalizationEvent(user, appId)); // We need to pre-populate the LocalizerRunners because the // ResourceLocalizationService would otherwise start them internally, // which we do not want here.
// creating new container and populating corresponding localizer runner // Container - 1 Container container1 = createMockContainer(user, 1); String localizerId1 = container1.getContainerId().toString(); rls.getPrivateLocalizers().put( localizerId1, rls.new LocalizerRunner(new LocalizerContext(user, container1 .getContainerId(), null), localizerId1)); // Creating two requests for container // 1) Private resource // 2) Application resource LocalResourceRequest reqPriv = new LocalResourceRequest(new Path("file:///tmp1"), 123L, LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, ""); List<LocalResourceRequest> privList = new ArrayList<LocalResourceRequest>(); privList.add(reqPriv); LocalResourceRequest reqApp = new LocalResourceRequest(new Path("file:///tmp2"), 123L, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, ""); List<LocalResourceRequest> appList = new ArrayList<LocalResourceRequest>(); appList.add(reqApp); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); rsrcs.put(LocalResourceVisibility.APPLICATION, appList); rsrcs.put(LocalResourceVisibility.PRIVATE, privList); dispatcher1.getEventHandler().handle( new ContainerLocalizationRequestEvent(container1, rsrcs)); // Now waiting for resource download to start. Here actual will not start // Only the resources will be populated into pending list. Assert .assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 2, 500)); // Validating user and application cache paths String userCachePath = StringUtils.join(Path.SEPARATOR, Arrays.asList(localDirs.get(0) .toUri().getRawPath(), ContainerLocalizer.USERCACHE, user, ContainerLocalizer.FILECACHE)); String userAppCachePath = StringUtils.join(Path.SEPARATOR, Arrays.asList(localDirs.get(0) .toUri().getRawPath(), ContainerLocalizer.USERCACHE, user, ContainerLocalizer.APPCACHE, appId.toString(), ContainerLocalizer.FILECACHE)); // Now the Application and private resources may come in any order // for download. 
// For the user cache: // returned destinationPath = user cache path + random number // For the app cache: // returned destinationPath = user app cache path + random number int returnedResources = 0; boolean appRsrc = false, privRsrc = false; while (returnedResources < 2) { LocalizerHeartbeatResponse response = rls.heartbeat(createLocalizerStatus(localizerId1)); for (ResourceLocalizationSpec resourceSpec : response .getResourceSpecs()) { returnedResources++; Path destinationDirectory = new Path(resourceSpec.getDestinationDirectory().getFile()); if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.APPLICATION) { appRsrc = true; Assert.assertEquals(userAppCachePath, destinationDirectory .getParent().toUri().toString()); } else if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.PRIVATE) { privRsrc = true; Assert.assertEquals(userCachePath, destinationDirectory.getParent() .toUri().toString()); } else { throw new Exception("Unexpected resource received."); } } } // We should have received both resources (application and private) Assert.assertTrue(appRsrc && privRsrc); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } } private LocalizerStatus createLocalizerStatusForFailedResource( String localizerId, LocalResourceRequest req) { LocalizerStatus status = createLocalizerStatus(localizerId); LocalResourceStatus resourceStatus = new LocalResourceStatusPBImpl(); resourceStatus.setException(SerializedException .newInstance(new YarnException("test"))); resourceStatus.setStatus(ResourceStatusType.FETCH_FAILURE); resourceStatus.setResource(req); status.addResourceStatus(resourceStatus); return status; } private LocalizerStatus createLocalizerStatus(String localizerId1) { LocalizerStatus status = new LocalizerStatusPBImpl(); status.setLocalizerId(localizerId1); return status; } private LocalizationEvent createApplicationLocalizationEvent(String user, ApplicationId appId) { Application app = mock(Application.class); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); return new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app); } @Test(timeout = 100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPublicResource() throws Exception { DrainDispatcher dispatcher1 = null; String user = "testuser"; try { // creating one local directory List<Path> localDirs = new ArrayList<Path>(); String[] sDirs = new String[1]; for (int i = 0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); sDirs[i] = localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); // Registering event handlers EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher1 = new DrainDispatcher(); dispatcher1.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher1.register(ContainerEventType.class, containerBus); ContainerExecutor exec = mock(ContainerExecutor.class); DeletionService delService = mock(DeletionService.class); LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService(); // initializing directory handler. dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); // Creating and initializing ResourceLocalizationService but not starting // it, as otherwise it would remove requests from the pending queue.
ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher1, exec, delService, dirsHandler, nmContext); ResourceLocalizationService spyService = spy(rawService); dispatcher1.register(LocalizationEventType.class, spyService); spyService.init(conf); // Initially pending map should be empty for public localizer Assert.assertEquals(0, spyService.getPublicLocalizer().pending.size()); LocalResourceRequest req = new LocalResourceRequest(new Path("/tmp"), 123L, LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, ""); // Initializing application ApplicationImpl app = mock(ApplicationImpl.class); ApplicationId appId = BuilderUtils.newApplicationId(1, 1); when(app.getAppId()).thenReturn(appId); when(app.getUser()).thenReturn(user); dispatcher1.getEventHandler().handle( new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); // Container - 1 // container requesting the resource ContainerImpl container1 = createMockContainer(user, 1); dispatcher1.getEventHandler().handle( createContainerLocalizationEvent(container1, LocalResourceVisibility.PUBLIC, req)); // Waiting for resource to change into DOWNLOADING state. Assert.assertTrue(waitForResourceState(null, spyService, req, LocalResourceVisibility.PUBLIC, user, null, ResourceState.DOWNLOADING, 200)); // Waiting for download to start. Assert.assertTrue(waitForPublicDownloadToStart(spyService, 1, 200)); LocalizedResource lr = getLocalizedResource(spyService, req, LocalResourceVisibility.PUBLIC, user, null); // Resource would now have moved into DOWNLOADING state Assert.assertEquals(ResourceState.DOWNLOADING, lr.getState()); // pending should have this resource now. Assert.assertEquals(1, spyService.getPublicLocalizer().pending.size()); // Now resource should have 0 permit. Assert.assertEquals(0, lr.sem.availablePermits()); // Container - 2 // Container requesting the same resource. ContainerImpl container2 = createMockContainer(user, 2); dispatcher1.getEventHandler().handle( createContainerLocalizationEvent(container2, LocalResourceVisibility.PUBLIC, req)); // Waiting for download to start. This should return false as new download // will not start Assert.assertFalse(waitForPublicDownloadToStart(spyService, 2, 100)); // Now Failing the resource download. As a part of it // resource state is changed and then lock is released. ResourceFailedLocalizationEvent locFailedEvent = new ResourceFailedLocalizationEvent( req,new Exception("test").toString()); spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, null).handle(locFailedEvent); // Waiting for resource to change into FAILED state. Assert.assertTrue(waitForResourceState(lr, spyService, req, LocalResourceVisibility.PUBLIC, user, null, ResourceState.FAILED, 200)); // releasing lock as a part of download failed process. lr.unlock(); // removing pending download request. spyService.getPublicLocalizer().pending.clear(); // Now I need to simulate a race condition wherein Event is added to // dispatcher before resource state changes to either FAILED or LOCALIZED // Hence sending event directly to dispatcher. LocalizerResourceRequestEvent localizerEvent = new LocalizerResourceRequestEvent(lr, null, mock(LocalizerContext.class), null); dispatcher1.getEventHandler().handle(localizerEvent); // Waiting for download to start. This should return false as new download // will not start Assert.assertFalse(waitForPublicDownloadToStart(spyService, 1, 100)); // Checking available permits now. 
Assert.assertEquals(1, lr.sem.availablePermits()); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } } private boolean waitForPrivateDownloadToStart( ResourceLocalizationService service, String localizerId, int size, int maxWaitTime) { List<LocalizerResourceRequestEvent> pending = null; // Waiting for localizer to be created. do { if (service.getPrivateLocalizers().get(localizerId) != null) { pending = service.getPrivateLocalizers().get(localizerId).pending; } if (pending == null) { try { maxWaitTime -= 20; Thread.sleep(20); } catch (Exception e) { } } else { break; } } while (maxWaitTime > 0); if (pending == null) { return false; } do { if (pending.size() == size) { return true; } else { try { maxWaitTime -= 20; Thread.sleep(20); } catch (Exception e) { } } } while (maxWaitTime > 0); return pending.size() == size; } private boolean waitForPublicDownloadToStart( ResourceLocalizationService service, int size, int maxWaitTime) { Map<Future<Path>, LocalizerResourceRequestEvent> pending = null; // Waiting for localizer to be created. do { if (service.getPublicLocalizer() != null) { pending = service.getPublicLocalizer().pending; } if (pending == null) { try { maxWaitTime -= 20; Thread.sleep(20); } catch (Exception e) { } } else { break; } } while (maxWaitTime > 0); if (pending == null) { return false; } do { if (pending.size() == size) { return true; } else { try { maxWaitTime -= 20; Thread.sleep(20); } catch (InterruptedException e) { } } } while (maxWaitTime > 0); return pending.size() == size; } private LocalizedResource getLocalizedResource( ResourceLocalizationService service, LocalResourceRequest req, LocalResourceVisibility vis, String user, ApplicationId appId) { return service.getLocalResourcesTracker(vis, user, appId) .getLocalizedResource(req); } private boolean waitForResourceState(LocalizedResource lr, ResourceLocalizationService service, LocalResourceRequest req, LocalResourceVisibility vis, String user, ApplicationId appId, ResourceState resourceState, long maxWaitTime) { LocalResourcesTracker tracker = null; // checking tracker is created do { if (tracker == null) { tracker = service.getLocalResourcesTracker(vis, user, appId); } if (tracker != null && lr == null) { lr = tracker.getLocalizedResource(req); } if (lr != null) { break; } else { try { maxWaitTime -= 20; Thread.sleep(20); } catch (InterruptedException e) { } } } while (maxWaitTime > 0); // this will wait till resource state is changed to (resourceState). 
if (lr == null) { return false; } do { if (!lr.getState().equals(resourceState)) { try { maxWaitTime -= 50; Thread.sleep(50); } catch (InterruptedException e) { } } else { break; } } while (maxWaitTime > 0); return lr.getState().equals(resourceState); } private ContainerLocalizationRequestEvent createContainerLocalizationEvent( ContainerImpl container, LocalResourceVisibility vis, LocalResourceRequest req) { Map<LocalResourceVisibility, Collection<LocalResourceRequest>> reqs = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); List<LocalResourceRequest> resourceList = new ArrayList<LocalResourceRequest>(); resourceList.add(req); reqs.put(vis, resourceList); return new ContainerLocalizationRequestEvent(container, reqs); } private ContainerImpl createMockContainer(String user, int containerId) { ContainerImpl container = mock(ContainerImpl.class); when(container.getContainerId()).thenReturn( BuilderUtils.newContainerId(1, 1, 1, containerId)); when(container.getUser()).thenReturn(user); Credentials mockCredentials = mock(Credentials.class); when(container.getCredentials()).thenReturn(mockCredentials); return container; } private static URL getPath(String path) { URL url = BuilderUtils.newURL("file", null, 0, path); return url; } private static LocalResource getMockedResource(Random r, LocalResourceVisibility vis) { String name = Long.toHexString(r.nextLong()); URL url = getPath("/local/PRIVATE/" + name); LocalResource rsrc = BuilderUtils.newLocalResource(url, LocalResourceType.FILE, vis, r.nextInt(1024) + 1024L, r.nextInt(1024) + 2048L, false); return rsrc; } private static LocalResource getAppMockedResource(Random r) { return getMockedResource(r, LocalResourceVisibility.APPLICATION); } private static LocalResource getPublicMockedResource(Random r) { return getMockedResource(r, LocalResourceVisibility.PUBLIC); } private static LocalResource getPrivateMockedResource(Random r) { return getMockedResource(r, LocalResourceVisibility.PRIVATE); } private static Container getMockContainer(ApplicationId appId, int id, String user) throws IOException { Container c = mock(Container.class); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1); ContainerId cId = BuilderUtils.newContainerId(appAttemptId, id); when(c.getUser()).thenReturn(user); when(c.getContainerId()).thenReturn(cId); Credentials creds = new Credentials(); Token<? 
extends TokenIdentifier> tk = getToken(id); String fingerprint = ResourceLocalizationService.buildTokenFingerprint(tk); assertNotNull(fingerprint); assertTrue( "Expected token fingerprint of 10 hex bytes delimited by space.", fingerprint.matches("^(([0-9a-f]){2} ){9}([0-9a-f]){2}$")); creds.addToken(new Text("tok" + id), tk); when(c.getCredentials()).thenReturn(creds); when(c.toString()).thenReturn(cId.toString()); return c; } private ResourceLocalizationService createSpyService( DrainDispatcher dispatcher, LocalDirsHandlerService dirsHandler, NMStateStoreService stateStore) { ContainerExecutor exec = mock(ContainerExecutor.class); LocalizerTracker mockLocalizerTracker = mock(LocalizerTracker.class); DeletionService delService = mock(DeletionService.class); NMContext nmContext = new NMContext(new NMContainerTokenSecretManager(conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf), stateStore); ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(mockLocalizerTracker).when(spyService).createLocalizerTracker( isA(Configuration.class)); doReturn(lfs).when(spyService) .getLocalFileContext(isA(Configuration.class)); return spyService; } @SuppressWarnings({ "unchecked", "rawtypes" }) static Token<? extends TokenIdentifier> getToken(int id) { return new Token(("ident" + id).getBytes(), ("passwd" + id).getBytes(), new Text("kind" + id), new Text("service" + id)); } /* * Test to ensure ResourceLocalizationService can handle local dirs going bad. * Test first sets up all the components required, then sends events to fetch * a private, app and public resource. It then sends events to clean up the * container and the app and ensures the right delete calls were made. 
*/ @Test @SuppressWarnings("unchecked") // mocked generics public void testFailedDirsResourceRelease() throws Exception { // setup components File f = new File(basedir.toString()); String[] sDirs = new String[4]; List<Path> localDirs = new ArrayList<Path>(sDirs.length); for (int i = 0; i < 4; ++i) { sDirs[i] = f.getAbsolutePath() + i; localDirs.add(new Path(sDirs[i])); } List<Path> containerLocalDirs = new ArrayList<Path>(localDirs.size()); List<Path> appLocalDirs = new ArrayList<Path>(localDirs.size()); List<Path> nmLocalContainerDirs = new ArrayList<Path>(localDirs.size()); List<Path> nmLocalAppDirs = new ArrayList<Path>(localDirs.size()); conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs); conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500); LocalizerTracker mockLocalizerTracker = mock(LocalizerTracker.class); DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, applicationBus); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); // Ignore actual localization EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerBus); ContainerExecutor exec = mock(ContainerExecutor.class); LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class); doReturn(new ArrayList<String>(Arrays.asList(sDirs))).when( mockDirsHandler).getLocalDirsForCleanup(); DeletionService delService = mock(DeletionService.class); // setup mocks ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, mockDirsHandler, nmContext); ResourceLocalizationService spyService = spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(mockLocalizerTracker).when(spyService).createLocalizerTracker( isA(Configuration.class)); doReturn(lfs).when(spyService) .getLocalFileContext(isA(Configuration.class)); FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask()); FsPermission nmPermission = ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask()); final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", localDirs.get(0)); final FileStatus nmFs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, nmPermission, "", "", localDirs.get(0)); final String user = "user0"; // init application final Application app = mock(Application.class); final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); when(app.toString()).thenReturn(ConverterUtils.toString(appId)); // init container.
final Container c = getMockContainer(appId, 42, user); // setup local app dirs List<String> tmpDirs = mockDirsHandler.getLocalDirs(); for (int i = 0; i < tmpDirs.size(); ++i) { Path usersdir = new Path(tmpDirs.get(i), ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, user); Path allAppsdir = new Path(userdir, ContainerLocalizer.APPCACHE); Path appDir = new Path(allAppsdir, ConverterUtils.toString(appId)); Path containerDir = new Path(appDir, ConverterUtils.toString(c.getContainerId())); containerLocalDirs.add(containerDir); appLocalDirs.add(appDir); Path sysDir = new Path(tmpDirs.get(i), ResourceLocalizationService.NM_PRIVATE_DIR); Path appSysDir = new Path(sysDir, ConverterUtils.toString(appId)); Path containerSysDir = new Path(appSysDir, ConverterUtils.toString(c.getContainerId())); nmLocalContainerDirs.add(containerSysDir); nmLocalAppDirs.add(appSysDir); } try { spyService.init(conf); spyService.start(); spyService.handle(new ApplicationLocalizationEvent( LocalizationEventType.INIT_APPLICATION_RESOURCES, app)); dispatcher.await(); // Get a handle on the trackers after they're setup with // INIT_APP_RESOURCES LocalResourcesTracker appTracker = spyService.getLocalResourcesTracker( LocalResourceVisibility.APPLICATION, user, appId); LocalResourcesTracker privTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user, appId); LocalResourcesTracker pubTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, appId); // init resources Random r = new Random(); long seed = r.nextLong(); r.setSeed(seed); // Send localization requests, one for each type of resource final LocalResource privResource = getPrivateMockedResource(r); final LocalResourceRequest privReq = new LocalResourceRequest(privResource); final LocalResource appResource = getAppMockedResource(r); final LocalResourceRequest appReq = new LocalResourceRequest(appResource); final LocalResource pubResource = getPublicMockedResource(r); final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req.put(LocalResourceVisibility.PRIVATE, Collections.singletonList(privReq)); req.put(LocalResourceVisibility.APPLICATION, Collections.singletonList(appReq)); req .put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq)); Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req2 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>(); req2.put(LocalResourceVisibility.PRIVATE, Collections.singletonList(privReq)); // Send Request event spyService.handle(new ContainerLocalizationRequestEvent(c, req)); spyService.handle(new ContainerLocalizationRequestEvent(c, req2)); dispatcher.await(); int privRsrcCount = 0; for (LocalizedResource lr : privTracker) { privRsrcCount++; Assert.assertEquals("Incorrect reference count", 2, lr.getRefCount()); Assert.assertEquals(privReq, lr.getRequest()); } Assert.assertEquals(1, privRsrcCount); int appRsrcCount = 0; for (LocalizedResource lr : appTracker) { appRsrcCount++; Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount()); Assert.assertEquals(appReq, lr.getRequest()); } Assert.assertEquals(1, appRsrcCount); int pubRsrcCount = 0; for (LocalizedResource lr : pubTracker) { pubRsrcCount++; Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount()); Assert.assertEquals(pubReq, lr.getRequest()); } Assert.assertEquals(1, pubRsrcCount); 
// setup mocks for the test: a set of dirs that throw IOExceptions, and let // the rest go through for (int i = 0; i < containerLocalDirs.size(); ++i) { if (i == 2) { Mockito.doThrow(new IOException()).when(spylfs) .getFileStatus(eq(containerLocalDirs.get(i))); Mockito.doThrow(new IOException()).when(spylfs) .getFileStatus(eq(nmLocalContainerDirs.get(i))); } else { doReturn(fs).when(spylfs) .getFileStatus(eq(containerLocalDirs.get(i))); doReturn(nmFs).when(spylfs).getFileStatus( eq(nmLocalContainerDirs.get(i))); } } // Send Cleanup Event spyService.handle(new ContainerLocalizationCleanupEvent(c, req)); verify(mockLocalizerTracker).cleanupPrivLocalizers( "container_314159265358979_0003_01_000042"); // match cleanup events with the mocks we set up earlier for (int i = 0; i < containerLocalDirs.size(); ++i) { if (i == 2) { try { verify(delService).delete(user, containerLocalDirs.get(i)); verify(delService).delete(null, nmLocalContainerDirs.get(i)); Assert.fail("deletion attempts for invalid dirs"); } catch (Throwable e) { continue; } } else { verify(delService).delete(user, containerLocalDirs.get(i)); verify(delService).delete(null, nmLocalContainerDirs.get(i)); } } ArgumentMatcher<ApplicationEvent> matchesAppDestroy = new ArgumentMatcher<ApplicationEvent>() { @Override public boolean matches(Object o) { ApplicationEvent evt = (ApplicationEvent) o; return (evt.getType() == ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP) && appId == evt.getApplicationID(); } }; dispatcher.await(); // setup mocks again, this time throwing UnsupportedFileSystemException and // IOExceptions for (int i = 0; i < containerLocalDirs.size(); ++i) { if (i == 3) { Mockito.doThrow(new IOException()).when(spylfs) .getFileStatus(eq(appLocalDirs.get(i))); Mockito.doThrow(new UnsupportedFileSystemException("test")) .when(spylfs).getFileStatus(eq(nmLocalAppDirs.get(i))); } else { doReturn(fs).when(spylfs).getFileStatus(eq(appLocalDirs.get(i))); doReturn(nmFs).when(spylfs).getFileStatus(eq(nmLocalAppDirs.get(i))); } } LocalizationEvent destroyApp = new ApplicationLocalizationEvent( LocalizationEventType.DESTROY_APPLICATION_RESOURCES, app); spyService.handle(destroyApp); verify(applicationBus).handle(argThat(matchesAppDestroy)); // verify we got the right delete calls for (int i = 0; i < containerLocalDirs.size(); ++i) { if (i == 3) { try { // the app dirs for this index were mocked to fail, so no deletion // must have been attempted for them verify(delService).delete(user, appLocalDirs.get(i)); verify(delService).delete(null, nmLocalAppDirs.get(i)); Assert.fail("deletion attempts for invalid dirs"); } catch (Throwable e) { continue; } } else { verify(delService).delete(user, appLocalDirs.get(i)); verify(delService).delete(null, nmLocalAppDirs.get(i)); } } } finally { dispatcher.stop(); delService.stop(); } } }
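// A minimal generic sketch of the bounded-polling idiom repeated by the
// waitFor* helpers above: sleep in small slices until a condition holds or
// the time budget runs out. It assumes Java 8's BooleanSupplier; the class
// and method names are hypothetical and not part of the original test.
class BoundedPollSketch {
  static boolean waitFor(java.util.function.BooleanSupplier condition,
      long maxWaitMillis, long intervalMillis) {
    long remaining = maxWaitMillis;
    while (remaining > 0) {
      if (condition.getAsBoolean()) {
        return true; // condition met within the budget
      }
      try {
        Thread.sleep(intervalMillis);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt flag
        break;
      }
      remaining -= intervalMillis;
    }
    return condition.getAsBoolean(); // one last check after the budget
  }
}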
99,670
41.305178
121
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent; import org.junit.Test; import org.mockito.ArgumentMatcher; public class TestLocalizedResource { static ContainerId getMockContainer(long id) { ApplicationId appId = mock(ApplicationId.class); when(appId.getClusterTimestamp()).thenReturn(314159265L); when(appId.getId()).thenReturn(3); ApplicationAttemptId appAttemptId = mock(ApplicationAttemptId.class); when(appAttemptId.getApplicationId()).thenReturn(appId); when(appAttemptId.getAttemptId()).thenReturn(0); ContainerId container = mock(ContainerId.class); when(container.getContainerId()).thenReturn(id); when(container.getApplicationAttemptId()).thenReturn(appAttemptId); return container; } @Test @SuppressWarnings("unchecked") // mocked generic public void testNotification() throws Exception { DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(new Configuration()); try { dispatcher.start(); EventHandler<ContainerEvent> containerBus = mock(EventHandler.class); EventHandler<LocalizerEvent> localizerBus = 
mock(EventHandler.class); dispatcher.register(ContainerEventType.class, containerBus); dispatcher.register(LocalizerEventType.class, localizerBus); // mock resource LocalResource apiRsrc = createMockResource(); final ContainerId container0 = getMockContainer(0L); final Credentials creds0 = new Credentials(); final LocalResourceVisibility vis0 = LocalResourceVisibility.PRIVATE; final LocalizerContext ctxt0 = new LocalizerContext("yak", container0, creds0); LocalResourceRequest rsrcA = new LocalResourceRequest(apiRsrc); LocalizedResource local = new LocalizedResource(rsrcA, dispatcher); local.handle(new ResourceRequestEvent(rsrcA, vis0, ctxt0)); dispatcher.await(); // Register C0, verify request event LocalizerEventMatcher matchesL0Req = new LocalizerEventMatcher(container0, creds0, vis0, LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL0Req)); assertEquals(ResourceState.DOWNLOADING, local.getState()); // Register C1, verify request event final Credentials creds1 = new Credentials(); final ContainerId container1 = getMockContainer(1L); final LocalizerContext ctxt1 = new LocalizerContext("yak", container1, creds1); final LocalResourceVisibility vis1 = LocalResourceVisibility.PUBLIC; local.handle(new ResourceRequestEvent(rsrcA, vis1, ctxt1)); dispatcher.await(); LocalizerEventMatcher matchesL1Req = new LocalizerEventMatcher(container1, creds1, vis1, LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL1Req)); // Release C0 container localization, verify no notification local.handle(new ResourceReleaseEvent(rsrcA, container0)); dispatcher.await(); verify(containerBus, never()).handle(isA(ContainerEvent.class)); assertEquals(ResourceState.DOWNLOADING, local.getState()); // Release C1 container localization, verify no notification local.handle(new ResourceReleaseEvent(rsrcA, container1)); dispatcher.await(); verify(containerBus, never()).handle(isA(ContainerEvent.class)); assertEquals(ResourceState.DOWNLOADING, local.getState()); // Register C2, C3 final ContainerId container2 = getMockContainer(2L); final LocalResourceVisibility vis2 = LocalResourceVisibility.PRIVATE; final Credentials creds2 = new Credentials(); final LocalizerContext ctxt2 = new LocalizerContext("yak", container2, creds2); final ContainerId container3 = getMockContainer(3L); final LocalResourceVisibility vis3 = LocalResourceVisibility.PRIVATE; final Credentials creds3 = new Credentials(); final LocalizerContext ctxt3 = new LocalizerContext("yak", container3, creds3); local.handle(new ResourceRequestEvent(rsrcA, vis2, ctxt2)); local.handle(new ResourceRequestEvent(rsrcA, vis3, ctxt3)); dispatcher.await(); LocalizerEventMatcher matchesL2Req = new LocalizerEventMatcher(container2, creds2, vis2, LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL2Req)); LocalizerEventMatcher matchesL3Req = new LocalizerEventMatcher(container3, creds3, vis3, LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL3Req)); // Successful localization. 
verify notification C2, C3 Path locA = new Path("file:///cache/rsrcA"); local.handle(new ResourceLocalizedEvent(rsrcA, locA, 10)); dispatcher.await(); ContainerEventMatcher matchesC2Localized = new ContainerEventMatcher(container2, ContainerEventType.RESOURCE_LOCALIZED); ContainerEventMatcher matchesC3Localized = new ContainerEventMatcher(container3, ContainerEventType.RESOURCE_LOCALIZED); verify(containerBus).handle(argThat(matchesC2Localized)); verify(containerBus).handle(argThat(matchesC3Localized)); assertEquals(ResourceState.LOCALIZED, local.getState()); // Register C4, verify notification final ContainerId container4 = getMockContainer(4L); final Credentials creds4 = new Credentials(); final LocalizerContext ctxt4 = new LocalizerContext("yak", container4, creds4); final LocalResourceVisibility vis4 = LocalResourceVisibility.PRIVATE; local.handle(new ResourceRequestEvent(rsrcA, vis4, ctxt4)); dispatcher.await(); ContainerEventMatcher matchesC4Localized = new ContainerEventMatcher(container4, ContainerEventType.RESOURCE_LOCALIZED); verify(containerBus).handle(argThat(matchesC4Localized)); assertEquals(ResourceState.LOCALIZED, local.getState()); } finally { dispatcher.stop(); } } static LocalResource createMockResource() { // mock rsrc location org.apache.hadoop.yarn.api.records.URL uriA = mock(org.apache.hadoop.yarn.api.records.URL.class); when(uriA.getScheme()).thenReturn("file"); when(uriA.getHost()).thenReturn(null); when(uriA.getFile()).thenReturn("/localA/rsrc"); LocalResource apiRsrc = mock(LocalResource.class); when(apiRsrc.getResource()).thenReturn(uriA); when(apiRsrc.getTimestamp()).thenReturn(4344L); when(apiRsrc.getType()).thenReturn(LocalResourceType.FILE); return apiRsrc; } static class LocalizerEventMatcher extends ArgumentMatcher<LocalizerEvent> { Credentials creds; LocalResourceVisibility vis; private final ContainerId idRef; private final LocalizerEventType type; public LocalizerEventMatcher(ContainerId idRef, Credentials creds, LocalResourceVisibility vis, LocalizerEventType type) { this.vis = vis; this.type = type; this.creds = creds; this.idRef = idRef; } @Override public boolean matches(Object o) { if (!(o instanceof LocalizerResourceRequestEvent)) return false; LocalizerResourceRequestEvent evt = (LocalizerResourceRequestEvent) o; return idRef == evt.getContext().getContainerId() && type == evt.getType() && vis == evt.getVisibility() && creds == evt.getContext().getCredentials(); } } static class ContainerEventMatcher extends ArgumentMatcher<ContainerEvent> { private final ContainerId idRef; private final ContainerEventType type; public ContainerEventMatcher(ContainerId idRef, ContainerEventType type) { this.idRef = idRef; this.type = type; } @Override public boolean matches(Object o) { if (!(o instanceof ContainerEvent)) return false; ContainerEvent evt = (ContainerEvent) o; return idRef == evt.getContainerID() && type == evt.getType(); } } }
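// A toy model of the state/notification behaviour the test above asserts,
// using only JDK types. The names are hypothetical and this is only a sketch
// of the observed transitions, not the real LocalizedResource state machine.
class LocalizedResourceStateSketch {
  enum State { NEW, DOWNLOADING, LOCALIZED }
  State state = State.NEW;
  final java.util.List<Long> waiting = new java.util.ArrayList<Long>();
  final java.util.List<Long> notified = new java.util.ArrayList<Long>();

  void request(long containerId) {
    if (state == State.NEW) {
      state = State.DOWNLOADING;           // first request starts the fetch
    }
    if (state == State.LOCALIZED) {
      notified.add(containerId);           // late requesters notified at once
    } else {
      waiting.add(containerId);            // everyone else waits
    }
  }

  void release(long containerId) {
    waiting.remove(Long.valueOf(containerId)); // no notification on release
  }

  void localized() {
    state = State.LOCALIZED;
    notified.addAll(waiting);              // all waiters notified together
    waiting.clear();
  }
}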
10,535
43.834043
112
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/FakeFSDataInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import org.apache.hadoop.fs.PositionedReadable; import org.apache.hadoop.fs.Seekable; /** mock streams in unit tests */ public class FakeFSDataInputStream extends FilterInputStream implements Seekable, PositionedReadable { public FakeFSDataInputStream(InputStream in) { super(in); } public void seek(long pos) throws IOException { } public long getPos() throws IOException { return -1; } public boolean seekToNewSource(long targetPos) throws IOException { return false; } public int read(long position, byte[] buffer, int offset, int length) throws IOException { return -1; } public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { } public void readFully(long position, byte[] buffer) throws IOException { } }
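// A minimal usage sketch, assuming a test only needs the Seekable /
// PositionedReadable surface and no real data; the helper class and method
// names below are hypothetical, not part of the original file.
class FakeStreamUsageSketch {
  static FakeFSDataInputStream newEmptyFake() {
    // all positional operations on the fake are stubs: getPos() returns -1,
    // seekToNewSource() returns false, positioned reads return -1 or no-op
    return new FakeFSDataInputStream(
        new java.io.ByteArrayInputStream(new byte[0]));
  }
}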
1,746
40.595238
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalCacheDirectoryManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import org.junit.Assert; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalCacheDirectoryManager.Directory; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.junit.Test; public class TestLocalCacheDirectoryManager { @Test(timeout = 10000) public void testHierarchicalSubDirectoryCreation() { // setting per directory file limit to 1. YarnConfiguration conf = new YarnConfiguration(); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37"); LocalCacheDirectoryManager hDir = new LocalCacheDirectoryManager(conf); // Test root directory path = "" Assert.assertTrue(hDir.getRelativePathForLocalization().isEmpty()); // Testing path generation from "0" to "0/0/z/z" for (int i = 1; i <= 37 * 36 * 36; i++) { StringBuffer sb = new StringBuffer(); String num = Integer.toString(i - 1, 36); if (num.length() == 1) { sb.append(num.charAt(0)); } else { sb.append(Integer.toString( Integer.parseInt(num.substring(0, 1), 36) - 1, 36)); } for (int j = 1; j < num.length(); j++) { sb.append(Path.SEPARATOR).append(num.charAt(j)); } Assert.assertEquals(sb.toString(), hDir.getRelativePathForLocalization()); } String testPath1 = "4"; String testPath2 = "2"; /* * Making sure directory "4" and "2" becomes non-full so that they are * reused for future getRelativePathForLocalization() calls in the order * they are freed. */ hDir.decrementFileCountForPath(testPath1); hDir.decrementFileCountForPath(testPath2); // After below call directory "4" should become full. 
Assert.assertEquals(testPath1, hDir.getRelativePathForLocalization()); Assert.assertEquals(testPath2, hDir.getRelativePathForLocalization()); } @Test(timeout = 10000) public void testMinimumPerDirectoryFileLimit() { YarnConfiguration conf = new YarnConfiguration(); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "1"); Exception e = null; NMContext nmContext = new NMContext(new NMContainerTokenSecretManager(conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf), new NMNullStateStoreService()); ResourceLocalizationService service = new ResourceLocalizationService(null, null, null, null, nmContext); try { service.init(conf); } catch (Exception e1) { e = e1; } Assert.assertNotNull(e); Assert.assertEquals(YarnRuntimeException.class, e.getClass()); Assert.assertEquals(e.getMessage(), YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY + " parameter is configured with a value less than 37."); } @Test(timeout = 1000) public void testDirectoryStateChangeFromFullToNonFull() { YarnConfiguration conf = new YarnConfiguration(); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "40"); LocalCacheDirectoryManager dir = new LocalCacheDirectoryManager(conf); // checking for first four paths String rootPath = ""; String firstSubDir = "0"; for (int i = 0; i < 4; i++) { Assert.assertEquals(rootPath, dir.getRelativePathForLocalization()); } // Releasing two files from the root directory. dir.decrementFileCountForPath(rootPath); dir.decrementFileCountForPath(rootPath); // Space for two files should be available in root directory. Assert.assertEquals(rootPath, dir.getRelativePathForLocalization()); Assert.assertEquals(rootPath, dir.getRelativePathForLocalization()); // As no space is now available in root directory so it should be from // first sub directory Assert.assertEquals(firstSubDir, dir.getRelativePathForLocalization()); } @Test public void testDirectoryConversion() { for (int i = 0; i < 10000; ++i) { String path = Directory.getRelativePath(i); Assert.assertEquals("Incorrect conversion for " + i, i, Directory.getDirectoryNumber(path)); } } @Test public void testIncrementFileCountForPath() { YarnConfiguration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2); LocalCacheDirectoryManager mgr = new LocalCacheDirectoryManager(conf); final String rootPath = ""; mgr.incrementFileCountForPath(rootPath); Assert.assertEquals(rootPath, mgr.getRelativePathForLocalization()); Assert.assertFalse("root dir should be full", rootPath.equals(mgr.getRelativePathForLocalization())); // finish filling the other directory mgr.getRelativePathForLocalization(); // free up space in the root dir mgr.decrementFileCountForPath(rootPath); mgr.decrementFileCountForPath(rootPath); Assert.assertEquals(rootPath, mgr.getRelativePathForLocalization()); Assert.assertEquals(rootPath, mgr.getRelativePathForLocalization()); String otherDir = mgr.getRelativePathForLocalization(); Assert.assertFalse("root dir should be full", otherDir.equals(rootPath)); final String deepDir0 = "d/e/e/p/0"; final String deepDir1 = "d/e/e/p/1"; final String deepDir2 = "d/e/e/p/2"; final String deepDir3 = "d/e/e/p/3"; mgr.incrementFileCountForPath(deepDir0); Assert.assertEquals(otherDir, mgr.getRelativePathForLocalization()); Assert.assertEquals(deepDir0, mgr.getRelativePathForLocalization()); Assert.assertEquals("total dir count incorrect after increment", deepDir1, 
mgr.getRelativePathForLocalization()); mgr.incrementFileCountForPath(deepDir2); mgr.incrementFileCountForPath(deepDir1); mgr.incrementFileCountForPath(deepDir2); Assert.assertEquals(deepDir3, mgr.getRelativePathForLocalization()); } }
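// The cache hierarchy grows in base 36: the root "" fills first, then the
// single-level directories "0".."z", then two-level "0/0".."z/z", and so on.
// Since the per-directory limit must also cover the 36 possible
// sub-directories, the minimum limit of 37 leaves room for exactly one file
// per directory. The hypothetical helper below mirrors the path-generation
// loop in testHierarchicalSubDirectoryCreation above (n is the 0-based
// directory number handed out after the root); it is a sketch, not the
// production LocalCacheDirectoryManager code.
class RelativePathSketch {
  static String relativePathFor(int n) {
    String num = Integer.toString(n, 36);      // e.g. 36 -> "10"
    StringBuilder sb = new StringBuilder();
    if (num.length() == 1) {
      sb.append(num.charAt(0));                // 0..35 -> "0".."z"
    } else {
      // the leading base-36 digit is shifted down by one: 36 -> "0/0"
      sb.append(Integer.toString(
          Integer.parseInt(num.substring(0, 1), 36) - 1, 36));
    }
    for (int j = 1; j < num.length(); j++) {
      sb.append(Path.SEPARATOR).append(num.charAt(j));
    }
    return sb.toString();
  }
}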
7,215
41.952381
113
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/MockLocalizerStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; public class MockLocalizerStatus implements LocalizerStatus { private String locId; private List<LocalResourceStatus> stats; public MockLocalizerStatus() { stats = new ArrayList<LocalResourceStatus>(); } public MockLocalizerStatus(String locId, List<LocalResourceStatus> stats) { this.locId = locId; this.stats = stats; } @Override public String getLocalizerId() { return locId; } @Override public List<LocalResourceStatus> getResources() { return stats; } @Override public void setLocalizerId(String id) { this.locId = id; } @Override public void addAllResources(List<LocalResourceStatus> rsrcs) { stats.addAll(rsrcs); } @Override public LocalResourceStatus getResourceStatus(int index) { return stats.get(index); } @Override public void addResourceStatus(LocalResourceStatus resource) { stats.add(resource); } @Override public void removeResource(int index) { stats.remove(index); } public void clearResources() { stats.clear(); } @Override public boolean equals(Object o) { if (!(o instanceof MockLocalizerStatus)) { return false; } MockLocalizerStatus other = (MockLocalizerStatus) o; return getLocalizerId().equals(other.getLocalizerId()) && getResources().containsAll(other.getResources()) && other.getResources().containsAll(getResources()); } @Override public int hashCode() { return 4344; } }
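// Note on the equals/hashCode pair above: equals compares the resource lists
// via mutual containsAll, so ordering does not matter, and the constant
// hashCode keeps the equals/hashCode contract at the cost of degenerate
// hashing. A small illustration with hypothetical values (sketch only):
class MockLocalizerStatusEqualitySketch {
  static boolean equalDemo() {
    java.util.List<LocalResourceStatus> stats =
        new java.util.ArrayList<LocalResourceStatus>();
    MockLocalizerStatus a = new MockLocalizerStatus("localizer_0", stats);
    MockLocalizerStatus b = new MockLocalizerStatus("localizer_0",
        new java.util.ArrayList<LocalResourceStatus>(stats));
    // same id, same (empty) resources -> equal, and hash codes match
    return a.equals(b) && a.hashCode() == b.hashCode();
  }
}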
2,536
30.320988
89
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.mockito.Mockito.any; import static org.mockito.Matchers.isA; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.timeout; import java.io.File; import java.io.IOException; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceFailedLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceLocalizedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent; import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Test; import org.mockito.ArgumentCaptor; public class TestLocalResourcesTrackerImpl { @Test(timeout=10000) @SuppressWarnings("unchecked") public void test() { String user = "testuser"; DrainDispatcher dispatcher = null; try { Configuration conf = new Configuration(); dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); DeletionService mockDelService = mock(DeletionService.class); ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalizerContext lc1 = new LocalizerContext(user, cId1, null); ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2); LocalizerContext lc2 = new LocalizerContext(user, cId2, null); LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC); LocalResourceRequest req2 = createLocalResourceRequest(user, 2, 1, LocalResourceVisibility.PUBLIC); LocalizedResource lr1 = createLocalizedResource(req1, dispatcher); LocalizedResource lr2 = createLocalizedResource(req2, dispatcher); ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(); localrsrc.put(req1, lr1); localrsrc.put(req2, lr2); LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, false, conf, new NMNullStateStoreService()); ResourceEvent req11Event = new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1); ResourceEvent req12Event = new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc2); ResourceEvent req21Event = new ResourceRequestEvent(req2, LocalResourceVisibility.PUBLIC, lc1); ResourceEvent rel11Event = new ResourceReleaseEvent(req1, cId1); ResourceEvent rel12Event = new ResourceReleaseEvent(req1, cId2); ResourceEvent rel21Event = new ResourceReleaseEvent(req2, cId1); // Localize R1 for C1 tracker.handle(req11Event); // Localize R1 for C2 tracker.handle(req12Event); // Localize R2 for C1 tracker.handle(req21Event); dispatcher.await(); verify(localizerEventHandler, times(3)).handle( any(LocalizerResourceRequestEvent.class)); // Verify refCount for R1 is 2 Assert.assertEquals(2, lr1.getRefCount()); // Verify refCount for R2 is 1 Assert.assertEquals(1, lr2.getRefCount()); // Release R2 for C1 tracker.handle(rel21Event); dispatcher.await(); verifyTrackedResourceCount(tracker, 2); // Verify resource with non zero ref count is not removed. Assert.assertEquals(2, lr1.getRefCount()); Assert.assertFalse(tracker.remove(lr1, mockDelService)); verifyTrackedResourceCount(tracker, 2); // Localize resource1 ResourceLocalizedEvent rle = new ResourceLocalizedEvent(req1, new Path("file:///tmp/r1"), 1); lr1.handle(rle); Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED)); // Release resource1 tracker.handle(rel11Event); tracker.handle(rel12Event); Assert.assertEquals(0, lr1.getRefCount()); // Verify resources in state LOCALIZED with ref-count=0 is removed. 
Assert.assertTrue(tracker.remove(lr1, mockDelService)); verifyTrackedResourceCount(tracker, 1); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test(timeout=10000) @SuppressWarnings("unchecked") public void testConsistency() { String user = "testuser"; DrainDispatcher dispatcher = null; try { Configuration conf = new Configuration(); dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalizerContext lc1 = new LocalizerContext(user, cId1, null); LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC); LocalizedResource lr1 = createLocalizedResource(req1, dispatcher); ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(); localrsrc.put(req1, lr1); LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, false, conf, new NMNullStateStoreService()); ResourceEvent req11Event = new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1); ResourceEvent rel11Event = new ResourceReleaseEvent(req1, cId1); // Localize R1 for C1 tracker.handle(req11Event); dispatcher.await(); // Verify refCount for R1 is 1 Assert.assertEquals(1, lr1.getRefCount()); dispatcher.await(); verifyTrackedResourceCount(tracker, 1); // Localize resource1 ResourceLocalizedEvent rle = new ResourceLocalizedEvent(req1, new Path( "file:///tmp/r1"), 1); lr1.handle(rle); Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED)); Assert.assertTrue(createdummylocalizefile(new Path("file:///tmp/r1"))); LocalizedResource rsrcbefore = tracker.iterator().next(); File resFile = new File(lr1.getLocalPath().toUri().getRawPath() .toString()); Assert.assertTrue(resFile.exists()); Assert.assertTrue(resFile.delete()); // Localize R1 for C1 tracker.handle(req11Event); dispatcher.await(); lr1.handle(rle); Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED)); LocalizedResource rsrcafter = tracker.iterator().next(); if (rsrcbefore == rsrcafter) { Assert.fail("Localized resource should not be equal"); } // Release resource1 tracker.handle(rel11Event); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test(timeout = 1000) @SuppressWarnings("unchecked") public void testLocalResourceCache() { String user = "testuser"; DrainDispatcher dispatcher = null; try { Configuration conf = new Configuration(); dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class); // Registering event handlers. 
dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(); LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, true, conf, new NMNullStateStoreService()); LocalResourceRequest lr = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC); // Creating 2 containers for the same application, both requesting the // same local resource. // Container 1 requesting local resource. ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalizerContext lc1 = new LocalizerContext(user, cId1, null); ResourceEvent reqEvent1 = new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc1); // No resource request is initially present in the local cache. Assert.assertEquals(0, localrsrc.size()); // Container-1 requesting local resource. tracker.handle(reqEvent1); dispatcher.await(); // A new localized resource should have been added to the local resource map // and the requesting container added to its waiting queue. Assert.assertEquals(1, localrsrc.size()); Assert.assertTrue(localrsrc.containsKey(lr)); Assert.assertEquals(1, localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1)); Assert.assertEquals(ResourceState.DOWNLOADING, localrsrc.get(lr) .getState()); // Container 2 requesting the resource ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2); LocalizerContext lc2 = new LocalizerContext(user, cId2, null); ResourceEvent reqEvent2 = new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc2); tracker.handle(reqEvent2); dispatcher.await(); // Container 2 should have been added to the waiting queue of the local // resource. Assert.assertEquals(2, localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2)); // Failing resource localization ResourceEvent resourceFailedEvent = new ResourceFailedLocalizationEvent( lr, new Exception("test").getMessage()); // Backing up the resource to track its state change, as it will be // removed after the failed event. LocalizedResource localizedResource = localrsrc.get(lr); tracker.handle(resourceFailedEvent); dispatcher.await(); // After receiving the failed-localization event, all waiting containers // will be notified with a ContainerResourceFailedEvent. Assert.assertEquals(0, localrsrc.size()); verify(containerEventHandler, timeout(1000).times(2)).handle( isA(ContainerResourceFailedEvent.class)); Assert.assertEquals(ResourceState.FAILED, localizedResource.getState()); // Container 1 tries to release the resource. The resource was already // removed from the cache after the failed localization, so this call // should return silently without an exception. ResourceReleaseEvent relEvent1 = new ResourceReleaseEvent(lr, cId1); tracker.handle(relEvent1); dispatcher.await(); // Container-3 now requests the same resource. This request arrives // before Container-2's release call. ContainerId cId3 = BuilderUtils.newContainerId(1, 1, 1, 3); LocalizerContext lc3 = new LocalizerContext(user, cId3, null); ResourceEvent reqEvent3 = new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc3); tracker.handle(reqEvent3); dispatcher.await(); // The local resource cache should now hold the requested resource, and // the number of waiting containers should be 1.
Assert.assertEquals(1, localrsrc.size()); Assert.assertTrue(localrsrc.containsKey(lr)); Assert.assertEquals(1, localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3)); // Container-2 releases the resource ResourceReleaseEvent relEvent2 = new ResourceReleaseEvent(lr, cId2); tracker.handle(relEvent2); dispatcher.await(); // Making sure that there is no change in the cache after the release. Assert.assertEquals(1, localrsrc.size()); Assert.assertTrue(localrsrc.containsKey(lr)); Assert.assertEquals(1, localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3)); // Sending ResourceLocalizedEvent to the tracker. In turn, the resource should // send a ContainerResourceLocalizedEvent to the waiting containers. Path localizedPath = new Path("/tmp/file1"); ResourceLocalizedEvent localizedEvent = new ResourceLocalizedEvent(lr, localizedPath, 123L); tracker.handle(localizedEvent); dispatcher.await(); // Verifying ContainerResourceLocalizedEvent. verify(containerEventHandler, timeout(1000).times(1)).handle( isA(ContainerResourceLocalizedEvent.class)); Assert.assertEquals(ResourceState.LOCALIZED, localrsrc.get(lr) .getState()); Assert.assertEquals(1, localrsrc.get(lr).getRefCount()); // Container-3 releasing the resource. ResourceReleaseEvent relEvent3 = new ResourceReleaseEvent(lr, cId3); tracker.handle(relEvent3); dispatcher.await(); Assert.assertEquals(0, localrsrc.get(lr).getRefCount()); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test(timeout = 100000) @SuppressWarnings("unchecked") public void testHierarchicalLocalCacheDirectories() { String user = "testuser"; DrainDispatcher dispatcher = null; try { Configuration conf = new Configuration(); // Setting the per-directory file limit to 37: the directory manager // reserves 36 sub-directory slots (0-9 and a-z), so this effectively // allows one file per directory. conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37"); dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); DeletionService mockDelService = mock(DeletionService.class); ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(); LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, true, conf, new NMNullStateStoreService()); // This is a random path. NO File creation will take place at this place. Path localDir = new Path("/tmp"); // Container 1 needs lr1 resource ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC); LocalizerContext lc1 = new LocalizerContext(user, cId1, null); // Container 1 requests lr1 to be localized ResourceEvent reqEvent1 = new ResourceRequestEvent(lr1, LocalResourceVisibility.PUBLIC, lc1); tracker.handle(reqEvent1); // Simulate the process of localization of lr1 // NOTE: the localization path from the tracker has the resource ID at the end Path hierarchicalPath1 = tracker.getPathForLocalization(lr1, localDir).getParent(); // Simulate lr1 getting localized ResourceLocalizedEvent rle1 = new ResourceLocalizedEvent(lr1, new Path(hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "file1"), 120); tracker.handle(rle1); // Localization successful.
LocalResourceRequest lr2 = createLocalResourceRequest(user, 3, 3, LocalResourceVisibility.PUBLIC); // Container 1 requests lr2 to be localized. ResourceEvent reqEvent2 = new ResourceRequestEvent(lr2, LocalResourceVisibility.PUBLIC, lc1); tracker.handle(reqEvent2); Path hierarchicalPath2 = tracker.getPathForLocalization(lr2, localDir).getParent(); // Localization failed. ResourceFailedLocalizationEvent rfe2 = new ResourceFailedLocalizationEvent( lr2, new Exception("Test").toString()); tracker.handle(rfe2); /* * The paths returned for the two localizations should differ because we * limit each sub-directory to one file. */ Assert.assertFalse(hierarchicalPath1.equals(hierarchicalPath2)); LocalResourceRequest lr3 = createLocalResourceRequest(user, 2, 2, LocalResourceVisibility.PUBLIC); ResourceEvent reqEvent3 = new ResourceRequestEvent(lr3, LocalResourceVisibility.PUBLIC, lc1); tracker.handle(reqEvent3); Path hierarchicalPath3 = tracker.getPathForLocalization(lr3, localDir).getParent(); // Localization successful ResourceLocalizedEvent rle3 = new ResourceLocalizedEvent(lr3, new Path(hierarchicalPath3.toUri() .toString() + Path.SEPARATOR + "file3"), 120); tracker.handle(rle3); // Verify that the path created is inside the sub-directory Assert.assertEquals(hierarchicalPath3.toUri().toString(), hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "0"); // Container 1 releases resource lr1 ResourceEvent relEvent1 = new ResourceReleaseEvent(lr1, cId1); tracker.handle(relEvent1); // Validate the file counts now int resources = 0; Iterator<LocalizedResource> iter = tracker.iterator(); while (iter.hasNext()) { iter.next(); resources++; } // There should be only two resources, lr1 and lr3, now. Assert.assertEquals(2, resources); // Now simulate cache cleanup - removes unused resources. iter = tracker.iterator(); while (iter.hasNext()) { LocalizedResource rsrc = iter.next(); if (rsrc.getRefCount() == 0) { Assert.assertTrue(tracker.remove(rsrc, mockDelService)); resources--; } } // lr1 is unused and will be removed; only lr3 will remain. Assert.assertEquals(1, resources); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test @SuppressWarnings("unchecked") public void testStateStoreSuccessfulLocalization() throws Exception { final String user = "someuser"; final ApplicationId appId = ApplicationId.newInstance(1, 1); // This is a random path. NO File creation will take place at this place.
final Path localDir = new Path("/tmp"); Configuration conf = new YarnConfiguration(); DrainDispatcher dispatcher = null; dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); DeletionService mockDelService = mock(DeletionService.class); NMStateStoreService stateStore = mock(NMStateStoreService.class); try { LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId, dispatcher, false, conf, stateStore); // Container 1 needs lr1 resource ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.APPLICATION); LocalizerContext lc1 = new LocalizerContext(user, cId1, null); // Container 1 requests lr1 to be localized ResourceEvent reqEvent1 = new ResourceRequestEvent(lr1, LocalResourceVisibility.APPLICATION, lc1); tracker.handle(reqEvent1); dispatcher.await(); // Simulate the process of localization of lr1 Path hierarchicalPath1 = tracker.getPathForLocalization(lr1, localDir); ArgumentCaptor<LocalResourceProto> localResourceCaptor = ArgumentCaptor.forClass(LocalResourceProto.class); ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class); verify(stateStore).startResourceLocalization(eq(user), eq(appId), localResourceCaptor.capture(), pathCaptor.capture()); LocalResourceProto lrProto = localResourceCaptor.getValue(); Path localizedPath1 = pathCaptor.getValue(); Assert.assertEquals(lr1, new LocalResourceRequest(new LocalResourcePBImpl(lrProto))); Assert.assertEquals(hierarchicalPath1, localizedPath1.getParent()); // Simulate lr1 getting localized ResourceLocalizedEvent rle1 = new ResourceLocalizedEvent(lr1, pathCaptor.getValue(), 120); tracker.handle(rle1); dispatcher.await(); ArgumentCaptor<LocalizedResourceProto> localizedProtoCaptor = ArgumentCaptor.forClass(LocalizedResourceProto.class); verify(stateStore).finishResourceLocalization(eq(user), eq(appId), localizedProtoCaptor.capture()); LocalizedResourceProto localizedProto = localizedProtoCaptor.getValue(); Assert.assertEquals(lr1, new LocalResourceRequest( new LocalResourcePBImpl(localizedProto.getResource()))); Assert.assertEquals(localizedPath1.toString(), localizedProto.getLocalPath()); LocalizedResource localizedRsrc1 = tracker.getLocalizedResource(lr1); Assert.assertNotNull(localizedRsrc1); // simulate release and retention processing tracker.handle(new ResourceReleaseEvent(lr1, cId1)); dispatcher.await(); boolean removeResult = tracker.remove(localizedRsrc1, mockDelService); Assert.assertTrue(removeResult); verify(stateStore).removeLocalizedResource(eq(user), eq(appId), eq(localizedPath1)); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test @SuppressWarnings("unchecked") public void testStateStoreFailedLocalization() throws Exception { final String user = "someuser"; final ApplicationId appId = ApplicationId.newInstance(1, 1); // This is a random path. NO File creation will take place at this place. 
final Path localDir = new Path("/tmp"); Configuration conf = new YarnConfiguration(); DrainDispatcher dispatcher = null; dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); NMStateStoreService stateStore = mock(NMStateStoreService.class); try { LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId, dispatcher, false, conf, stateStore); // Container 1 needs lr1 resource ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.APPLICATION); LocalizerContext lc1 = new LocalizerContext(user, cId1, null); // Container 1 requests lr1 to be localized ResourceEvent reqEvent1 = new ResourceRequestEvent(lr1, LocalResourceVisibility.APPLICATION, lc1); tracker.handle(reqEvent1); dispatcher.await(); // Simulate the process of localization of lr1 Path hierarchicalPath1 = tracker.getPathForLocalization(lr1, localDir); ArgumentCaptor<LocalResourceProto> localResourceCaptor = ArgumentCaptor.forClass(LocalResourceProto.class); ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class); verify(stateStore).startResourceLocalization(eq(user), eq(appId), localResourceCaptor.capture(), pathCaptor.capture()); LocalResourceProto lrProto = localResourceCaptor.getValue(); Path localizedPath1 = pathCaptor.getValue(); Assert.assertEquals(lr1, new LocalResourceRequest(new LocalResourcePBImpl(lrProto))); Assert.assertEquals(hierarchicalPath1, localizedPath1.getParent()); ResourceFailedLocalizationEvent rfe1 = new ResourceFailedLocalizationEvent( lr1, new Exception("Test").toString()); tracker.handle(rfe1); dispatcher.await(); verify(stateStore).removeLocalizedResource(eq(user), eq(appId), eq(localizedPath1)); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test @SuppressWarnings("unchecked") public void testRecoveredResource() throws Exception { final String user = "someuser"; final ApplicationId appId = ApplicationId.newInstance(1, 1); // This is a random path. NO File creation will take place at this place. 
final Path localDir = new Path("/tmp/localdir"); Configuration conf = new YarnConfiguration(); DrainDispatcher dispatcher = null; dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); NMStateStoreService stateStore = mock(NMStateStoreService.class); try { LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId, dispatcher, false, conf, stateStore); // Container 1 needs lr1 resource ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1); LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.APPLICATION); Assert.assertNull(tracker.getLocalizedResource(lr1)); final long localizedId1 = 52; Path hierarchicalPath1 = new Path(localDir, Long.toString(localizedId1)); Path localizedPath1 = new Path(hierarchicalPath1, "resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr1, localizedPath1, 120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr1)); // verify new paths reflect recovery of previous resources LocalResourceRequest lr2 = createLocalResourceRequest(user, 2, 2, LocalResourceVisibility.APPLICATION); LocalizerContext lc2 = new LocalizerContext(user, cId1, null); ResourceEvent reqEvent2 = new ResourceRequestEvent(lr2, LocalResourceVisibility.APPLICATION, lc2); tracker.handle(reqEvent2); dispatcher.await(); Path hierarchicalPath2 = tracker.getPathForLocalization(lr2, localDir); long localizedId2 = Long.parseLong(hierarchicalPath2.getName()); Assert.assertEquals(localizedId1 + 1, localizedId2); } finally { if (dispatcher != null) { dispatcher.stop(); } } } @Test @SuppressWarnings("unchecked") public void testRecoveredResourceWithDirCacheMgr() throws Exception { final String user = "someuser"; final ApplicationId appId = ApplicationId.newInstance(1, 1); // This is a random path. NO File creation will take place at this place. 
final Path localDirRoot = new Path("/tmp/localdir"); Configuration conf = new YarnConfiguration(); DrainDispatcher dispatcher = null; dispatcher = createDispatcher(conf); EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class); EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class); dispatcher.register(LocalizerEventType.class, localizerEventHandler); dispatcher.register(ContainerEventType.class, containerEventHandler); NMStateStoreService stateStore = mock(NMStateStoreService.class); try { LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user, appId, dispatcher, true, conf, stateStore); LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr1)); final long localizedId1 = 52; Path hierarchicalPath1 = new Path(localDirRoot + "/4/2", Long.toString(localizedId1)); Path localizedPath1 = new Path(hierarchicalPath1, "resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr1, localizedPath1, 120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr1)); LocalCacheDirectoryManager dirMgrRoot = tracker.getDirectoryManager(localDirRoot); Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(1, dirMgrRoot.getDirectory("4/2").getCount()); LocalResourceRequest lr2 = createLocalResourceRequest(user, 2, 2, LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr2)); final long localizedId2 = localizedId1 + 1; Path hierarchicalPath2 = new Path(localDirRoot + "/4/2", Long.toString(localizedId2)); Path localizedPath2 = new Path(hierarchicalPath2, "resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr2, localizedPath2, 120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr2)); Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount()); LocalResourceRequest lr3 = createLocalResourceRequest(user, 3, 3, LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr3)); final long localizedId3 = 128; Path hierarchicalPath3 = new Path(localDirRoot + "/4/3", Long.toString(localizedId3)); Path localizedPath3 = new Path(hierarchicalPath3, "resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr3, localizedPath3, 120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr3)); Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount()); Assert.assertEquals(1, dirMgrRoot.getDirectory("4/3").getCount()); LocalResourceRequest lr4 = createLocalResourceRequest(user, 4, 4, LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr4)); final long localizedId4 = 256; Path hierarchicalPath4 = new Path(localDirRoot + "/4", Long.toString(localizedId4)); Path localizedPath4 = new Path(hierarchicalPath4, "resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr4, localizedPath4, 120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr4)); Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(1, dirMgrRoot.getDirectory("4").getCount()); Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount()); Assert.assertEquals(1, dirMgrRoot.getDirectory("4/3").getCount()); } finally { if (dispatcher != null) { dispatcher.stop(); } } } private boolean createdummylocalizefile(Path path) { boolean ret = false; File 
file = new File(path.toUri().getRawPath().toString()); try { ret = file.createNewFile(); } catch (IOException e) { e.printStackTrace(); } return ret; } private void verifyTrackedResourceCount(LocalResourcesTracker tracker, int expected) { int count = 0; Iterator<LocalizedResource> iter = tracker.iterator(); while (iter.hasNext()) { iter.next(); count++; } Assert.assertEquals("Tracker resource count does not match", expected, count); } private LocalResourceRequest createLocalResourceRequest(String user, int i, long ts, LocalResourceVisibility vis) { final LocalResourceRequest req = new LocalResourceRequest(new Path("file:///tmp/" + user + "/rsrc" + i), ts + i * 2000, LocalResourceType.FILE, vis, null); return req; } private LocalizedResource createLocalizedResource(LocalResourceRequest req, Dispatcher dispatcher) { LocalizedResource lr = new LocalizedResource(req, dispatcher); return lr; } private DrainDispatcher createDispatcher(Configuration conf) { DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); return dispatcher; } }
35,758
42.135103
138
java
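The tracker tests above all share one harness idiom: a DrainDispatcher stands in for the NodeManager's asynchronous dispatcher, mock EventHandlers are registered for the event types under observation, and dispatcher.await() drains the event queue so that Mockito verifications run against a settled state. A minimal sketch of that pattern, reduced to its moving parts (the class name and the verified count are illustrative, not from the file above):

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType;

public class DrainDispatcherHarnessSketch {
  @SuppressWarnings("unchecked")
  public void runHarness() {
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(new Configuration());
    dispatcher.start();
    try {
      // A mock handler records every event posted for its event type.
      EventHandler<LocalizerEvent> localizerHandler = mock(EventHandler.class);
      dispatcher.register(LocalizerEventType.class, localizerHandler);
      // ... drive the component under test here so that it emits events ...
      dispatcher.await(); // block until the internal queue is drained
      // Nothing was driven in this sketch, so no events should have arrived.
      verify(localizerHandler, never()).handle(any(LocalizerEvent.class));
    } finally {
      dispatcher.stop();
    }
  }
}

Because await() only returns once the queue is empty, the verify calls do not race against in-flight events; the timeout(...) verification mode used in testLocalResourceCache is the complementary tool for events emitted from other threads.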
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/TestSharedCacheUploader.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse; import org.junit.Test; public class TestSharedCacheUploader { /** * If verifyAccess fails, the upload should fail */ @Test public void testFailVerifyAccess() throws Exception { SharedCacheUploader spied = createSpiedUploader(); doReturn(false).when(spied).verifyAccess(); assertFalse(spied.call()); } /** * If rename fails, the upload should fail */ @Test public void testRenameFail() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true); LocalResource resource = mock(LocalResource.class); Path localPath = mock(Path.class); when(localPath.getName()).thenReturn("foo.jar"); String user = "joe"; SCMUploaderProtocol scmClient = mock(SCMUploaderProtocol.class); SCMUploaderNotifyResponse response = mock(SCMUploaderNotifyResponse.class); when(response.getAccepted()).thenReturn(true); when(scmClient.notify(isA(SCMUploaderNotifyRequest.class))). 
thenReturn(response); FileSystem fs = mock(FileSystem.class); // return false when rename is called when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(false); FileSystem localFs = FileSystem.getLocal(conf); SharedCacheUploader spied = createSpiedUploader(resource, localPath, user, conf, scmClient, fs, localFs); // stub verifyAccess() to return true doReturn(true).when(spied).verifyAccess(); // stub getActualPath() doReturn(localPath).when(spied).getActualPath(); // stub computeChecksum() doReturn("abcdef0123456789").when(spied).computeChecksum(isA(Path.class)); // stub uploadFile() to return true doReturn(true).when(spied).uploadFile(isA(Path.class), isA(Path.class)); assertFalse(spied.call()); } /** * If verifyAccess, uploadFile, rename, and notification succeed, the upload * should succeed */ @Test public void testSuccess() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true); LocalResource resource = mock(LocalResource.class); Path localPath = mock(Path.class); when(localPath.getName()).thenReturn("foo.jar"); String user = "joe"; SCMUploaderProtocol scmClient = mock(SCMUploaderProtocol.class); SCMUploaderNotifyResponse response = mock(SCMUploaderNotifyResponse.class); when(response.getAccepted()).thenReturn(true); when(scmClient.notify(isA(SCMUploaderNotifyRequest.class))). thenReturn(response); FileSystem fs = mock(FileSystem.class); // return true when rename is called when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(true); FileSystem localFs = FileSystem.getLocal(conf); SharedCacheUploader spied = createSpiedUploader(resource, localPath, user, conf, scmClient, fs, localFs); // stub verifyAccess() to return true doReturn(true).when(spied).verifyAccess(); // stub getActualPath() doReturn(localPath).when(spied).getActualPath(); // stub computeChecksum() doReturn("abcdef0123456789").when(spied).computeChecksum(isA(Path.class)); // stub uploadFile() to return true doReturn(true).when(spied).uploadFile(isA(Path.class), isA(Path.class)); // stub notifySharedCacheManager to return true doReturn(true).when(spied).notifySharedCacheManager(isA(String.class), isA(String.class)); assertTrue(spied.call()); } /** * If verifyAccess, uploadFile, and rename succeed, but the SCM rejects the * resource, the file should be deleted */ @Test public void testNotifySCMFail() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true); LocalResource resource = mock(LocalResource.class); Path localPath = mock(Path.class); when(localPath.getName()).thenReturn("foo.jar"); String user = "joe"; FileSystem fs = mock(FileSystem.class); // return true when rename is called when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(true); FileSystem localFs = FileSystem.getLocal(conf); SharedCacheUploader spied = createSpiedUploader(resource, localPath, user, conf, null, fs, localFs); // stub verifyAccess() to return true doReturn(true).when(spied).verifyAccess(); // stub getActualPath() doReturn(localPath).when(spied).getActualPath(); // stub computeChecksum() doReturn("abcdef0123456789").when(spied).computeChecksum(isA(Path.class)); // stub uploadFile() to return true doReturn(true).when(spied).uploadFile(isA(Path.class), isA(Path.class)); // stub notifySharedCacheManager to return false doReturn(false).when(spied).notifySharedCacheManager(isA(String.class), isA(String.class)); assertFalse(spied.call()); verify(fs).delete(isA(Path.class), anyBoolean()); } /** * If the
resource is public, verifyAccess should succeed */ @Test public void testVerifyAccessPublicResource() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true); LocalResource resource = mock(LocalResource.class); // give public visibility when(resource.getVisibility()).thenReturn(LocalResourceVisibility.PUBLIC); Path localPath = mock(Path.class); when(localPath.getName()).thenReturn("foo.jar"); String user = "joe"; SCMUploaderProtocol scmClient = mock(SCMUploaderProtocol.class); FileSystem fs = mock(FileSystem.class); FileSystem localFs = FileSystem.getLocal(conf); SharedCacheUploader spied = createSpiedUploader(resource, localPath, user, conf, scmClient, fs, localFs); assertTrue(spied.verifyAccess()); } /** * If the localPath is a directory, getActualPath should go one level * down */ @Test public void testGetActualPath() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true); LocalResource resource = mock(LocalResource.class); // give public visibility when(resource.getVisibility()).thenReturn(LocalResourceVisibility.PUBLIC); Path localPath = new Path("foo.jar"); String user = "joe"; SCMUploaderProtocol scmClient = mock(SCMUploaderProtocol.class); FileSystem fs = mock(FileSystem.class); FileSystem localFs = mock(FileSystem.class); // stub it to return a status that indicates a directory FileStatus status = mock(FileStatus.class); when(status.isDirectory()).thenReturn(true); when(localFs.getFileStatus(localPath)).thenReturn(status); SharedCacheUploader spied = createSpiedUploader(resource, localPath, user, conf, scmClient, fs, localFs); Path actualPath = spied.getActualPath(); assertEquals(actualPath.getName(), localPath.getName()); assertEquals(actualPath.getParent().getName(), localPath.getName()); } private SharedCacheUploader createSpiedUploader() throws IOException { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true); LocalResource resource = mock(LocalResource.class); Path localPath = mock(Path.class); String user = "foo"; SCMUploaderProtocol scmClient = mock(SCMUploaderProtocol.class); FileSystem fs = FileSystem.get(conf); FileSystem localFs = FileSystem.getLocal(conf); return createSpiedUploader(resource, localPath, user, conf, scmClient, fs, localFs); } private SharedCacheUploader createSpiedUploader(LocalResource resource, Path localPath, String user, Configuration conf, SCMUploaderProtocol scmClient, FileSystem fs, FileSystem localFs) throws IOException { SharedCacheUploader uploader = new SharedCacheUploader(resource, localPath, user, conf, scmClient, fs, localFs); return spy(uploader); } }
9,871
39.793388
102
java
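Every test above isolates SharedCacheUploader.call() the same way: the real object is wrapped in a Mockito spy and each internal step (verifyAccess, getActualPath, computeChecksum, uploadFile, notifySharedCacheManager) is stubbed with doReturn(...).when(spied), so only the orchestration logic in call() actually executes. The idiom in isolation, applied to a made-up Callable rather than the uploader itself:

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import java.util.concurrent.Callable;

public class PartialMockSketch {
  // A stand-in for a multi-step operation such as the uploader's call().
  static class TwoStepTask implements Callable<Boolean> {
    protected boolean stepOne() { return true; }  // imagine verifyAccess()
    protected boolean stepTwo() { return true; }  // imagine uploadFile(...)
    @Override
    public Boolean call() {
      // The orchestration under test: short-circuits on the first failure.
      return stepOne() && stepTwo();
    }
  }

  public static void main(String[] args) throws Exception {
    TwoStepTask spied = spy(new TwoStepTask());
    // Force one internal step to fail; the real call() logic still runs.
    doReturn(false).when(spied).stepOne();
    System.out.println(spied.call()); // prints false: call() ran, stepOne() did not
  }
}

The doReturn(...).when(spied) form matters with spies: when(spied.stepOne()).thenReturn(...) would invoke the real method while arranging the stub, which is exactly what these tests need to avoid.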
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/TestSharedCacheUploadService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.sharedcache; import static org.junit.Assert.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.Test; public class TestSharedCacheUploadService { @Test public void testInitDisabled() { testInit(false); } @Test public void testInitEnabled() { testInit(true); } public void testInit(boolean enabled) { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, enabled); SharedCacheUploadService service = new SharedCacheUploadService(); service.init(conf); assertEquals(enabled, service.isEnabled()); service.stop(); } }
1,559
29.588235
89
java
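testInit(boolean) above is a hand-rolled parameterization driven by two tiny @Test methods. Under JUnit 4, which this module already uses, the same coverage can be expressed with the Parameterized runner; a sketch, assuming the class lives in the same package as SharedCacheUploadService:

import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import static org.junit.Assert.assertEquals;

@RunWith(Parameterized.class)
public class TestSharedCacheUploadServiceParameterized {
  @Parameters
  public static Collection<Object[]> data() {
    // One run with the shared cache enabled, one with it disabled.
    return Arrays.asList(new Object[][] { { true }, { false } });
  }

  private final boolean enabled;

  public TestSharedCacheUploadServiceParameterized(boolean enabled) {
    this.enabled = enabled;
  }

  @Test
  public void testInit() {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, enabled);
    SharedCacheUploadService service = new SharedCacheUploadService();
    service.init(conf);
    assertEquals(enabled, service.isEnabled());
    service.stop();
  }
}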
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.NotSerializableException; import java.io.ObjectInputStream; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.HashMap; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.commons.io.FileUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.InlineDispatcher; import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; import org.mockito.Mockito; import org.mockito.exceptions.verification.WantedButNotInvoked; import org.mockito.internal.matchers.VarargMatcher; public class TestNonAggregatingLogHandler { DeletionService mockDelService; Configuration conf; DrainDispatcher dispatcher; EventHandler<ApplicationEvent> appEventHandler; String user = "testuser"; ApplicationId appId; ApplicationAttemptId appAttemptId; ContainerId container11; LocalDirsHandlerService dirsHandler; @Before @SuppressWarnings("unchecked") public void setup() { mockDelService = mock(DeletionService.class); conf = new YarnConfiguration(); dispatcher = createDispatcher(conf); appEventHandler = mock(EventHandler.class); dispatcher.register(ApplicationEventType.class, appEventHandler); appId = BuilderUtils.newApplicationId(1234, 1); appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1); container11 = BuilderUtils.newContainerId(appAttemptId, 1); dirsHandler = new LocalDirsHandlerService(); } @After public void tearDown() throws IOException { dirsHandler.stop(); dirsHandler.close(); dispatcher.await(); dispatcher.stop(); dispatcher.close(); } @Test public void testLogDeletion() throws IOException { File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 2); String localLogDirsString = localLogDirs[0].getAbsolutePath() + "," + localLogDirs[1].getAbsolutePath(); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString); conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0l); dirsHandler.init(conf); NonAggregatingLogHandler rawLogHandler = new NonAggregatingLogHandler(dispatcher, mockDelService, dirsHandler, new NMNullStateStoreService()); NonAggregatingLogHandler logHandler = spy(rawLogHandler); AbstractFileSystem spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem()); FileContext lfs = FileContext.getFileContext(spylfs, conf); doReturn(lfs).when(logHandler) .getLocalFileContext(isA(Configuration.class)); FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask()); final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", new Path(localLogDirs[0].getAbsolutePath())); doReturn(fs).when(spylfs).getFileStatus(isA(Path.class)); logHandler.init(conf); logHandler.start(); logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, null)); logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0)); logHandler.handle(new LogHandlerAppFinishedEvent(appId)); Path[] localAppLogDirs = new Path[2]; localAppLogDirs[0] = new Path(localLogDirs[0].getAbsolutePath(), appId.toString()); localAppLogDirs[1] = new Path(localLogDirs[1].getAbsolutePath(), appId.toString()); testDeletionServiceCall(mockDelService, user, 5000, localAppLogDirs); logHandler.close(); for (int i = 0; i < localLogDirs.length; i++) { FileUtils.deleteDirectory(localLogDirs[i]); } } @Test public void testDelayedDelete() throws IOException { File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 2); String localLogDirsString = localLogDirs[0].getAbsolutePath() + "," + localLogDirs[1].getAbsolutePath(); conf.set(YarnConfiguration.NM_LOG_DIRS, 
localLogDirsString); conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, YarnConfiguration.DEFAULT_NM_LOG_RETAIN_SECONDS); dirsHandler.init(conf); NonAggregatingLogHandler logHandler = new NonAggregatingLogHandlerWithMockExecutor(dispatcher, mockDelService, dirsHandler); logHandler.init(conf); logHandler.start(); logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, null)); logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0)); logHandler.handle(new LogHandlerAppFinishedEvent(appId)); Path[] localAppLogDirs = new Path[2]; localAppLogDirs[0] = new Path(localLogDirs[0].getAbsolutePath(), appId.toString()); localAppLogDirs[1] = new Path(localLogDirs[1].getAbsolutePath(), appId.toString()); ScheduledThreadPoolExecutor mockSched = ((NonAggregatingLogHandlerWithMockExecutor) logHandler).mockSched; verify(mockSched).schedule(any(Runnable.class), eq(10800l), eq(TimeUnit.SECONDS)); logHandler.close(); for (int i = 0; i < localLogDirs.length; i++) { FileUtils.deleteDirectory(localLogDirs[i]); } } @Test public void testStop() throws Exception { NonAggregatingLogHandler aggregatingLogHandler = new NonAggregatingLogHandler(null, null, null, new NMNullStateStoreService()); // It should not throw NullPointerException aggregatingLogHandler.stop(); NonAggregatingLogHandlerWithMockExecutor logHandler = new NonAggregatingLogHandlerWithMockExecutor(null, null, null); logHandler.init(new Configuration()); logHandler.stop(); verify(logHandler.mockSched).shutdown(); verify(logHandler.mockSched) .awaitTermination(eq(10l), eq(TimeUnit.SECONDS)); verify(logHandler.mockSched).shutdownNow(); logHandler.close(); aggregatingLogHandler.close(); } @Test public void testHandlingApplicationFinishedEvent() throws IOException { DeletionService delService = new DeletionService(null); NonAggregatingLogHandler aggregatingLogHandler = new NonAggregatingLogHandler(new InlineDispatcher(), delService, dirsHandler, new NMNullStateStoreService()); dirsHandler.init(conf); dirsHandler.start(); delService.init(conf); delService.start(); aggregatingLogHandler.init(conf); aggregatingLogHandler.start(); // It should NOT throw RejectedExecutionException aggregatingLogHandler.handle(new LogHandlerAppFinishedEvent(appId)); aggregatingLogHandler.stop(); // It should NOT throw RejectedExecutionException after stopping // handler service. 
aggregatingLogHandler.handle(new LogHandlerAppFinishedEvent(appId)); aggregatingLogHandler.close(); } private class NonAggregatingLogHandlerWithMockExecutor extends NonAggregatingLogHandler { private ScheduledThreadPoolExecutor mockSched; public NonAggregatingLogHandlerWithMockExecutor(Dispatcher dispatcher, DeletionService delService, LocalDirsHandlerService dirsHandler) { this(dispatcher, delService, dirsHandler, new NMNullStateStoreService()); } public NonAggregatingLogHandlerWithMockExecutor(Dispatcher dispatcher, DeletionService delService, LocalDirsHandlerService dirsHandler, NMStateStoreService stateStore) { super(dispatcher, delService, dirsHandler, stateStore); } @Override ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor( Configuration conf) { mockSched = mock(ScheduledThreadPoolExecutor.class); return mockSched; } } private DrainDispatcher createDispatcher(Configuration conf) { DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); return dispatcher; } /* * Test to ensure that we handle the cleanup of directories that may not have * the application log dirs we're trying to delete or may have other problems. * Test creates 7 log dirs, and fails the directory check for 4 of them and * then checks to ensure we tried to delete only the ones that passed the * check. */ @Test public void testFailedDirLogDeletion() throws Exception { File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 7); final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length); for (int i = 0; i < localLogDirs.length; i++) { localLogDirPaths.add(localLogDirs[i].getAbsolutePath()); } String localLogDirsString = StringUtils.join(localLogDirPaths, ","); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString); conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0l); LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class); NonAggregatingLogHandler rawLogHandler = new NonAggregatingLogHandler(dispatcher, mockDelService, mockDirsHandler, new NMNullStateStoreService()); NonAggregatingLogHandler logHandler = spy(rawLogHandler); AbstractFileSystem spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem()); FileContext lfs = FileContext.getFileContext(spylfs, conf); doReturn(lfs).when(logHandler) .getLocalFileContext(isA(Configuration.class)); logHandler.init(conf); logHandler.start(); runMockedFailedDirs(logHandler, appId, user, mockDelService, mockDirsHandler, conf, spylfs, lfs, localLogDirs); logHandler.close(); } @Test public void testRecovery() throws Exception { File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 2); String localLogDirsString = localLogDirs[0].getAbsolutePath() + "," + localLogDirs[1].getAbsolutePath(); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString); conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, YarnConfiguration.DEFAULT_NM_LOG_RETAIN_SECONDS); dirsHandler.init(conf); NMStateStoreService stateStore = new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); NonAggregatingLogHandlerWithMockExecutor logHandler = new NonAggregatingLogHandlerWithMockExecutor(dispatcher, mockDelService, dirsHandler, stateStore); logHandler.init(conf); logHandler.start(); logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, null)); 
logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0)); logHandler.handle(new LogHandlerAppFinishedEvent(appId)); // simulate a restart and verify deletion is rescheduled logHandler.close(); logHandler = new NonAggregatingLogHandlerWithMockExecutor(dispatcher, mockDelService, dirsHandler, stateStore); logHandler.init(conf); logHandler.start(); ArgumentCaptor<Runnable> schedArg = ArgumentCaptor.forClass(Runnable.class); verify(logHandler.mockSched).schedule(schedArg.capture(), anyLong(), eq(TimeUnit.MILLISECONDS)); // execute the runnable and verify another restart has nothing scheduled schedArg.getValue().run(); logHandler.close(); logHandler = new NonAggregatingLogHandlerWithMockExecutor(dispatcher, mockDelService, dirsHandler, stateStore); logHandler.init(conf); logHandler.start(); verify(logHandler.mockSched, never()).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); logHandler.close(); } /** * Function to run a log handler with directories failing the getFileStatus * call. The function accepts the log handler, setup the mocks to fail with * specific exceptions and ensures the deletion service has the correct calls. * * @param logHandler the logHandler implementation to test * * @param appId the application id that we wish when sending events to the log * handler * * @param user the user name to use * * @param mockDelService a mock of the DeletionService which we will verify * the delete calls against * * @param dirsHandler a spy or mock on the LocalDirsHandler service used to * when creating the logHandler. It needs to be a spy so that we can intercept * the getAllLogDirs() call. * * @param conf the configuration used * * @param spylfs a spy on the AbstractFileSystem object used when creating lfs * * @param lfs the FileContext object to be used to mock the getFileStatus() * calls * * @param localLogDirs list of the log dirs to run the test against, must have * at least 7 entries */ public static void runMockedFailedDirs(LogHandler logHandler, ApplicationId appId, String user, DeletionService mockDelService, LocalDirsHandlerService dirsHandler, Configuration conf, AbstractFileSystem spylfs, FileContext lfs, File[] localLogDirs) throws Exception { Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>(); if (localLogDirs.length < 7) { throw new IllegalArgumentException( "Argument localLogDirs must be at least of length 7"); } Path[] localAppLogDirPaths = new Path[localLogDirs.length]; for (int i = 0; i < localAppLogDirPaths.length; i++) { localAppLogDirPaths[i] = new Path(localLogDirs[i].getAbsolutePath(), appId.toString()); } final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length); for (int i = 0; i < localLogDirs.length; i++) { localLogDirPaths.add(localLogDirs[i].getAbsolutePath()); } // setup mocks FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask()); final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", new Path(localLogDirs[0].getAbsolutePath())); doReturn(fs).when(spylfs).getFileStatus(isA(Path.class)); doReturn(localLogDirPaths).when(dirsHandler).getLogDirsForCleanup(); logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, appAcls)); // test case where some dirs have the log dir to delete // mock some dirs throwing various exceptions // verify deletion happens only on the others Mockito.doThrow(new FileNotFoundException()).when(spylfs) 
.getFileStatus(eq(localAppLogDirPaths[0])); doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[1])); Mockito.doThrow(new AccessControlException()).when(spylfs) .getFileStatus(eq(localAppLogDirPaths[2])); doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[3])); Mockito.doThrow(new IOException()).when(spylfs) .getFileStatus(eq(localAppLogDirPaths[4])); Mockito.doThrow(new UnsupportedFileSystemException("test")).when(spylfs) .getFileStatus(eq(localAppLogDirPaths[5])); doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[6])); logHandler.handle(new LogHandlerAppFinishedEvent(appId)); testDeletionServiceCall(mockDelService, user, 5000, localAppLogDirPaths[1], localAppLogDirPaths[3], localAppLogDirPaths[6]); return; } static class DeletePathsMatcher extends ArgumentMatcher<Path[]> implements VarargMatcher { // to get rid of serialization warning static final long serialVersionUID = 0; private transient Path[] matchPaths; DeletePathsMatcher(Path... matchPaths) { this.matchPaths = matchPaths; } @Override public boolean matches(Object varargs) { return new EqualsBuilder().append(matchPaths, varargs).isEquals(); } // function to get rid of FindBugs warning private void readObject(ObjectInputStream os) throws NotSerializableException { throw new NotSerializableException(this.getClass().getName()); } } /** * Function to verify that the DeletionService object received the right * requests. * * @param delService the DeletionService mock which we verify against * * @param user the user name to use when verifying the deletion * * @param timeout amount in milliseconds to wait before we decide the calls * didn't come through * * @param matchPaths the paths to match in the delete calls * * @throws WantedButNotInvoked if the calls could not be verified */ static void testDeletionServiceCall(DeletionService delService, String user, long timeout, Path... matchPaths) { long verifyStartTime = System.currentTimeMillis(); WantedButNotInvoked notInvokedException = null; boolean matched = false; while (!matched && System.currentTimeMillis() < verifyStartTime + timeout) { try { verify(delService).delete(eq(user), (Path) eq(null), Mockito.argThat(new DeletePathsMatcher(matchPaths))); matched = true; } catch (WantedButNotInvoked e) { notInvokedException = e; try { Thread.sleep(50l); } catch (InterruptedException i) { } } } if (!matched) { throw notInvokedException; } return; } public static File[] getLocalLogDirFiles(String name, int number) { File[] dirs = new File[number]; for (int i = 0; i < dirs.length; i++) { dirs[i] = new File("target", name + "-localLogDir" + i).getAbsoluteFile(); } return dirs; } }
21,282
38.123162
116
java
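testDeletionServiceCall above verifies an interaction that happens on another thread by re-running Mockito's verify() in a sleep loop until it passes or a deadline expires, rethrowing the most recent WantedButNotInvoked on timeout. The same idiom in a generic form (the helper and class names are illustrative; Mockito's built-in verify(mock, timeout(ms)) covers the common case when no custom varargs matcher is involved):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import java.util.List;

public class PollingVerifySketch {
  /** Re-run a verification until it passes or the deadline expires. */
  static void verifyEventually(Runnable verification, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    AssertionError last = null; // WantedButNotInvoked extends AssertionError
    while (true) {
      try {
        verification.run();
        return; // verification passed
      } catch (AssertionError e) {
        last = e;
      }
      if (System.currentTimeMillis() >= deadline) {
        throw last; // give up, surfacing the most recent failure
      }
      Thread.sleep(50L);
    }
  }

  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws InterruptedException {
    final List<String> mockList = (List<String>) mock(List.class);
    mockList.add("x"); // in real use this would happen on another thread
    verifyEventually(new Runnable() {
      @Override
      public void run() {
        verify(mockList).add("x");
      }
    }, 1000L);
  }
}

The custom DeletePathsMatcher in the file above exists because delete(user, null, paths...) takes varargs; a plain eq() on the array is unreliable there, so the matcher implements VarargMatcher and compares with EqualsBuilder.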
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.metrics2.impl.MetricsRecords; import org.apache.hadoop.yarn.api.records.ContainerId; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; public class TestContainerMetrics { @Test public void testContainerMetricsFlow() throws InterruptedException { final String ERR = "Error in number of records"; // Create a dummy MetricsSystem MetricsSystem system = mock(MetricsSystem.class); doReturn(this).when(system).register(anyString(), anyString(), any()); MetricsCollectorImpl collector = new MetricsCollectorImpl(); ContainerId containerId = mock(ContainerId.class); ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, 100); metrics.recordMemoryUsage(1024); metrics.getMetrics(collector, true); assertEquals(ERR, 0, collector.getRecords().size()); Thread.sleep(110); metrics.getMetrics(collector, true); assertEquals(ERR, 1, collector.getRecords().size()); collector.clear(); Thread.sleep(110); metrics.getMetrics(collector, true); assertEquals(ERR, 1, collector.getRecords().size()); collector.clear(); metrics.finished(); metrics.getMetrics(collector, true); assertEquals(ERR, 1, collector.getRecords().size()); collector.clear(); metrics.getMetrics(collector, true); assertEquals(ERR, 0, collector.getRecords().size()); Thread.sleep(110); metrics.getMetrics(collector, true); assertEquals(ERR, 0, collector.getRecords().size()); } @Test public void testContainerMetricsLimit() throws InterruptedException { final String ERR = "Error in number of records"; MetricsSystem system = mock(MetricsSystem.class); doReturn(this).when(system).register(anyString(), anyString(), any()); MetricsCollectorImpl collector = new MetricsCollectorImpl(); ContainerId containerId = mock(ContainerId.class); ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, 100); int anyPmemLimit = 1024; int anyVmemLimit = 2048; int anyVcores = 10; long anyLaunchDuration = 20L; long anyLocalizationDuration = 1000L; String anyProcessId = "1234"; metrics.recordResourceLimit(anyVmemLimit, anyPmemLimit, anyVcores); metrics.recordProcessId(anyProcessId); metrics.recordStateChangeDurations(anyLaunchDuration, anyLocalizationDuration); Thread.sleep(110); metrics.getMetrics(collector, true); assertEquals(ERR, 1, collector.getRecords().size()); MetricsRecord record = 
collector.getRecords().get(0); MetricsRecords.assertTag(record, ContainerMetrics.PROCESSID_INFO.name(), anyProcessId); MetricsRecords.assertMetric(record, ContainerMetrics .PMEM_LIMIT_METRIC_NAME, anyPmemLimit); MetricsRecords.assertMetric(record, ContainerMetrics.VMEM_LIMIT_METRIC_NAME, anyVmemLimit); MetricsRecords.assertMetric(record, ContainerMetrics.VCORE_LIMIT_METRIC_NAME, anyVcores); MetricsRecords.assertMetric(record, ContainerMetrics.LAUNCH_DURATION_METRIC_NAME, anyLaunchDuration); MetricsRecords.assertMetric(record, ContainerMetrics.LOCALIZATION_DURATION_METRIC_NAME, anyLocalizationDuration); collector.clear(); } }
4,495
36.157025
95
java
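Both tests above hinge on ContainerMetrics' time-gated flushing: getMetrics() emits nothing until the flush period passed to forContainer(containerId, 100) has elapsed, and finished() forces one final record before the source goes quiet. A condensed sketch of that lifecycle, using only calls exercised by the tests (the prints and expected counts mirror the assertions in testContainerMetricsFlow; the 110 ms sleep simply outwaits the 100 ms period):

import static org.mockito.Mockito.mock;
import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerMetrics;

public class ContainerMetricsLifecycleSketch {
  public static void main(String[] args) throws InterruptedException {
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    // 100 ms flush period, as in the tests above.
    ContainerMetrics metrics =
        ContainerMetrics.forContainer(mock(ContainerId.class), 100);
    metrics.recordMemoryUsage(1024);
    metrics.getMetrics(collector, true);
    System.out.println(collector.getRecords().size()); // expect 0: period not over
    Thread.sleep(110); // let the flush period elapse
    metrics.getMetrics(collector, true);
    System.out.println(collector.getRecords().size()); // expect 1: record flushed
    collector.clear();
    metrics.finished(); // marks the container done; one last record is emitted
    metrics.getMetrics(collector, true);
    System.out.println(collector.getRecords().size()); // expect 1: the final record
  }
}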
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.TestProcfsBasedProcessTree;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class TestContainersMonitor extends BaseContainerManagerTest {

  public TestContainersMonitor() throws UnsupportedFileSystemException {
    super();
  }

  static {
    LOG = LogFactory.getLog(TestContainersMonitor.class);
  }

  @Before
  public void setup() throws IOException {
    conf.setClass(
        YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR,
        LinuxResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
    super.setup();
  }

  /**
   * Test to verify the check for whether a process tree is over limit or not.
   *
   * @throws IOException
   *           if there was a problem setting up the fake procfs directories
   *           or files.
   */
  @Test
  public void testProcessTreeLimits() throws IOException {

    // set up a dummy proc file system
    File procfsRootDir = new File(localDir, "proc");
    String[] pids = { "100", "200", "300", "400", "500", "600", "700" };
    try {
      TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir);

      // create pid dirs.
      TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir, pids);

      // create process infos.
      TestProcfsBasedProcessTree.ProcessStatInfo[] procs =
          new TestProcfsBasedProcessTree.ProcessStatInfo[7];

      // assume pids 100, 500 are in 1 tree
      // 200,300,400 are in another
      // 600,700 are in a third
      procs[0] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "100", "proc1", "1", "100", "100", "100000" });
      procs[1] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "200", "proc2", "1", "200", "200", "200000" });
      procs[2] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "300", "proc3", "200", "200", "200", "300000" });
      procs[3] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "400", "proc4", "200", "200", "200", "400000" });
      procs[4] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "500", "proc5", "100", "100", "100", "1500000" });
      procs[5] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "600", "proc6", "1", "600", "600", "100000" });
      procs[6] = new TestProcfsBasedProcessTree.ProcessStatInfo(
          new String[] { "700", "proc7", "600", "600", "600", "100000" });

      // write stat files.
      TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir, pids, procs,
          null);

      // vmem limit
      long limit = 700000;

      ContainersMonitorImpl test = new ContainersMonitorImpl(null, null, null);

      // create process trees
      // tree rooted at 100 is over limit immediately, as it is
      // twice over the mem limit.
      ProcfsBasedProcessTree pTree = new ProcfsBasedProcessTree(
          "100", procfsRootDir.getAbsolutePath());
      pTree.updateProcessTree();
      assertTrue("tree rooted at 100 should be over limit "
          + "after first iteration.",
          test.isProcessTreeOverLimit(pTree, "dummyId", limit));

      // the tree rooted at 200 is initially below limit.
      pTree = new ProcfsBasedProcessTree("200",
          procfsRootDir.getAbsolutePath());
      pTree.updateProcessTree();
      assertFalse("tree rooted at 200 shouldn't be over limit "
          + "after one iteration.",
          test.isProcessTreeOverLimit(pTree, "dummyId", limit));

      // second iteration - now the tree has been over limit twice,
      // hence it should be declared over limit.
      pTree.updateProcessTree();
      assertTrue(
          "tree rooted at 200 should be over limit after 2 iterations",
          test.isProcessTreeOverLimit(pTree, "dummyId", limit));

      // the tree rooted at 600 is never over limit.
      pTree = new ProcfsBasedProcessTree("600",
          procfsRootDir.getAbsolutePath());
      pTree.updateProcessTree();
      assertFalse("tree rooted at 600 should never be over limit.",
          test.isProcessTreeOverLimit(pTree, "dummyId", limit));

      // another iteration does not make any difference.
      pTree.updateProcessTree();
      assertFalse("tree rooted at 600 should never be over limit.",
          test.isProcessTreeOverLimit(pTree, "dummyId", limit));
    } finally {
      FileUtil.fullyDelete(procfsRootDir);
    }
  }

  @Test
  public void testContainerKillOnMemoryOverflow() throws IOException,
      InterruptedException, YarnException {

    if (!ProcfsBasedProcessTree.isAvailable()) {
      return;
    }

    containerManager.start();

    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile =
        new File(tmpDir, "start_file.txt").getAbsoluteFile();
    fileWriter.write("\numask 0"); // So that start file is readable by the
    // test.
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();

    ContainerLaunchContext containerLaunchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);

    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId =
        ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    int port = 12345;

    URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS
        .makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha =
        recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources =
        new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(8 * 1024 * 1024, 1);
    ContainerTokenIdentifier containerIdentifier =
        new ContainerTokenIdentifier(cId, context.getNodeId().toString(),
            user, r, System.currentTimeMillis() + 120000, 123,
            DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
    Token containerToken =
        BuilderUtils.newContainerToken(context.getNodeId(),
            containerManager.getContext().getContainerTokenSecretManager()
                .createPassword(containerIdentifier), containerIdentifier);

    StartContainerRequest scRequest =
        StartContainerRequest.newInstance(containerLaunchContext,
            containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests =
        StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);

    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
      Thread.sleep(1000);
      LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!",
        processStartFile.exists());

    // Now verify the contents of the file
    BufferedReader reader =
        new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());

    BaseContainerManagerTest.waitForContainerState(containerManager, cId,
        ContainerState.COMPLETE, 60);

    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest =
        GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus =
        containerManager.getContainerStatuses(gcsRequest)
            .getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,
        containerStatus.getExitStatus());
    String expectedMsgPattern =
        "Container \\[pid=" + pid + ",containerID=" + cId
            + "\\] is running beyond virtual memory limits. Current usage: "
            + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "
            + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "
            + "Killing container.\nDump of the process-tree for " + cId
            + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern
        + "\n\nObserved message is: " + containerStatus.getDiagnostics(),
        true, pat.matcher(containerStatus.getDiagnostics()).find());

    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!",
        exec.signalContainer(new ContainerSignalContext.Builder()
            .setUser(user)
            .setPid(pid)
            .setSignal(Signal.NULL)
            .build()));
  }

  @Test(timeout = 20000)
  public void testContainerMonitorMemFlags() {
    ContainersMonitor cm = null;

    long expPmem = 8192 * 1024 * 1024l;
    long expVmem = (long) (expPmem * 2.1f);

    cm = new ContainersMonitorImpl(mock(ContainerExecutor.class),
        mock(AsyncDispatcher.class), mock(Context.class));
    cm.init(getConfForCM(false, false, 8192, 2.1f));
    assertEquals(expPmem, cm.getPmemAllocatedForContainers());
    assertEquals(expVmem, cm.getVmemAllocatedForContainers());
    assertEquals(false, cm.isPmemCheckEnabled());
    assertEquals(false, cm.isVmemCheckEnabled());

    cm = new ContainersMonitorImpl(mock(ContainerExecutor.class),
        mock(AsyncDispatcher.class), mock(Context.class));
    cm.init(getConfForCM(true, false, 8192, 2.1f));
    assertEquals(expPmem, cm.getPmemAllocatedForContainers());
    assertEquals(expVmem, cm.getVmemAllocatedForContainers());
    assertEquals(true, cm.isPmemCheckEnabled());
    assertEquals(false, cm.isVmemCheckEnabled());

    cm = new ContainersMonitorImpl(mock(ContainerExecutor.class),
        mock(AsyncDispatcher.class), mock(Context.class));
    cm.init(getConfForCM(true, true, 8192, 2.1f));
    assertEquals(expPmem, cm.getPmemAllocatedForContainers());
    assertEquals(expVmem, cm.getVmemAllocatedForContainers());
    assertEquals(true, cm.isPmemCheckEnabled());
    assertEquals(true, cm.isVmemCheckEnabled());

    cm = new ContainersMonitorImpl(mock(ContainerExecutor.class),
        mock(AsyncDispatcher.class), mock(Context.class));
    cm.init(getConfForCM(false, true, 8192, 2.1f));
    assertEquals(expPmem, cm.getPmemAllocatedForContainers());
    assertEquals(expVmem, cm.getVmemAllocatedForContainers());
    assertEquals(false, cm.isPmemCheckEnabled());
    assertEquals(true, cm.isVmemCheckEnabled());
  }

  private YarnConfiguration getConfForCM(boolean pMemEnabled,
      boolean vMemEnabled, int nmPmem, float vMemToPMemRatio) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.NM_PMEM_MB, nmPmem);
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, pMemEnabled);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, vMemEnabled);
    conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, vMemToPMemRatio);
    return conf;
  }
}
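testProcessTreeLimits above encodes a two-strike policy: a tree grossly over the limit (more than twice it) is flagged on the first observation, while a tree merely over the limit gets one monitoring interval of grace and is flagged on the second consecutive over-limit observation. Below is a self-contained sketch of that policy; the real ContainersMonitorImpl derives the second check from the age of processes within the tree rather than from a counter, so OverLimitPolicySketch is only an illustration of the rule the assertions encode:

public class OverLimitPolicySketch {
  private final long limitBytes;
  private boolean wasOverLimit;   // previous observation exceeded the limit

  public OverLimitPolicySketch(long limitBytes) {
    this.limitBytes = limitBytes;
  }

  /**
   * Returns true when the tree should be declared over limit:
   * immediately if usage is more than twice the limit, otherwise only
   * after two consecutive over-limit observations, giving a freshly
   * forked tree one interval of grace before it is killed.
   */
  public boolean observe(long usageBytes) {
    if (usageBytes > 2 * limitBytes) {
      return true;                          // grossly over: no grace period
    }
    boolean over = usageBytes > limitBytes;
    boolean kill = over && wasOverLimit;    // second consecutive strike
    wasOverLimit = over;
    return kill;
  }
}

Replaying the test's trees against this sketch matches its assertions: the tree at 100 (1600000 against a 700000 limit, more than double) is flagged immediately, the tree at 200 (900000) only on the second iteration, and the tree at 600 (200000) never.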
15521
43.475645
91
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMap;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.TestNonAggregatingLogHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.mortbay.util.MultiException;

import com.google.common.base.Supplier;

//@Ignore
public class TestLogAggregationService extends BaseContainerManagerTest {

  private Map<ApplicationAccessType, String> acls = createAppAcls();

  static {
    LOG = LogFactory.getLog(TestLogAggregationService.class);
  }

  private static RecordFactory recordFactory = RecordFactoryProvider
      .getRecordFactory(null);

  private File remoteRootLogDir = new File("target", this.getClass()
      .getName() + "-remoteLogDir");

  public TestLogAggregationService() throws UnsupportedFileSystemException {
    super();
    this.remoteRootLogDir.mkdir();
  }

  DrainDispatcher dispatcher;
  EventHandler<ApplicationEvent> appEventHandler;

  @Override
  @SuppressWarnings("unchecked")
  public void setup() throws IOException {
    super.setup();
    NodeId nodeId = NodeId.newInstance("0.0.0.0", 5555);
    ((NMContext) context).setNodeId(nodeId);
    dispatcher = createDispatcher();
    appEventHandler = mock(EventHandler.class);
    dispatcher.register(ApplicationEventType.class, appEventHandler);
    UserGroupInformation.setConfiguration(conf);
  }

  @Override
  public void tearDown() throws IOException, InterruptedException {
    super.tearDown();
    createContainerExecutor().deleteAsUser(new DeletionAsUserContext.Builder()
        .setUser(user)
        .setSubDir(new Path(remoteRootLogDir.getAbsolutePath()))
        .setBasedirs(new Path[] {})
        .build());

    dispatcher.await();
    dispatcher.stop();
    dispatcher.close();
  }

  private void verifyLocalFileDeletion(
      LogAggregationService logAggregationService) throws Exception {
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

    // AppLogDir should be created
    File app1LogDir =
        new File(localLogDir, ConverterUtils.toString(application1));
    app1LogDir.mkdir();
    logAggregationService
        .handle(new LogHandlerAppStartedEvent(
            application1, this.user, null,
            ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

    ApplicationAttemptId appAttemptId =
        BuilderUtils.newApplicationAttemptId(application1, 1);
    ContainerId container11 = BuilderUtils.newContainerId(appAttemptId, 1);

    // Simulate log-file creation
    writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
        "stderr", "syslog" });
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container11, 0));

    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        application1));

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    // ensure filesystems were closed
    verify(logAggregationService).closeFileSystems(
        any(UserGroupInformation.class));
    verify(delSrvc).delete(eq(user), eq((Path) null),
        eq(new Path(app1LogDir.getAbsolutePath())));
    delSrvc.stop();

    String containerIdStr = ConverterUtils.toString(container11);
    File containerLogDir = new File(app1LogDir, containerIdStr);
    for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
      File f = new File(containerLogDir, fileType);
      Assert.assertFalse("check " + f, f.exists());
    }

    Assert.assertFalse(app1LogDir.exists());

    Path logFilePath =
        logAggregationService.getRemoteNodeLogFileForApp(application1,
            this.user);

    Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(
        logFilePath.toUri().getPath()).exists());

    dispatcher.await();

    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
        new ApplicationEvent(
            appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
        new ApplicationEvent(
            appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
    };

    checkEvents(appEventHandler, expectedEvents, true, "getType",
        "getApplicationID");
  }

  @Test
  public void testLocalFileDeletionAfterUpload() throws Exception {
    this.delSrvc = new DeletionService(createContainerExecutor());
    delSrvc = spy(delSrvc);
    this.delSrvc.init(conf);
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler));
    verifyLocalFileDeletion(logAggregationService);
  }

  @Test
  public void testLocalFileDeletionOnDiskFull() throws Exception {
    this.delSrvc = new DeletionService(createContainerExecutor());
    delSrvc = spy(delSrvc);
    this.delSrvc.init(conf);
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());
    List<String> logDirs = super.dirsHandler.getLogDirs();
    LocalDirsHandlerService dirsHandler = spy(super.dirsHandler);
    // Simulate disk being full by returning no good log dirs but having a
    // directory in full log dirs.
    when(dirsHandler.getLogDirs()).thenReturn(new ArrayList<String>());
    when(dirsHandler.getLogDirsForRead()).thenReturn(logDirs);
    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            dirsHandler));
    verifyLocalFileDeletion(logAggregationService);
  }

  /* Test to verify fix for YARN-3793 */
  @Test
  public void testNoLogsUploadedOnAppFinish() throws Exception {
    this.delSrvc = new DeletionService(createContainerExecutor());
    delSrvc = spy(delSrvc);
    this.delSrvc.init(conf);
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    LogAggregationService logAggregationService = new LogAggregationService(
        dispatcher, this.context, this.delSrvc, super.dirsHandler);
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId app = BuilderUtils.newApplicationId(1234, 1);
    File appLogDir = new File(localLogDir, ConverterUtils.toString(app));
    appLogDir.mkdir();
    LogAggregationContext context =
        LogAggregationContext.newInstance("HOST*", "sys*");
    logAggregationService.handle(new LogHandlerAppStartedEvent(app, this.user,
        null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls,
        context));

    ApplicationAttemptId appAttemptId =
        BuilderUtils.newApplicationAttemptId(app, 1);
    ContainerId cont = BuilderUtils.newContainerId(appAttemptId, 1);
    writeContainerLogs(appLogDir, cont, new String[] { "stdout",
        "stderr", "syslog" });
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont, 0));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(app));
    logAggregationService.stop();
    delSrvc.stop();
    // Aggregated logs should not be deleted if not uploaded.
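    // times(0) below asserts that DeletionService.delete() was never invoked
    // for this user: local logs that were not uploaded must survive the
    // app-finish event instead of being silently removed (YARN-3793).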
    verify(delSrvc, times(0)).delete(user, null);
  }

  @Test
  public void testNoContainerOnNode() throws Exception {
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    LogAggregationService logAggregationService =
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler);
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

    // AppLogDir should be created
    File app1LogDir =
        new File(localLogDir, ConverterUtils.toString(application1));
    app1LogDir.mkdir();
    logAggregationService
        .handle(new LogHandlerAppStartedEvent(
            application1, this.user, null,
            ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        application1));

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());

    Assert.assertFalse(new File(logAggregationService
        .getRemoteNodeLogFileForApp(application1, this.user).toUri().getPath())
        .exists());

    dispatcher.await();

    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
        new ApplicationEvent(
            application1,
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
        new ApplicationEvent(
            application1,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
    };
    checkEvents(appEventHandler, expectedEvents, true, "getType",
        "getApplicationID");
    logAggregationService.close();
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testMultipleAppsLogAggregation() throws Exception {

    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    String[] fileNames = new String[] { "stdout", "stderr", "syslog" };

    LogAggregationService logAggregationService =
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler);
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

    // AppLogDir should be created
    File app1LogDir =
        new File(localLogDir, ConverterUtils.toString(application1));
    app1LogDir.mkdir();
    logAggregationService
        .handle(new LogHandlerAppStartedEvent(
            application1, this.user, null,
            ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

    ApplicationAttemptId appAttemptId1 =
        BuilderUtils.newApplicationAttemptId(application1, 1);
    ContainerId container11 = BuilderUtils.newContainerId(appAttemptId1, 1);

    // Simulate log-file creation
    writeContainerLogs(app1LogDir, container11, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container11, 0));

    ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
    ApplicationAttemptId appAttemptId2 =
        BuilderUtils.newApplicationAttemptId(application2, 1);

    File app2LogDir =
        new File(localLogDir, ConverterUtils.toString(application2));
    app2LogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(
        application2, this.user, null,
        ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY, this.acls));

    ContainerId container21 = BuilderUtils.newContainerId(appAttemptId2, 1);
    writeContainerLogs(app2LogDir, container21, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container21, 0));

    ContainerId container12 = BuilderUtils.newContainerId(appAttemptId1, 2);
    writeContainerLogs(app1LogDir, container12, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container12, 0));

    ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
    ApplicationAttemptId appAttemptId3 =
        BuilderUtils.newApplicationAttemptId(application3, 1);

    File app3LogDir =
        new File(localLogDir, ConverterUtils.toString(application3));
    app3LogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(application3,
        this.user, null,
        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,
        this.acls));

    dispatcher.await();
    ApplicationEvent expectedInitEvents[] = new ApplicationEvent[]{
        new ApplicationEvent(
            application1,
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
        new ApplicationEvent(
            application2,
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
        new ApplicationEvent(
            application3,
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)
    };
    checkEvents(appEventHandler, expectedInitEvents, false, "getType",
        "getApplicationID");
    reset(appEventHandler);

    ContainerId container31 = BuilderUtils.newContainerId(appAttemptId3, 1);
    writeContainerLogs(app3LogDir, container31, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container31, 0));

    ContainerId container32 = BuilderUtils.newContainerId(appAttemptId3, 2);
    writeContainerLogs(app3LogDir, container32, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container32, 1)); // Failed

    ContainerId container22 = BuilderUtils.newContainerId(appAttemptId2, 2);
    writeContainerLogs(app2LogDir, container22, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container22, 0));

    ContainerId container33 = BuilderUtils.newContainerId(appAttemptId3, 3);
    writeContainerLogs(app3LogDir, container33, fileNames);
    logAggregationService.handle(
        new LogHandlerContainerFinishedEvent(container33, 0));

    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        application2));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        application3));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        application1));

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());

    verifyContainerLogs(logAggregationService, application1,
        new ContainerId[] { container11, container12 }, fileNames, 3, false);

    verifyContainerLogs(logAggregationService, application2,
        new ContainerId[] { container21 }, fileNames, 3, false);

    verifyContainerLogs(logAggregationService, application3,
        new ContainerId[] { container31, container32 }, fileNames, 3, false);

    dispatcher.await();

    ApplicationEvent[] expectedFinishedEvents = new ApplicationEvent[]{
        new ApplicationEvent(
            application1,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
        new ApplicationEvent(
            application2,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
        new ApplicationEvent(
            application3,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
    };
    checkEvents(appEventHandler, expectedFinishedEvents, false, "getType",
        "getApplicationID");
  }

  @Test
  public void testVerifyAndCreateRemoteDirsFailure() throws Exception {
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler));
    logAggregationService.init(this.conf);

    YarnRuntimeException e = new YarnRuntimeException("KABOOM!");
    doThrow(e)
        .when(logAggregationService).verifyAndCreateRemoteLogDir(
            any(Configuration.class));

    logAggregationService.start();

    // Now try to start an application
    ApplicationId appId =
        BuilderUtils.newApplicationId(System.currentTimeMillis(),
            (int) (Math.random() * 1000));
    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
        this.user, null,
        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,
        this.acls));
    dispatcher.await();

    // Verify that it failed
    ApplicationEvent[] expectedEvents = new ApplicationEvent[] {
        new ApplicationEvent(appId,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED)
    };
    checkEvents(appEventHandler, expectedEvents, false, "getType",
        "getApplicationID", "getDiagnostic");

    Mockito.reset(logAggregationService);

    // Now try to start another one
    ApplicationId appId2 =
        BuilderUtils.newApplicationId(System.currentTimeMillis(),
            (int) (Math.random() * 1000));
    File appLogDir = new File(localLogDir, ConverterUtils.toString(appId2));
    appLogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(appId2,
        this.user, null,
        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,
        this.acls));
    dispatcher.await();

    // Verify that it worked
    expectedEvents = new ApplicationEvent[] {
        new ApplicationEvent(appId, // original failure
            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED),
        new ApplicationEvent(appId2, // success
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)
    };
    checkEvents(appEventHandler, expectedEvents, false, "getType",
        "getApplicationID", "getDiagnostic");

    logAggregationService.stop();
  }

  @Test
  public void testVerifyAndCreateRemoteDirNonExistence() throws Exception {
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    File aNewFile = new File(String.valueOf("tmp" + System.currentTimeMillis()));
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        aNewFile.getAbsolutePath());

    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler));
    logAggregationService.init(this.conf);
    boolean existsBefore = aNewFile.exists();
    assertTrue("The new file already exists!", !existsBefore);

    logAggregationService.verifyAndCreateRemoteLogDir(this.conf);

    boolean existsAfter = aNewFile.exists();
    assertTrue("The new aggregate file is not successfully created",
        existsAfter);
    aNewFile.delete(); //housekeeping
  }

  @Test
  public void testAppLogDirCreation() throws Exception {
    final String logSuffix = "logs";
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, logSuffix);

    InlineDispatcher dispatcher = new InlineDispatcher();
    dispatcher.init(this.conf);
    dispatcher.start();

    FileSystem fs = FileSystem.get(this.conf);
    final FileSystem spyFs = spy(FileSystem.get(this.conf));

    LogAggregationService aggSvc = new LogAggregationService(dispatcher,
        this.context, this.delSrvc, super.dirsHandler) {
      @Override
      protected FileSystem getFileSystem(Configuration conf) {
        return spyFs;
      }
    };

    aggSvc.init(this.conf);
    aggSvc.start();

    // start an application and verify user, suffix, and app dirs created
    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
    Path userDir = fs.makeQualified(new Path(
        remoteRootLogDir.getAbsolutePath(), this.user));
    Path suffixDir = new Path(userDir, logSuffix);
    Path appDir = new Path(suffixDir, appId.toString());
    aggSvc.handle(new LogHandlerAppStartedEvent(appId, this.user, null,
        ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
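    // The spied FileSystem records every mkdirs() call, so the verifications
    // below can assert that the user dir, suffix dir and app dir were each
    // created (once, with explicit permissions) for the first application.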
    verify(spyFs).mkdirs(eq(userDir), isA(FsPermission.class));
    verify(spyFs).mkdirs(eq(suffixDir), isA(FsPermission.class));
    verify(spyFs).mkdirs(eq(appDir), isA(FsPermission.class));

    // start another application and verify only app dir created
    ApplicationId appId2 = BuilderUtils.newApplicationId(1, 2);
    Path appDir2 = new Path(suffixDir, appId2.toString());
    aggSvc.handle(new LogHandlerAppStartedEvent(appId2, this.user, null,
        ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
    verify(spyFs).mkdirs(eq(appDir2), isA(FsPermission.class));

    // start another application with the app dir already created and verify
    // we do not try to create it again
    ApplicationId appId3 = BuilderUtils.newApplicationId(1, 3);
    Path appDir3 = new Path(suffixDir, appId3.toString());
    new File(appDir3.toUri().getPath()).mkdir();
    aggSvc.handle(new LogHandlerAppStartedEvent(appId3, this.user, null,
        ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
    verify(spyFs, never()).mkdirs(eq(appDir3), isA(FsPermission.class));
    aggSvc.stop();
    aggSvc.close();
    dispatcher.stop();
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testLogAggregationInitAppFailsWithoutKillingNM()
      throws Exception {

    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler));
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId appId =
        BuilderUtils.newApplicationId(System.currentTimeMillis(),
            (int) (Math.random() * 1000));
    doThrow(new YarnRuntimeException("KABOOM!"))
        .when(logAggregationService).initAppAggregator(
            eq(appId), eq(user), any(Credentials.class),
            any(ContainerLogsRetentionPolicy.class), anyMap(),
            any(LogAggregationContext.class));

    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
        this.user, null,
        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,
        this.acls));

    dispatcher.await();
    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
        new ApplicationEvent(appId,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED)
    };
    checkEvents(appEventHandler, expectedEvents, false, "getType",
        "getApplicationID", "getDiagnostic");
    // no filesystems instantiated yet
    verify(logAggregationService, never()).closeFileSystems(
        any(UserGroupInformation.class));

    // verify trying to collect logs for containers/apps we don't know about
    // doesn't blow up and tear down the NM
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(
        BuilderUtils.newContainerId(4, 1, 1, 1), 0));
    dispatcher.await();
    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        BuilderUtils.newApplicationId(1, 5)));
    dispatcher.await();
  }

  @Test
  public void testLogAggregationCreateDirsFailsWithoutKillingNM()
      throws Exception {

    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler));
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId appId =
        BuilderUtils.newApplicationId(System.currentTimeMillis(),
            (int) (Math.random() * 1000));
    Exception e = new RuntimeException("KABOOM!");
    doThrow(e)
        .when(logAggregationService).createAppDir(any(String.class),
            any(ApplicationId.class), any(UserGroupInformation.class));
    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
        this.user, null,
        ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,
        this.acls));

    dispatcher.await();
    ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
        new ApplicationEvent(appId,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED)
    };
    checkEvents(appEventHandler, expectedEvents, false, "getType",
        "getApplicationID", "getDiagnostic");
    // filesystems may have been instantiated
    verify(logAggregationService).closeFileSystems(
        any(UserGroupInformation.class));

    // verify trying to collect logs for containers/apps we don't know about
    // doesn't blow up and tear down the NM
    logAggregationService.handle(new LogHandlerContainerFinishedEvent(
        BuilderUtils.newContainerId(4, 1, 1, 1), 0));
    dispatcher.await();
    logAggregationService.handle(new LogHandlerAppFinishedEvent(
        BuilderUtils.newApplicationId(1, 5)));
    dispatcher.await();

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
  }

  private void writeContainerLogs(File appLogDir, ContainerId containerId,
      String[] fileName) throws IOException {
    // ContainerLogDir should be created
    String containerStr = ConverterUtils.toString(containerId);
    File containerLogDir = new File(appLogDir, containerStr);
    containerLogDir.mkdir();
    for (String fileType : fileName) {
      Writer writer11 = new FileWriter(new File(containerLogDir, fileType));
      writer11.write(containerStr + " Hello " + fileType + "!");
      writer11.close();
    }
  }

  private LogFileStatusInLastCycle verifyContainerLogs(
      LogAggregationService logAggregationService, ApplicationId appId,
      ContainerId[] expectedContainerIds, String[] logFiles,
      int numOfContainerLogs, boolean multiLogs) throws IOException {
    Path appLogDir = logAggregationService.getRemoteAppLogDir(appId,
        this.user);
    RemoteIterator<FileStatus> nodeFiles = null;
    try {
      Path qualifiedLogDir =
          FileContext.getFileContext(this.conf).makeQualified(appLogDir);
      nodeFiles =
          FileContext.getFileContext(qualifiedLogDir.toUri(), this.conf)
              .listStatus(appLogDir);
    } catch (FileNotFoundException fnf) {
      Assert.fail("Should have log files");
    }

    Assert.assertTrue(nodeFiles.hasNext());
    FileStatus targetNodeFile = null;
    if (!multiLogs) {
      targetNodeFile = nodeFiles.next();
      Assert.assertTrue(targetNodeFile.getPath().getName().equals(
          LogAggregationUtils.getNodeString(
              logAggregationService.getNodeId())));
    } else {
      long fileCreateTime = 0;
      while (nodeFiles.hasNext()) {
        FileStatus nodeFile = nodeFiles.next();
        if (!nodeFile.getPath().getName()
            .contains(LogAggregationUtils.TMP_FILE_SUFFIX)) {
          long time =
              Long.parseLong(nodeFile.getPath().getName().split("_")[2]);
          if (time > fileCreateTime) {
            targetNodeFile = nodeFile;
            fileCreateTime = time;
          }
        }
      }
      String[] fileName = targetNodeFile.getPath().getName().split("_");
      Assert.assertTrue(fileName.length == 3);
      Assert.assertEquals(fileName[0] + ":" + fileName[1],
          logAggregationService.getNodeId().toString());
    }

    AggregatedLogFormat.LogReader reader =
        new AggregatedLogFormat.LogReader(this.conf, targetNodeFile.getPath());
    Assert.assertEquals(this.user, reader.getApplicationOwner());
    verifyAcls(reader.getApplicationAcls());

    List<String> fileTypes = new ArrayList<String>();

    try {
      Map<String, Map<String, String>> logMap =
          new HashMap<String, Map<String, String>>();
      DataInputStream valueStream;

      LogKey key = new LogKey();
      valueStream = reader.next(key);

      while (valueStream != null) {
        LOG.info("Found container " + key.toString());
        Map<String, String> perContainerMap = new HashMap<String, String>();
        logMap.put(key.toString(), perContainerMap);

        while (true) {
          try {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PrintStream ps = new PrintStream(baos);

            LogReader.readAContainerLogsForALogType(valueStream, ps);

            String writtenLines[] = baos.toString().split(
                System.getProperty("line.separator"));

            Assert.assertEquals("LogType:", writtenLines[0].substring(0, 8));
            String fileType = writtenLines[0].substring(8);
            fileTypes.add(fileType);

            Assert.assertEquals("LogLength:",
                writtenLines[1].substring(0, 10));
            String fileLengthStr = writtenLines[1].substring(10);
            long fileLength = Long.parseLong(fileLengthStr);

            Assert.assertEquals("Log Contents:",
                writtenLines[2].substring(0, 13));

            String logContents = StringUtils.join(
                Arrays.copyOfRange(writtenLines, 3, writtenLines.length),
                "\n");
            perContainerMap.put(fileType, logContents);

            LOG.info("LogType:" + fileType);
            LOG.info("LogLength:" + fileLength);
            LOG.info("Log Contents:\n" + perContainerMap.get(fileType));
          } catch (EOFException eof) {
            break;
          }
        }

        // Next container
        key = new LogKey();
        valueStream = reader.next(key);
      }

      // 1 for each container
      Assert.assertEquals(expectedContainerIds.length, logMap.size());
      for (ContainerId cId : expectedContainerIds) {
        String containerStr = ConverterUtils.toString(cId);
        Map<String, String> thisContainerMap = logMap.remove(containerStr);
        Assert.assertEquals(numOfContainerLogs, thisContainerMap.size());
        for (String fileType : logFiles) {
          String expectedValue =
              containerStr + " Hello " + fileType + "!End of LogType:"
                  + fileType;
          LOG.info("Expected log-content : " + new String(expectedValue));
          String foundValue = thisContainerMap.remove(fileType);
          Assert.assertNotNull(cId + " " + fileType
              + " not present in aggregated log-file!", foundValue);
          Assert.assertEquals(expectedValue, foundValue);
        }
        Assert.assertEquals(0, thisContainerMap.size());
      }
      Assert.assertEquals(0, logMap.size());
      return new LogFileStatusInLastCycle(targetNodeFile.getPath().getName(),
          fileTypes);
    } finally {
      reader.close();
    }
  }

  @Test
  public void testLogAggregationForRealContainerLaunch() throws IOException,
      InterruptedException, YarnException {

    this.containerManager.start();

    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    fileWriter.write("\necho Hello World! Stdout! > "
        + new File(localLogDir, "stdout"));
    fileWriter.write("\necho Hello World! Stderr! > "
        + new File(localLogDir, "stderr"));
    fileWriter.write("\necho Hello World! Syslog! > "
        + new File(localLogDir, "syslog"));
    fileWriter.close();

    ContainerLaunchContext containerLaunchContext =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);

    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId =
        BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0);

    URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS
        .makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha =
        recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources =
        new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);

    StartContainerRequest scRequest =
        StartContainerRequest.newInstance(containerLaunchContext,
            TestContainerManager.createContainerToken(
                cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
                context.getContainerTokenSecretManager()));
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests =
        StartContainersRequest.newInstance(list);
    this.containerManager.startContainers(allRequests);

    BaseContainerManagerTest.waitForContainerState(this.containerManager,
        cId, ContainerState.COMPLETE);

    this.containerManager.handle(new CMgrCompletedAppsEvent(Arrays
        .asList(appId), CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
    this.containerManager.stop();
  }

  private void verifyAcls(Map<ApplicationAccessType, String> logAcls) {
    Assert.assertEquals(this.acls.size(), logAcls.size());
    for (ApplicationAccessType appAccessType : this.acls.keySet()) {
      Assert.assertEquals(this.acls.get(appAccessType),
          logAcls.get(appAccessType));
    }
  }

  private DrainDispatcher createDispatcher() {
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(this.conf);
    dispatcher.start();
    return dispatcher;
  }

  private Map<ApplicationAccessType, String> createAppAcls() {
    Map<ApplicationAccessType, String> appAcls =
        new HashMap<ApplicationAccessType, String>();
    appAcls.put(ApplicationAccessType.MODIFY_APP, "user group");
    appAcls.put(ApplicationAccessType.VIEW_APP, "*");
    return appAcls;
  }

  @Test(timeout=20000)
  public void testStopAfterError() throws Exception {
    DeletionService delSrvc = mock(DeletionService.class);

    // get the AppLogAggregationImpl thread to crash
    LocalDirsHandlerService mockedDirSvc = mock(LocalDirsHandlerService.class);
    when(mockedDirSvc.getLogDirs()).thenThrow(new RuntimeException());

    LogAggregationService logAggregationService =
        new LogAggregationService(dispatcher, this.context, delSrvc,
            mockedDirSvc);
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    logAggregationService.handle(new LogHandlerAppStartedEvent(
        application1, this.user, null,
        ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    logAggregationService.close();
  }

  @Test
  public void testLogAggregatorCleanup() throws Exception {
    DeletionService delSrvc = mock(DeletionService.class);

    // use a mocked dirs handler; no log dirs are actually read here
    LocalDirsHandlerService mockedDirSvc = mock(LocalDirsHandlerService.class);

    LogAggregationService logAggregationService =
        new LogAggregationService(dispatcher, this.context, delSrvc,
            mockedDirSvc);
    logAggregationService.init(this.conf);
    logAggregationService.start();

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    logAggregationService.handle(new LogHandlerAppStartedEvent(
        application1, this.user, null,
        ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
    dispatcher.await();
    int timeToWait = 20 * 1000;
    while (timeToWait > 0 && logAggregationService.getNumAggregators() > 0) {
      Thread.sleep(100);
      timeToWait -= 100;
    }
    Assert.assertEquals("Log aggregator failed to cleanup!", 0,
        logAggregationService.getNumAggregators());
    logAggregationService.stop();
    logAggregationService.close();
  }

  @SuppressWarnings("unchecked")
  private static <T extends Event<?>>
  void checkEvents(EventHandler<T> eventHandler,
      T expectedEvents[], boolean inOrder,
      String... methods) throws Exception {
    Class<T> genericClass =
        (Class<T>) expectedEvents.getClass().getComponentType();
    ArgumentCaptor<T> eventCaptor = ArgumentCaptor.forClass(genericClass);
    // captor won't work unless used via a verify
    verify(eventHandler, atLeast(0)).handle(eventCaptor.capture());
    List<T> actualEvents = eventCaptor.getAllValues();

    // batch up exceptions so junit presents them as one
    MultiException failures = new MultiException();
    try {
      assertEquals("expected events", expectedEvents.length,
          actualEvents.size());
    } catch (Throwable e) {
      failures.add(e);
    }
    if (inOrder) {
      // sequentially verify the events
      int len = Math.max(expectedEvents.length, actualEvents.size());
      for (int n = 0; n < len; n++) {
        try {
          String expect = (n < expectedEvents.length)
              ? eventToString(expectedEvents[n], methods) : null;
          String actual = (n < actualEvents.size())
              ? eventToString(actualEvents.get(n), methods) : null;
          assertEquals("event#" + n, expect, actual);
        } catch (Throwable e) {
          failures.add(e);
        }
      }
    } else {
      // verify the actual events were expected
      // verify no expected events were not seen
      Set<String> expectedSet = new HashSet<String>();
      for (T expectedEvent : expectedEvents) {
        expectedSet.add(eventToString(expectedEvent, methods));
      }
      for (T actualEvent : actualEvents) {
        try {
          String actual = eventToString(actualEvent, methods);
          assertTrue("unexpected event: " + actual,
              expectedSet.remove(actual));
        } catch (Throwable e) {
          failures.add(e);
        }
      }
      for (String expected : expectedSet) {
        try {
          Assert.fail("missing event: " + expected);
        } catch (Throwable e) {
          failures.add(e);
        }
      }
    }
    failures.ifExceptionThrow();
  }

  private static String eventToString(Event<?> event,
      String[] methods) throws Exception {
    StringBuilder sb = new StringBuilder("[ ");
    for (String m : methods) {
      try {
        Method method = event.getClass().getMethod(m);
        String value = method.invoke(event).toString();
        sb.append(method.getName()).append("=").append(value).append(" ");
      } catch (Exception e) {
        // ignore, actual event may not implement the method...
      }
    }
    sb.append("]");
    return sb.toString();
  }

  /*
   * Test to make sure we handle cases where the directories we get back from
   * the LocalDirsHandler may have issues including the log dir not being
   * present as well as other issues. The test uses helper functions from
   * TestNonAggregatingLogHandler.
   */
  @Test
  public void testFailedDirsLocalFileDeletionAfterUpload() throws Exception {

    // setup conf and services
    DeletionService mockDelService = mock(DeletionService.class);
    File[] localLogDirs =
        TestNonAggregatingLogHandler.getLocalLogDirFiles(this.getClass()
            .getName(), 7);
    final List<String> localLogDirPaths =
        new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
      localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }

    String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());
    this.conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    ApplicationAttemptId appAttemptId =
        BuilderUtils.newApplicationAttemptId(application1, 1);

    this.dirsHandler = new LocalDirsHandlerService();
    LocalDirsHandlerService mockDirsHandler =
        mock(LocalDirsHandlerService.class);

    LogAggregationService logAggregationService =
        spy(new LogAggregationService(dispatcher, this.context,
            mockDelService, mockDirsHandler));
    AbstractFileSystem spylfs =
        spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    FileContext lfs = FileContext.getFileContext(spylfs, conf);
    doReturn(lfs).when(logAggregationService).getLocalFileContext(
        isA(Configuration.class));

    logAggregationService.init(this.conf);
    logAggregationService.start();

    TestNonAggregatingLogHandler.runMockedFailedDirs(logAggregationService,
        application1, user, mockDelService, mockDirsHandler, conf, spylfs,
        lfs, localLogDirs);

    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    verify(logAggregationService).closeFileSystems(
        any(UserGroupInformation.class));

    ApplicationEvent expectedEvents[] = new ApplicationEvent[] {
        new ApplicationEvent(appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
        new ApplicationEvent(appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
    };
    checkEvents(appEventHandler, expectedEvents, true, "getType",
        "getApplicationID");
  }

  @Test (timeout = 50000)
  @SuppressWarnings("unchecked")
  public void testLogAggregationServiceWithPatterns() throws Exception {

    LogAggregationContext logAggregationContextWithIncludePatterns =
        Records.newRecord(LogAggregationContext.class);
    String includePattern = "stdout|syslog";
    logAggregationContextWithIncludePatterns.setIncludePattern(includePattern);

    LogAggregationContext LogAggregationContextWithExcludePatterns =
        Records.newRecord(LogAggregationContext.class);
    String excludePattern = "stdout|syslog";
    LogAggregationContextWithExcludePatterns.setExcludePattern(excludePattern);

    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());

    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
    ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
    ApplicationId application4 = BuilderUtils.newApplicationId(1234, 4);
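    // The four applications cover the four pattern combinations exercised
    // below: include only (application1), exclude only (application2),
    // include with an overlapping exclude (application3), and an exclude
    // that narrows the include (application4).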
Application mockApp = mock(Application.class); when(mockApp.getContainers()).thenReturn( new HashMap<ContainerId, Container>()); this.context.getApplications().put(application1, mockApp); this.context.getApplications().put(application2, mockApp); this.context.getApplications().put(application3, mockApp); this.context.getApplications().put(application4, mockApp); LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler); logAggregationService.init(this.conf); logAggregationService.start(); // LogContext for application1 has an includePattern which includes // stdout and syslog. // After logAggregation is finished, we expect the logs for application1 // to contain only logs from stdout and syslog // AppLogDir should be created File appLogDir1 = new File(localLogDir, ConverterUtils.toString(application1)); appLogDir1.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls, logAggregationContextWithIncludePatterns)); ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(application1, 1); ContainerId container1 = BuilderUtils.newContainerId(appAttemptId1, 1); // Simulate log-file creation writeContainerLogs(appLogDir1, container1, new String[] { "stdout", "stderr", "syslog" }); logAggregationService.handle(new LogHandlerContainerFinishedEvent( container1, 0)); // LogContext for application2 has an excludePattern which includes // stdout and syslog. // After logAggregation is finished, we expect the logs for application2 // to contain only logs from stderr ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(application2, 1); File app2LogDir = new File(localLogDir, ConverterUtils.toString(application2)); app2LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application2, this.user, null, ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY, this.acls, logAggregationContextWithExcludePatterns)); ContainerId container2 = BuilderUtils.newContainerId(appAttemptId2, 1); writeContainerLogs(app2LogDir, container2, new String[] { "stdout", "stderr", "syslog" }); logAggregationService.handle( new LogHandlerContainerFinishedEvent(container2, 0)); // LogContext for application3 has an includePattern which is *.log and // an excludePattern which includes std.log and sys.log. // After logAggregation is finished, we expect the logs for application3 // to contain all logs whose suffix is .log, excluding sys.log and std.log LogAggregationContext context1 = Records.newRecord(LogAggregationContext.class); context1.setIncludePattern(".*.log"); context1.setExcludePattern("sys.log|std.log"); ApplicationAttemptId appAttemptId3 = BuilderUtils.newApplicationAttemptId(application3, 1); File app3LogDir = new File(localLogDir, ConverterUtils.toString(application3)); app3LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application3, this.user, null, ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY, this.acls, context1)); ContainerId container3 = BuilderUtils.newContainerId(appAttemptId3, 1); writeContainerLogs(app3LogDir, container3, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" }); logAggregationService.handle( new LogHandlerContainerFinishedEvent(container3, 0)); // LogContext for application4 has an includePattern // which includes std.log and sys.log and // an excludePattern which includes std.log.
// After logAggregation is finished, we expect the logs for application4 // to contain only sys.log LogAggregationContext context2 = Records.newRecord(LogAggregationContext.class); context2.setIncludePattern("sys.log|std.log"); context2.setExcludePattern("std.log"); ApplicationAttemptId appAttemptId4 = BuilderUtils.newApplicationAttemptId(application4, 1); File app4LogDir = new File(localLogDir, ConverterUtils.toString(application4)); app4LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application4, this.user, null, ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY, this.acls, context2)); ContainerId container4 = BuilderUtils.newContainerId(appAttemptId4, 1); writeContainerLogs(app4LogDir, container4, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" }); logAggregationService.handle( new LogHandlerContainerFinishedEvent(container4, 0)); dispatcher.await(); ApplicationEvent expectedInitEvents[] = new ApplicationEvent[] { new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)}; checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID"); reset(appEventHandler); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application2)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application3)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application4)); logAggregationService.stop(); assertEquals(0, logAggregationService.getNumAggregators()); String[] logFiles = new String[] { "stdout", "syslog" }; verifyContainerLogs(logAggregationService, application1, new ContainerId[] { container1 }, logFiles, 2, false); logFiles = new String[] { "stderr" }; verifyContainerLogs(logAggregationService, application2, new ContainerId[] { container2 }, logFiles, 1, false); logFiles = new String[] { "out.log", "err.log" }; verifyContainerLogs(logAggregationService, application3, new ContainerId[] { container3 }, logFiles, 2, false); logFiles = new String[] { "sys.log" }; verifyContainerLogs(logAggregationService, application4, new ContainerId[] { container4 }, logFiles, 1, false); dispatcher.await(); ApplicationEvent[] expectedFinishedEvents = new ApplicationEvent[] { new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED), new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED), new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED), new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) }; checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID"); } @Test (timeout = 50000) public void testLogAggregationServiceWithInterval() throws Exception { testLogAggregationService(false); } @Test (timeout = 50000) public void testLogAggregationServiceWithRetention() throws Exception { testLogAggregationService(true); } @SuppressWarnings("unchecked") private void testLogAggregationService(boolean retentionSizeLimitation) throws Exception { LogAggregationContext logAggregationContextWithInterval = Records.newRecord(LogAggregationContext.class); // set
IncludePattern/excludePattern in rolling fashion // we expect all the logs except std_final to be uploaded // while the app is running. The std_final will be uploaded when // the app finishes. logAggregationContextWithInterval.setRolledLogsIncludePattern(".*"); logAggregationContextWithInterval.setRolledLogsExcludePattern("std_final"); this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath()); //configure YarnConfiguration.NM_REMOTE_APP_LOG_DIR to //have a fully qualified path this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, "file://" + this.remoteRootLogDir.getAbsolutePath()); this.conf.setLong( YarnConfiguration.NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS, 3600); if (retentionSizeLimitation) { // set the retention size as 1. The number of logs for one application // in one NM should be 1. this.conf.setInt(YarnConfiguration.NM_PREFIX + "log-aggregation.num-log-files-per-app", 1); } // by setting this configuration, the log files will not be deleted immediately after // they are aggregated to the remote directory. // We could use it to test whether the previously aggregated log files will be aggregated // again in the next cycle. this.conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600); ApplicationId application = BuilderUtils.newApplicationId(123456, 1); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application, 1); ContainerId container = BuilderUtils.newContainerId(appAttemptId, 1); Context context = spy(this.context); ConcurrentMap<ApplicationId, Application> maps = new ConcurrentHashMap<ApplicationId, Application>(); Application app = mock(Application.class); Map<ContainerId, Container> containers = new HashMap<ContainerId, Container>(); containers.put(container, mock(Container.class)); maps.put(application, app); when(app.getContainers()).thenReturn(containers); when(context.getApplications()).thenReturn(maps); LogAggregationService logAggregationService = new LogAggregationService(dispatcher, context, this.delSrvc, super.dirsHandler); logAggregationService.init(this.conf); logAggregationService.start(); // AppLogDir should be created File appLogDir = new File(localLogDir, ConverterUtils.toString(application)); appLogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application, this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls, logAggregationContextWithInterval)); LogFileStatusInLastCycle logFileStatusInLastCycle = null; // Simulate log-file creation // create std_final in the log directory; it will not be aggregated // until the app finishes.
String[] logFiles1WithFinalLog = new String[] { "stdout", "stderr", "syslog", "std_final" }; String[] logFiles1 = new String[] { "stdout", "stderr", "syslog"}; writeContainerLogs(appLogDir, container, logFiles1WithFinalLog); // Do log aggregation AppLogAggregatorImpl aggregator = (AppLogAggregatorImpl) logAggregationService.getAppLogAggregators() .get(application); aggregator.doLogAggregationOutOfBand(); if (retentionSizeLimitation) { Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, true, null)); } else { Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, false, null)); } // Container logs should be uploaded logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application, new ContainerId[] { container }, logFiles1, 3, true); for(String logFile : logFiles1) { Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle() .contains(logFile)); } // Make sure the std_final is not uploaded. Assert.assertFalse(logFileStatusInLastCycle.getLogFileTypesInLastCycle() .contains("std_final")); Thread.sleep(2000); // There is no log generated at this time. Do the log aggregation again. aggregator.doLogAggregationOutOfBand(); // The same logs will not be aggregated again. // Only one aggregated log file should be in the remote directory. Assert.assertEquals(numOfLogsAvailable(logAggregationService, application, true, null), 1); Thread.sleep(2000); // Do log aggregation String[] logFiles2 = new String[] { "stdout_1", "stderr_1", "syslog_1" }; writeContainerLogs(appLogDir, container, logFiles2); aggregator.doLogAggregationOutOfBand(); if (retentionSizeLimitation) { Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, true, logFileStatusInLastCycle.getLogFilePathInLastCycle())); } else { Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 2, false, null)); } // Container logs should be uploaded logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application, new ContainerId[] { container }, logFiles2, 3, true); for(String logFile : logFiles2) { Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle() .contains(logFile)); } // Make sure the std_final is not uploaded. Assert.assertFalse(logFileStatusInLastCycle.getLogFileTypesInLastCycle() .contains("std_final")); Thread.sleep(2000); // create more logs String[] logFiles3 = new String[] { "stdout_2", "stderr_2", "syslog_2" }; writeContainerLogs(appLogDir, container, logFiles3); logAggregationService.handle( new LogHandlerContainerFinishedEvent(container, 0)); dispatcher.await(); logAggregationService.handle(new LogHandlerAppFinishedEvent(application)); if (retentionSizeLimitation) { Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, true, logFileStatusInLastCycle.getLogFilePathInLastCycle())); } else { Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 3, false, null)); } // the app is finished. The log "std_final" should be aggregated this time.
String[] logFiles3WithFinalLog = new String[] { "stdout_2", "stderr_2", "syslog_2", "std_final" }; verifyContainerLogs(logAggregationService, application, new ContainerId[] { container }, logFiles3WithFinalLog, 4, true); logAggregationService.stop(); assertEquals(0, logAggregationService.getNumAggregators()); } @Test (timeout = 20000) public void testAddNewTokenSentFromRMForLogAggregation() throws Exception { Configuration conf = new YarnConfiguration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1); Application mockApp = mock(Application.class); when(mockApp.getContainers()).thenReturn( new HashMap<ContainerId, Container>()); this.context.getApplications().put(application1, mockApp); @SuppressWarnings("resource") LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler); logAggregationService.init(this.conf); logAggregationService.start(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls, Records.newRecord(LogAggregationContext.class))); // Inject new token for log-aggregation after app log-aggregator init Text userText1 = new Text("user1"); RMDelegationTokenIdentifier dtId1 = new RMDelegationTokenIdentifier(userText1, new Text("renewer1"), userText1); final Token<RMDelegationTokenIdentifier> token1 = new Token<RMDelegationTokenIdentifier>(dtId1.getBytes(), "password1".getBytes(), dtId1.getKind(), new Text("service1")); Credentials credentials = new Credentials(); credentials.addToken(userText1, token1); this.context.getSystemCredentialsForApps().put(application1, credentials); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); final UserGroupInformation ugi = ((AppLogAggregatorImpl) logAggregationService.getAppLogAggregators() .get(application1)).getUgi(); GenericTestUtils.waitFor(new Supplier<Boolean>() { public Boolean get() { boolean hasNewToken = false; for (Token<?> token : ugi.getCredentials().getAllTokens()) { if (token.equals(token1)) { hasNewToken = true; } } return hasNewToken; } }, 1000, 20000); logAggregationService.stop(); } private int numOfLogsAvailable(LogAggregationService logAggregationService, ApplicationId appId, boolean sizeLimited, String lastLogFile) throws IOException { Path appLogDir = logAggregationService.getRemoteAppLogDir(appId, this.user); RemoteIterator<FileStatus> nodeFiles = null; try { Path qualifiedLogDir = FileContext.getFileContext(this.conf).makeQualified(appLogDir); nodeFiles = FileContext.getFileContext(qualifiedLogDir.toUri(), this.conf) .listStatus(appLogDir); } catch (FileNotFoundException fnf) { return -1; } int count = 0; while (nodeFiles.hasNext()) { FileStatus status = nodeFiles.next(); String filename = status.getPath().getName(); if (filename.contains(LogAggregationUtils.TMP_FILE_SUFFIX) || (lastLogFile != null && filename.contains(lastLogFile) && sizeLimited)) { return -1; } if (filename.contains(LogAggregationUtils .getNodeString(logAggregationService.getNodeId()))) { count++; } } return count; } private boolean waitAndCheckLogNum( LogAggregationService logAggregationService, ApplicationId application, int maxAttempts, int expectNum, boolean sizeLimited, String lastLogFile) throws IOException, InterruptedException { int count = 0; while (numOfLogsAvailable(logAggregationService, application, 
sizeLimited, lastLogFile) != expectNum && count <= maxAttempts) { Thread.sleep(500); count++; } return numOfLogsAvailable(logAggregationService, application, sizeLimited, lastLogFile) == expectNum; } private static class LogFileStatusInLastCycle { private String logFilePathInLastCycle; private List<String> logFileTypesInLastCycle; public LogFileStatusInLastCycle(String logFilePathInLastCycle, List<String> logFileTypesInLastCycle) { this.logFilePathInLastCycle = logFilePathInLastCycle; this.logFileTypesInLastCycle = logFileTypesInLastCycle; } public String getLogFilePathInLastCycle() { return this.logFilePathInLastCycle; } public List<String> getLogFileTypesInLastCycle() { return this.logFileTypesInLastCycle; } } }
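// ---------------------------------------------------------------------------
// Hedged illustration (not part of the original test): a minimal, standalone
// sketch of the reflection-based comparison technique that checkEvents() and
// eventToString() above rely on -- invoke a list of no-arg getters by name and
// fold the results into a single comparable string. EventToStringDemo and its
// describe() method are hypothetical names, not Hadoop API.
// ---------------------------------------------------------------------------
class EventToStringDemo {
  // Builds "[ getType=FOO getApplicationID=app_1 ]" style strings; getters the
  // event type does not implement are silently skipped, as in eventToString().
  static String describe(Object event, String... methods) {
    StringBuilder sb = new StringBuilder("[ ");
    for (String m : methods) {
      try {
        java.lang.reflect.Method method = event.getClass().getMethod(m);
        sb.append(m).append("=").append(method.invoke(event)).append(" ");
      } catch (Exception e) {
        // ignore: this event type may not implement the requested getter
      }
    }
    return sb.append("]").toString();
  }
}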
68,085
40.566545
116
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; public abstract class ContainerExecutor implements Configurable { private static final Log LOG = LogFactory.getLog(ContainerExecutor.class); final public static FsPermission TASK_LAUNCH_SCRIPT_PERMISSION = FsPermission.createImmutable((short) 0700); private Configuration conf; private ConcurrentMap<ContainerId, Path> pidFiles = new ConcurrentHashMap<ContainerId, Path>(); private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); private final ReadLock readLock = lock.readLock(); private final WriteLock writeLock = lock.writeLock(); @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public 
Configuration getConf() { return conf; } /** * Run the executor initialization steps. * Verify that the necessary configs and permissions are in place. * @throws IOException */ public abstract void init() throws IOException; /** * On Windows the ContainerLaunch creates a temporary special jar manifest of * other jars to work around the CLASSPATH length limit. In a secure cluster this * jar must be localized so that the container has access to it. * This function localizes the jar on demand. * * @param classPathJar * @param pwd * @param owner * @throws IOException */ public Path localizeClasspathJar(Path classPathJar, Path pwd, String owner) throws IOException { // The non-secure executor simply uses the classpath created // in the NM private folder return classPathJar; } /** * Prepare the environment for containers in this application to execute. * <pre> * For $x in local.dirs * create $x/$user/$appId * Copy $nmLocal/appTokens {@literal ->} $N/$user/$appId * For $rsrc in private resources * Copy $rsrc {@literal ->} $N/$user/filecache/[idef] * For $rsrc in job resources * Copy $rsrc {@literal ->} $N/$user/$appId/filecache/idef * </pre> * @param ctx LocalizerStartContext that encapsulates necessary information * for starting a localizer. * @throws IOException For most application init failures * @throws InterruptedException If application init thread is halted by NM */ public abstract void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException; /** * Launch the container on the node. This is a blocking call and returns only * when the container exits. * @param ctx Encapsulates information necessary for launching containers. * @return the return status of the launch * @throws IOException */ public abstract int launchContainer(ContainerStartContext ctx) throws IOException; /** * Signal container with the specified signal. * @param ctx Encapsulates information necessary for signaling containers. * @return true if the operation succeeded * @throws IOException */ public abstract boolean signalContainer(ContainerSignalContext ctx) throws IOException; /** * Delete specified directories as a given user. * @param ctx Encapsulates information necessary for deletion. * @throws IOException * @throws InterruptedException */ public abstract void deleteAsUser(DeletionAsUserContext ctx) throws IOException, InterruptedException; /** * Check if a container is alive. * @param ctx Encapsulates information necessary for container liveness check. * @return true if container is still alive * @throws IOException */ public abstract boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException; /** * Recover an already existing container. This is a blocking call and returns * only when the container exits. Note that the container must have been * activated prior to this call.
* @param ctx encapsulates information necessary to reacquire container * @return The exit code of the pre-existing container * @throws IOException * @throws InterruptedException */ public int reacquireContainer(ContainerReacquisitionContext ctx) throws IOException, InterruptedException { Container container = ctx.getContainer(); String user = ctx.getUser(); ContainerId containerId = ctx.getContainerId(); Path pidPath = getPidFilePath(containerId); if (pidPath == null) { LOG.warn(containerId + " is not active, returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } String pid = null; pid = ProcessIdFileReader.getProcessId(pidPath); if (pid == null) { throw new IOException("Unable to determine pid for " + containerId); } LOG.info("Reacquiring " + containerId + " with pid " + pid); ContainerLivenessContext livenessContext = new ContainerLivenessContext .Builder() .setContainer(container) .setUser(user) .setPid(pid) .build(); while(isContainerAlive(livenessContext)) { Thread.sleep(1000); } // wait for exit code file to appear String exitCodeFile = ContainerLaunch.getExitCodeFile(pidPath.toString()); File file = new File(exitCodeFile); final int sleepMsec = 100; int msecLeft = 2000; while (!file.exists() && msecLeft >= 0) { if (!isContainerActive(containerId)) { LOG.info(containerId + " was deactivated"); return ExitCode.TERMINATED.getExitCode(); } Thread.sleep(sleepMsec); msecLeft -= sleepMsec; } if (msecLeft < 0) { throw new IOException("Timeout while waiting for exit code from " + containerId); } try { return Integer.parseInt(FileUtils.readFileToString(file).trim()); } catch (NumberFormatException e) { throw new IOException("Error parsing exit code from pid " + pid, e); } } /** * This method writes out the launch environment of a container. This can be * overridden by extending ContainerExecutors to provide different behaviors * @param out the output stream to which the environment is written (usually * a script file which will be executed by the Launcher) * @param environment The environment variables and their values * @param resources The resources which have been localized for this container * Symlinks will be created to these localized resources * @param command The command that will be run. 
* @throws IOException if any errors happened writing to the OutputStream, * while creating symlinks */ public void writeLaunchEnv(OutputStream out, Map<String, String> environment, Map<Path, List<String>> resources, List<String> command) throws IOException{ ContainerLaunch.ShellScriptBuilder sb = ContainerLaunch.ShellScriptBuilder.create(); Set<String> whitelist = new HashSet<String>(); whitelist.add(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME); whitelist.add(ApplicationConstants.Environment.HADOOP_YARN_HOME.name()); whitelist.add(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name()); whitelist.add(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name()); whitelist.add(ApplicationConstants.Environment.HADOOP_CONF_DIR.name()); whitelist.add(ApplicationConstants.Environment.JAVA_HOME.name()); if (environment != null) { for (Map.Entry<String,String> env : environment.entrySet()) { if (!whitelist.contains(env.getKey())) { sb.env(env.getKey().toString(), env.getValue().toString()); } else { sb.whitelistedEnv(env.getKey().toString(), env.getValue().toString()); } } } if (resources != null) { for (Map.Entry<Path,List<String>> entry : resources.entrySet()) { for (String linkName : entry.getValue()) { sb.symlink(entry.getKey(), new Path(linkName)); } } } sb.command(command); PrintStream pout = null; try { pout = new PrintStream(out, false, "UTF-8"); sb.write(pout); } finally { if (out != null) { out.close(); } } } public enum ExitCode { FORCE_KILLED(137), TERMINATED(143), LOST(154); private final int code; private ExitCode(int exitCode) { this.code = exitCode; } public int getExitCode() { return code; } @Override public String toString() { return String.valueOf(code); } } /** * The constants for the signals. */ public enum Signal { NULL(0, "NULL"), QUIT(3, "SIGQUIT"), KILL(9, "SIGKILL"), TERM(15, "SIGTERM"); private final int value; private final String str; private Signal(int value, String str) { this.str = str; this.value = value; } public int getValue() { return value; } @Override public String toString() { return str; } } protected void logOutput(String output) { String shExecOutput = output; if (shExecOutput != null) { for (String str : shExecOutput.split("\n")) { LOG.info(str); } } } /** * Get the pidFile of the container. * @param containerId * @return the path of the pid-file for the given containerId. */ protected Path getPidFilePath(ContainerId containerId) { try { readLock.lock(); return (this.pidFiles.get(containerId)); } finally { readLock.unlock(); } } protected String[] getRunCommand(String command, String groupId, String userName, Path pidFile, Configuration conf) { return getRunCommand(command, groupId, userName, pidFile, conf, null); } /** * Return a command to execute the given command in OS shell. * On Windows, the passed in groupId can be used to launch * and associate the given groupId in a process group. On * non-Windows, groupId is ignored. 
*/ protected String[] getRunCommand(String command, String groupId, String userName, Path pidFile, Configuration conf, Resource resource) { boolean containerSchedPriorityIsSet = false; int containerSchedPriorityAdjustment = YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY; if (conf.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) != null) { containerSchedPriorityIsSet = true; containerSchedPriorityAdjustment = conf .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY); } if (Shell.WINDOWS) { int cpuRate = -1; int memory = -1; if (resource != null) { if (conf .getBoolean( YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) { memory = resource.getMemory(); } if (conf.getBoolean( YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) { int containerVCores = resource.getVirtualCores(); int nodeVCores = NodeManagerHardwareUtils.getVCores(conf); int nodeCpuPercentage = NodeManagerHardwareUtils.getNodeCpuPercentage(conf); float containerCpuPercentage = (float) (nodeCpuPercentage * containerVCores) / nodeVCores; // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit // should be set as 20 * 100. cpuRate = Math.min(10000, (int) (containerCpuPercentage * 100)); } } return new String[] { Shell.WINUTILS, "task", "create", "-m", String.valueOf(memory), "-c", String.valueOf(cpuRate), groupId, "cmd /c " + command }; } else { List<String> retCommand = new ArrayList<String>(); if (containerSchedPriorityIsSet) { retCommand.addAll(Arrays.asList("nice", "-n", Integer.toString(containerSchedPriorityAdjustment))); } retCommand.addAll(Arrays.asList("bash", command)); return retCommand.toArray(new String[retCommand.size()]); } } /** * Is the container still active? * @param containerId * @return true if the container is active else false. */ protected boolean isContainerActive(ContainerId containerId) { try { readLock.lock(); return (this.pidFiles.containsKey(containerId)); } finally { readLock.unlock(); } } /** * Mark the container as active * * @param containerId * the ContainerId * @param pidFilePath * Path where the executor should write the pid of the launched * process */ public void activateContainer(ContainerId containerId, Path pidFilePath) { try { writeLock.lock(); this.pidFiles.put(containerId, pidFilePath); } finally { writeLock.unlock(); } } /** * Mark the container as inactive. * Done iff the container is still active. Else treat it as * a no-op */ public void deactivateContainer(ContainerId containerId) { try { writeLock.lock(); this.pidFiles.remove(containerId); } finally { writeLock.unlock(); } } /** * Get the process-identifier for the container * * @param containerID * @return the processid of the container if it has already launched, * otherwise return null */ public String getProcessId(ContainerId containerID) { String pid = null; Path pidFile = pidFiles.get(containerID); if (pidFile == null) { // This container isn't even launched yet. 
return pid; } try { pid = ProcessIdFileReader.getProcessId(pidFile); } catch (IOException e) { LOG.error("Got exception reading pid from pid-file " + pidFile, e); } return pid; } public static class DelayedProcessKiller extends Thread { private Container container; private final String user; private final String pid; private final long delay; private final Signal signal; private final ContainerExecutor containerExecutor; public DelayedProcessKiller(Container container, String user, String pid, long delay, Signal signal, ContainerExecutor containerExecutor) { this.container = container; this.user = user; this.pid = pid; this.delay = delay; this.signal = signal; this.containerExecutor = containerExecutor; setName("Task killer for " + pid); setDaemon(false); } @Override public void run() { try { Thread.sleep(delay); containerExecutor.signalContainer(new ContainerSignalContext.Builder() .setContainer(container) .setUser(user) .setPid(pid) .setSignal(signal) .build()); } catch (InterruptedException e) { return; } catch (IOException e) { String message = "Exception when user " + user + " killing task " + pid + " in DelayedProcessKiller: " + StringUtils.stringifyException(e); LOG.warn(message); container.handle(new ContainerDiagnosticsUpdateEvent(container .getContainerId(), message)); } } } }
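// ---------------------------------------------------------------------------
// Hedged illustration (not part of the original class): the bounded polling
// pattern reacquireContainer() uses above -- wait for an exit-code file to
// appear, sleeping between attempts, then parse its contents. The class name,
// helper signature and timeout values here are illustrative only.
// ---------------------------------------------------------------------------
class ExitCodePollDemo {
  // Returns the recorded exit code, or throws if the file never appears
  // within timeoutMs milliseconds.
  static int awaitExitCode(java.io.File exitCodeFile, long timeoutMs)
      throws java.io.IOException, InterruptedException {
    final long sleepMs = 100;
    long msecLeft = timeoutMs;
    while (!exitCodeFile.exists() && msecLeft >= 0) {
      Thread.sleep(sleepMs);
      msecLeft -= sleepMs;
    }
    if (!exitCodeFile.exists()) {
      throw new java.io.IOException(
          "Timeout while waiting for exit code in " + exitCodeFile);
    }
    byte[] raw = java.nio.file.Files.readAllBytes(exitCodeFile.toPath());
    return Integer.parseInt(
        new String(raw, java.nio.charset.StandardCharsets.UTF_8).trim());
  }
}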
18,086
33.255682
108
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.util.Map; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; /** * Context interface for sharing information across components in the * NodeManager. */ public interface Context { /** * Return the nodeId. Usable only when the ContainerManager is started. * * @return the NodeId */ NodeId getNodeId(); /** * Return the node http-address. Usable only after the Webserver is started. * * @return the http-port */ int getHttpPort(); ConcurrentMap<ApplicationId, Application> getApplications(); Map<ApplicationId, Credentials> getSystemCredentialsForApps(); ConcurrentMap<ContainerId, Container> getContainers(); NMContainerTokenSecretManager getContainerTokenSecretManager(); NMTokenSecretManagerInNM getNMTokenSecretManager(); NodeHealthStatus getNodeHealthStatus(); ContainerManagementProtocol getContainerManager(); LocalDirsHandlerService getLocalDirsHandler(); ApplicationACLsManager getApplicationACLsManager(); NMStateStoreService getNMStateStore(); boolean getDecommissioned(); void setDecommissioned(boolean isDecommissioned); ConcurrentLinkedQueue<LogAggregationReport> getLogAggregationStatusForApps(); }
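// ---------------------------------------------------------------------------
// Hedged illustration (not part of the original interface): how a component
// typically consumes the concurrent maps a Context exposes -- for example,
// checking whether a container's owning application is still registered
// before acting on it. ContextConsumerDemo is a hypothetical name; only the
// getApplications() accessor declared above is assumed.
// ---------------------------------------------------------------------------
class ContextConsumerDemo {
  // True iff the application that owns this container is still known to the NM.
  static boolean appStillKnown(Context context, ContainerId containerId) {
    ApplicationId appId =
        containerId.getApplicationAttemptId().getApplicationId();
    return context.getApplications().containsKey(appId);
  }
}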
3,016
34.081395
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; public enum NodeManagerEventType { SHUTDOWN, RESYNC }
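// ---------------------------------------------------------------------------
// Hedged illustration (not part of the original file): a handler would
// typically dispatch on these event types; the descriptions below reflect the
// usual NodeManager behavior (SHUTDOWN stops the NM, RESYNC re-registers with
// the ResourceManager), but the demo class itself is hypothetical.
// ---------------------------------------------------------------------------
class NodeManagerEventTypeDemo {
  static String describe(NodeManagerEventType type) {
    switch (type) {
      case SHUTDOWN:
        return "stop NodeManager services and exit";
      case RESYNC:
        return "re-register with the ResourceManager";
      default:
        return "unknown event type";
    }
  }
}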
915
38.826087
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Pattern; import org.apache.commons.lang.math.RandomUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.base.Strings; /** * This executor will launch and run tasks inside Docker containers. It * currently only supports simple authentication mode. It shares a lot of code * with the DefaultContainerExecutor (and it may make sense to pull out those * common pieces later).
*/ public class DockerContainerExecutor extends ContainerExecutor { private static final Log LOG = LogFactory .getLog(DockerContainerExecutor.class); //The name of the script file that will launch the Docker containers public static final String DOCKER_CONTAINER_EXECUTOR_SCRIPT = "docker_container_executor"; //The name of the session script that the DOCKER_CONTAINER_EXECUTOR_SCRIPT //launches in turn public static final String DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT = "docker_container_executor_session"; //This validates that the image is a proper docker image and would not crash //docker. The image name is not allowed to contain spaces. e.g. //registry.somecompany.com:9999/containername:0.1 or //containername:0.1 or //containername public static final String DOCKER_IMAGE_PATTERN = "^(([\\w\\.-]+)(:\\d+)*\\/)?[\\w\\.:-]+$"; private final FileContext lfs; private final Pattern dockerImagePattern; public DockerContainerExecutor() { try { this.lfs = FileContext.getLocalFSFileContext(); this.dockerImagePattern = Pattern.compile(DOCKER_IMAGE_PATTERN); } catch (UnsupportedFileSystemException e) { throw new RuntimeException(e); } } protected void copyFile(Path src, Path dst, String owner) throws IOException { lfs.util().copy(src, dst); } @Override public void init() throws IOException { String auth = getConf().get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION); if (auth != null && !auth.equals("simple")) { throw new IllegalStateException( "DockerContainerExecutor only works with simple authentication mode"); } String dockerExecutor = getConf().get( YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, YarnConfiguration.NM_DEFAULT_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME); if (!new File(dockerExecutor).exists()) { throw new IllegalStateException( "Invalid docker exec path: " + dockerExecutor); } } @Override public synchronized void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens(); InetSocketAddress nmAddr = ctx.getNmAddr(); String user = ctx.getUser(); String appId = ctx.getAppId(); String locId = ctx.getLocId(); LocalDirsHandlerService dirsHandler = ctx.getDirsHandler(); List<String> localDirs = dirsHandler.getLocalDirs(); List<String> logDirs = dirsHandler.getLogDirs(); ContainerLocalizer localizer = new ContainerLocalizer(lfs, user, appId, locId, getPaths(localDirs), RecordFactoryProvider.getRecordFactory(getConf())); createUserLocalDirs(localDirs, user); createUserCacheDirs(localDirs, user); createAppDirs(localDirs, user, appId); createAppLogDirs(appId, logDirs, user); // randomly choose the local directory Path appStorageDir = getWorkingDir(localDirs, user, appId); String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId); Path tokenDst = new Path(appStorageDir, tokenFn); copyFile(nmPrivateContainerTokensPath, tokenDst, user); LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + tokenDst); lfs.setWorkingDirectory(appStorageDir); LOG.info("CWD set to " + appStorageDir + " = " + lfs.getWorkingDirectory()); // TODO: DO it over RPC for maintaining similarity? 
localizer.runLocalization(nmAddr); } @Override public int launchContainer(ContainerStartContext ctx) throws IOException { Container container = ctx.getContainer(); Path nmPrivateContainerScriptPath = ctx.getNmPrivateContainerScriptPath(); Path nmPrivateTokensPath = ctx.getNmPrivateTokensPath(); String userName = ctx.getUser(); Path containerWorkDir = ctx.getContainerWorkDir(); List<String> localDirs = ctx.getLocalDirs(); List<String> logDirs = ctx.getLogDirs(); //Variables for the launch environment can be injected from the command-line //while submitting the application String containerImageName = container.getLaunchContext().getEnvironment() .get(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME); if (LOG.isDebugEnabled()) { LOG.debug("containerImageName from launchContext: " + containerImageName); } Preconditions.checkArgument(!Strings.isNullOrEmpty(containerImageName), "Container image must not be null"); containerImageName = containerImageName.replaceAll("['\"]", ""); Preconditions.checkArgument(saneDockerImage(containerImageName), "Image: " + containerImageName + " is not a proper docker image"); String dockerExecutor = getConf().get( YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, YarnConfiguration.NM_DEFAULT_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME); FsPermission dirPerm = new FsPermission(APPDIR_PERM); ContainerId containerId = container.getContainerId(); // create container dirs on all disks String containerIdStr = ConverterUtils.toString(containerId); String appIdStr = ConverterUtils.toString( containerId.getApplicationAttemptId().getApplicationId()); for (String sLocalDir : localDirs) { Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, userName); Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE); Path appDir = new Path(appCacheDir, appIdStr); Path containerDir = new Path(appDir, containerIdStr); createDir(containerDir, dirPerm, true, userName); } // Create the container log-dirs on all disks createContainerLogDirs(appIdStr, containerIdStr, logDirs, userName); Path tmpDir = new Path(containerWorkDir, YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR); createDir(tmpDir, dirPerm, false, userName); // copy launch script to work dir Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT); lfs.util().copy(nmPrivateContainerScriptPath, launchDst); // copy container tokens to work dir Path tokenDst = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE); lfs.util().copy(nmPrivateTokensPath, tokenDst); String localDirMount = toMount(localDirs); String logDirMount = toMount(logDirs); String containerWorkDirMount = toMount(Collections.singletonList( containerWorkDir.toUri().getPath())); StringBuilder commands = new StringBuilder(); //Use docker run to launch the docker container. See man pages for //docker-run //--rm removes the container automatically once the container finishes //--net=host allows the container to take on the host's network stack //--name sets the Docker Container name to the YARN containerId string //-v is used to bind mount volumes for local, log and work dirs. 
String commandStr = commands.append(dockerExecutor) .append(" ") .append("run") .append(" ") .append("--rm --net=host") .append(" ") .append(" --name " + containerIdStr) .append(localDirMount) .append(logDirMount) .append(containerWorkDirMount) .append(" ") .append(containerImageName) .toString(); //Get the pid of the process which has been launched as a docker container //using docker inspect String dockerPidScript = "`" + dockerExecutor + " inspect --format {{.State.Pid}} " + containerIdStr + "`"; // Create new local launch wrapper script LocalWrapperScriptBuilder sb = new UnixLocalWrapperScriptBuilder( containerWorkDir, commandStr, dockerPidScript); Path pidFile = getPidFilePath(containerId); if (pidFile != null) { sb.writeLocalWrapperScript(launchDst, pidFile); } else { //Although the container was activated by ContainerLaunch before exec() //was called, since then deactivateContainer() has been called. LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } ShellCommandExecutor shExec = null; try { lfs.setPermission(launchDst, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION); lfs.setPermission(sb.getWrapperScriptPath(), ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION); // Setup command to run String[] command = getRunCommand(sb.getWrapperScriptPath().toString(), containerIdStr, userName, pidFile, this.getConf()); if (LOG.isDebugEnabled()) { LOG.debug("launchContainer: " + commandStr + " " + Joiner.on(" ").join(command)); } shExec = new ShellCommandExecutor( command, new File(containerWorkDir.toUri().getPath()), container.getLaunchContext().getEnvironment()); // sanitized env if (isContainerActive(containerId)) { shExec.execute(); } else { LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } } catch (IOException e) { if (null == shExec) { return -1; } int exitCode = shExec.getExitCode(); LOG.warn("Exit code from container " + containerId + " is : " + exitCode); // 143 (SIGTERM) and 137 (SIGKILL) exit codes mean the container was // terminated/killed forcefully. In all other cases, log the // container-executor's output if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) { LOG.warn("Exception from container-launch with container ID: " + containerId + " and exit code: " + exitCode, e); logOutput(shExec.getOutput()); String diagnostics = "Exception from container-launch: \n" + StringUtils.stringifyException(e) + "\n" + shExec.getOutput(); container.handle(new ContainerDiagnosticsUpdateEvent(containerId, diagnostics)); } else { container.handle(new ContainerDiagnosticsUpdateEvent(containerId, "Container killed on request. Exit code is " + exitCode)); } return exitCode; } finally { if (shExec != null) { shExec.close(); } } return 0; } @Override /** * Filter the environment variables that may conflict with the ones set in * the docker image and write them out to an OutputStream. */ public void writeLaunchEnv(OutputStream out, Map<String, String> environment, Map<Path, List<String>> resources, List<String> command) throws IOException { ContainerLaunch.ShellScriptBuilder sb = ContainerLaunch.ShellScriptBuilder.create(); //Remove environment variables that may conflict with the ones in the Docker image.
Set<String> exclusionSet = new HashSet<String>(); exclusionSet.add(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME); exclusionSet.add(ApplicationConstants.Environment.HADOOP_YARN_HOME.name()); exclusionSet.add(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name()); exclusionSet.add(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name()); exclusionSet.add(ApplicationConstants.Environment.HADOOP_CONF_DIR.name()); exclusionSet.add(ApplicationConstants.Environment.JAVA_HOME.name()); if (environment != null) { for (Map.Entry<String,String> env : environment.entrySet()) { if (!exclusionSet.contains(env.getKey())) { sb.env(env.getKey().toString(), env.getValue().toString()); } } } if (resources != null) { for (Map.Entry<Path,List<String>> entry : resources.entrySet()) { for (String linkName : entry.getValue()) { sb.symlink(entry.getKey(), new Path(linkName)); } } } sb.command(command); PrintStream pout = null; PrintStream ps = null; ByteArrayOutputStream baos = new ByteArrayOutputStream(); try { pout = new PrintStream(out, false, "UTF-8"); if (LOG.isDebugEnabled()) { ps = new PrintStream(baos, false, "UTF-8"); sb.write(ps); } sb.write(pout); } finally { if (out != null) { out.close(); } if (ps != null) { ps.close(); } } if (LOG.isDebugEnabled()) { LOG.debug("Script: " + baos.toString("UTF-8")); } } private boolean saneDockerImage(String containerImageName) { return dockerImagePattern.matcher(containerImageName).matches(); } @Override public boolean signalContainer(ContainerSignalContext ctx) throws IOException { String user = ctx.getUser(); String pid = ctx.getPid(); Signal signal = ctx.getSignal(); if (LOG.isDebugEnabled()) { LOG.debug("Sending signal " + signal.getValue() + " to pid " + pid + " as user " + user); } if (!containerIsAlive(pid)) { return false; } try { killContainer(pid, signal); } catch (IOException e) { if (!containerIsAlive(pid)) { return false; } throw e; } return true; } @Override public boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException { String pid = ctx.getPid(); return containerIsAlive(pid); } /** * Returns true if the process with the specified pid is alive. * * @param pid String pid * @return boolean true if the process is alive */ @VisibleForTesting public static boolean containerIsAlive(String pid) throws IOException { try { new ShellCommandExecutor(Shell.getCheckProcessIsAliveCommand(pid)) .execute(); // successful execution means process is alive return true; } catch (Shell.ExitCodeException e) { // failure (non-zero exit code) means process is not alive return false; } } /** * Send a specified signal to the specified pid * * @param pid the pid of the process [group] to signal. * @param signal signal to send * (for logging). */ protected void killContainer(String pid, Signal signal) throws IOException { new ShellCommandExecutor(Shell.getSignalKillCommand(signal.getValue(), pid)) .execute(); } @Override public void deleteAsUser(DeletionAsUserContext ctx) throws IOException, InterruptedException { Path subDir = ctx.getSubDir(); List<Path> baseDirs = ctx.getBasedirs(); if (baseDirs == null || baseDirs.size() == 0) { LOG.info("Deleting absolute path : " + subDir); if (!lfs.delete(subDir, true)) { //Maybe retry LOG.warn("delete returned false for path: [" + subDir + "]"); } return; } for (Path baseDir : baseDirs) { Path del = subDir == null ? 
baseDir : new Path(baseDir, subDir); LOG.info("Deleting path : " + del); if (!lfs.delete(del, true)) { LOG.warn("delete returned false for path: [" + del + "]"); } } } /** * Converts a directory list to a docker mount string * @param dirs * @return a string of mounts for docker */ private String toMount(List<String> dirs) { StringBuilder builder = new StringBuilder(); for (String dir : dirs) { builder.append(" -v " + dir + ":" + dir); } return builder.toString(); } //This class facilitates (only) the creation of platform-specific scripts that //will be used to launch the containers //TODO: This should be re-used from the DefaultContainerExecutor. private abstract class LocalWrapperScriptBuilder { private final Path wrapperScriptPath; public Path getWrapperScriptPath() { return wrapperScriptPath; } public void writeLocalWrapperScript(Path launchDst, Path pidFile) throws IOException { DataOutputStream out = null; PrintStream pout = null; try { out = lfs.create(wrapperScriptPath, EnumSet.of(CREATE, OVERWRITE)); pout = new PrintStream(out, false, "UTF-8"); writeLocalWrapperScript(launchDst, pidFile, pout); } finally { IOUtils.cleanup(LOG, pout, out); } } protected abstract void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream pout); protected LocalWrapperScriptBuilder(Path containerWorkDir) { this.wrapperScriptPath = new Path(containerWorkDir, Shell.appendScriptExtension(DOCKER_CONTAINER_EXECUTOR_SCRIPT)); } } //TODO: This class too should be used from DefaultContainerExecutor. private final class UnixLocalWrapperScriptBuilder extends LocalWrapperScriptBuilder { private final Path sessionScriptPath; private final String dockerCommand; private final String dockerPidScript; public UnixLocalWrapperScriptBuilder(Path containerWorkDir, String dockerCommand, String dockerPidScript) { super(containerWorkDir); this.dockerCommand = dockerCommand; this.dockerPidScript = dockerPidScript; this.sessionScriptPath = new Path(containerWorkDir, Shell.appendScriptExtension(DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT)); } @Override public void writeLocalWrapperScript(Path launchDst, Path pidFile) throws IOException { writeSessionScript(launchDst, pidFile); super.writeLocalWrapperScript(launchDst, pidFile); } @Override public void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream pout) { String exitCodeFile = ContainerLaunch.getExitCodeFile( pidFile.toString()); String tmpFile = exitCodeFile + ".tmp"; pout.println("#!/usr/bin/env bash"); pout.println("bash \"" + sessionScriptPath.toString() + "\""); pout.println("rc=$?"); pout.println("echo $rc > \"" + tmpFile + "\""); pout.println("mv -f \"" + tmpFile + "\" \"" + exitCodeFile + "\""); pout.println("exit $rc"); } private void writeSessionScript(Path launchDst, Path pidFile) throws IOException { DataOutputStream out = null; PrintStream pout = null; try { out = lfs.create(sessionScriptPath, EnumSet.of(CREATE, OVERWRITE)); pout = new PrintStream(out, false, "UTF-8"); // We need to do a move as writing to a file is not atomic // Process reading a file being written to may get garbled data // hence write pid to tmp file first followed by a mv pout.println("#!/usr/bin/env bash"); pout.println(); pout.println("echo "+ dockerPidScript +" > " + pidFile.toString() + ".tmp"); pout.println("/bin/mv -f " + pidFile.toString() + ".tmp " + pidFile); pout.println(dockerCommand + " bash \"" + launchDst.toUri().getPath().toString() + "\""); } finally { IOUtils.cleanup(LOG, pout, out); } lfs.setPermission(sessionScriptPath, 
ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION); } } protected void createDir(Path dirPath, FsPermission perms, boolean createParent, String user) throws IOException { lfs.mkdir(dirPath, perms, createParent); if (!perms.equals(perms.applyUMask(lfs.getUMask()))) { lfs.setPermission(dirPath, perms); } } /** * Initialize the local directories for a particular user. * <ul> * <li>$local.dir/usercache/$user</li> * </ul> */ void createUserLocalDirs(List<String> localDirs, String user) throws IOException { boolean userDirStatus = false; FsPermission userperms = new FsPermission(USER_PERM); for (String localDir : localDirs) { // create $local.dir/usercache/$user and its immediate parent try { createDir(getUserCacheDir(new Path(localDir), user), userperms, true, user); } catch (IOException e) { LOG.warn("Unable to create the user directory : " + localDir, e); continue; } userDirStatus = true; } if (!userDirStatus) { throw new IOException("Not able to initialize user directories " + "in any of the configured local directories for user " + user); } } /** * Initialize the local cache directories for a particular user. * <ul> * <li>$local.dir/usercache/$user</li> * <li>$local.dir/usercache/$user/appcache</li> * <li>$local.dir/usercache/$user/filecache</li> * </ul> */ void createUserCacheDirs(List<String> localDirs, String user) throws IOException { LOG.info("Initializing user " + user); boolean appcacheDirStatus = false; boolean distributedCacheDirStatus = false; FsPermission appCachePerms = new FsPermission(APPCACHE_PERM); FsPermission fileperms = new FsPermission(FILECACHE_PERM); for (String localDir : localDirs) { // create $local.dir/usercache/$user/appcache Path localDirPath = new Path(localDir); final Path appDir = getAppcacheDir(localDirPath, user); try { createDir(appDir, appCachePerms, true, user); appcacheDirStatus = true; } catch (IOException e) { LOG.warn("Unable to create app cache directory : " + appDir, e); } // create $local.dir/usercache/$user/filecache final Path distDir = getFileCacheDir(localDirPath, user); try { createDir(distDir, fileperms, true, user); distributedCacheDirStatus = true; } catch (IOException e) { LOG.warn("Unable to create file cache directory : " + distDir, e); } } if (!appcacheDirStatus) { throw new IOException("Not able to initialize app-cache directories " + "in any of the configured local directories for user " + user); } if (!distributedCacheDirStatus) { throw new IOException( "Not able to initialize distributed-cache directories " + "in any of the configured local directories for user " + user); } } /** * Initialize the local application directories for a particular user. * <ul> * <li>$local.dir/usercache/$user/appcache/$appid</li> * </ul> * @param localDirs */ void createAppDirs(List<String> localDirs, String user, String appId) throws IOException { boolean initAppDirStatus = false; FsPermission appperms = new FsPermission(APPDIR_PERM); for (String localDir : localDirs) { Path fullAppDir = getApplicationDir(new Path(localDir), user, appId); // create $local.dir/usercache/$user/appcache/$appId try { createDir(fullAppDir, appperms, true, user); initAppDirStatus = true; } catch (IOException e) { LOG.warn("Unable to create app directory " + fullAppDir.toString(), e); } } if (!initAppDirStatus) { throw new IOException("Not able to initialize app directories " + "in any of the configured local directories for app " + appId.toString()); } } /** * Create container log directories on all disks.
*/ void createContainerLogDirs(String appId, String containerId, List<String> logDirs, String user) throws IOException { boolean containerLogDirStatus = false; FsPermission containerLogDirPerms = new FsPermission(LOGDIR_PERM); for (String rootLogDir : logDirs) { // create $log.dir/$appid/$containerid Path appLogDir = new Path(rootLogDir, appId); Path containerLogDir = new Path(appLogDir, containerId); try { createDir(containerLogDir, containerLogDirPerms, true, user); } catch (IOException e) { LOG.warn("Unable to create the container-log directory : " + appLogDir, e); continue; } containerLogDirStatus = true; } if (!containerLogDirStatus) { throw new IOException( "Not able to initialize container-log directories " + "in any of the configured local directories for container " + containerId); } } /** * Permissions for user dir. * $local.dir/usercache/$user */ static final short USER_PERM = (short) 0750; /** * Permissions for user appcache dir. * $local.dir/usercache/$user/appcache */ static final short APPCACHE_PERM = (short) 0710; /** * Permissions for user filecache dir. * $local.dir/usercache/$user/filecache */ static final short FILECACHE_PERM = (short) 0710; /** * Permissions for user app dir. * $local.dir/usercache/$user/appcache/$appId */ static final short APPDIR_PERM = (short) 0710; /** * Permissions for user log dir. * $logdir/$user/$appId */ static final short LOGDIR_PERM = (short) 0710; private long getDiskFreeSpace(Path base) throws IOException { return lfs.getFsStatus(base).getRemaining(); } private Path getApplicationDir(Path base, String user, String appId) { return new Path(getAppcacheDir(base, user), appId); } private Path getUserCacheDir(Path base, String user) { return new Path(new Path(base, ContainerLocalizer.USERCACHE), user); } private Path getAppcacheDir(Path base, String user) { return new Path(getUserCacheDir(base, user), ContainerLocalizer.APPCACHE); } private Path getFileCacheDir(Path base, String user) { return new Path(getUserCacheDir(base, user), ContainerLocalizer.FILECACHE); } protected Path getWorkingDir(List<String> localDirs, String user, String appId) throws IOException { Path appStorageDir = null; long totalAvailable = 0L; long[] availableOnDisk = new long[localDirs.size()]; int i = 0; // randomly choose the app directory // the chance of picking a directory is proportional to // the available space on the directory. // firstly calculate the sum of all available space on these directories for (String localDir : localDirs) { Path curBase = getApplicationDir(new Path(localDir), user, appId); long space = 0L; try { space = getDiskFreeSpace(curBase); } catch (IOException e) { LOG.warn("Unable to get Free Space for " + curBase.toString(), e); } availableOnDisk[i++] = space; totalAvailable += space; } // throw an IOException if totalAvailable is 0. if (totalAvailable <= 0L) { throw new IOException("Not able to find a working directory for " + user); } // make probability to pick a directory proportional to // the available space on the directory. long randomPosition = RandomUtils.nextLong() % totalAvailable; int dir = 0; // skip zero available space directory, // because totalAvailable is greater than 0 and randomPosition // is less than totalAvailable, we can find a valid directory // with nonzero available space. 
while (availableOnDisk[dir] == 0L) { dir++; } while (randomPosition > availableOnDisk[dir]) { randomPosition -= availableOnDisk[dir++]; } appStorageDir = getApplicationDir(new Path(localDirs.get(dir)), user, appId); return appStorageDir; } /** * Create application log directories on all disks. */ void createAppLogDirs(String appId, List<String> logDirs, String user) throws IOException { boolean appLogDirStatus = false; FsPermission appLogDirPerms = new FsPermission(LOGDIR_PERM); for (String rootLogDir : logDirs) { // create $log.dir/$appid Path appLogDir = new Path(rootLogDir, appId); try { createDir(appLogDir, appLogDirPerms, true, user); } catch (IOException e) { LOG.warn("Unable to create the app-log directory : " + appLogDir, e); continue; } appLogDirStatus = true; } if (!appLogDirStatus) { throw new IOException("Not able to initialize app-log directories " + "in any of the configured local directories for app " + appId); } } /** * @return the list of paths of given local directories */ private static List<Path> getPaths(List<String> dirs) { List<Path> paths = new ArrayList<Path>(dirs.size()); for (int i = 0; i < dirs.size(); i++) { paths.add(new Path(dirs.get(i))); } return paths; } }
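For clarity, here is a minimal standalone sketch of the space-weighted directory pick that getWorkingDir above implements. The class name, the pickDir helper, and the sample free-space values are hypothetical. One deliberate difference: the original takes RandomUtils.nextLong() % totalAvailable, which can yield an out-of-range remainder when nextLong() returns a negative value, so the sketch uses Math.floorMod to keep the position in [0, totalAvailable).

import java.util.Random;

public class WeightedDirPickSketch {
  private static final Random RAND = new Random();

  // Returns an index with probability roughly proportional to availableOnDisk[i].
  static int pickDir(long[] availableOnDisk) {
    long totalAvailable = 0L;
    for (long space : availableOnDisk) {
      totalAvailable += space;
    }
    if (totalAvailable <= 0L) {
      throw new IllegalStateException("no directory has free space");
    }
    // Uniform position in [0, totalAvailable).
    long randomPosition = Math.floorMod(RAND.nextLong(), totalAvailable);
    int dir = 0;
    // Skip leading zero-space entries, mirroring the original loop.
    while (availableOnDisk[dir] == 0L) {
      dir++;
    }
    // Walk the cumulative distribution until the position falls inside a bucket.
    while (randomPosition > availableOnDisk[dir]) {
      randomPosition -= availableOnDisk[dir++];
    }
    return dir;
  }

  public static void main(String[] args) {
    long[] free = {100L, 0L, 300L}; // dir 2 should be picked about 3x as often as dir 0
    System.out.println("picked dir index: " + pickDir(free));
  }
}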
31,512
35.558005
108
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.NodeHealthScriptRunner; /** * Checks the health of the node and reports the result back to the * services for which the health checker has been asked to report. */ public class NodeHealthCheckerService extends CompositeService { private NodeHealthScriptRunner nodeHealthScriptRunner; private LocalDirsHandlerService dirsHandler; static final String SEPARATOR = ";"; public NodeHealthCheckerService(NodeHealthScriptRunner scriptRunner, LocalDirsHandlerService dirHandlerService) { super(NodeHealthCheckerService.class.getName()); nodeHealthScriptRunner = scriptRunner; dirsHandler = dirHandlerService; } @Override protected void serviceInit(Configuration conf) throws Exception { if (nodeHealthScriptRunner != null) { addService(nodeHealthScriptRunner); } addService(dirsHandler); super.serviceInit(conf); } /** * @return the health report string for the node */ String getHealthReport() { String scriptReport = (nodeHealthScriptRunner == null) ? "" : nodeHealthScriptRunner.getHealthReport(); if (scriptReport.equals("")) { return dirsHandler.getDisksHealthReport(false); } else { return scriptReport.concat(SEPARATOR + dirsHandler.getDisksHealthReport(false)); } } /** * @return <em>true</em> if the node is healthy */ boolean isHealthy() { boolean scriptHealthStatus = (nodeHealthScriptRunner == null) ? true : nodeHealthScriptRunner.isHealthy(); return scriptHealthStatus && dirsHandler.areDisksHealthy(); } /** * @return the time when the node health status was last reported */ long getLastHealthReportTime() { long diskCheckTime = dirsHandler.getLastDisksCheckTime(); long lastReportTime = (nodeHealthScriptRunner == null) ? diskCheckTime : Math.max(nodeHealthScriptRunner.getLastReportedTime(), diskCheckTime); return lastReportTime; } /** * @return the disk handler */ public LocalDirsHandlerService getDiskHandler() { return dirsHandler; } /** * @return the node health script runner */ NodeHealthScriptRunner getNodeHealthScriptRunner() { return nodeHealthScriptRunner; } }
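As a quick illustration of how the two health sources above combine, a hedged sketch with hypothetical class and helper names; the ";" separator and the null-means-no-script convention mirror the SEPARATOR constant and isHealthy() logic:

public class HealthReportSketch {
  static final String SEPARATOR = ";";

  // Mirrors getHealthReport(): a non-empty script report is prefixed to the disk report.
  static String mergeHealthReports(String scriptReport, String disksReport) {
    if (scriptReport == null || scriptReport.isEmpty()) {
      return disksReport;
    }
    return scriptReport + SEPARATOR + disksReport;
  }

  // Mirrors isHealthy(): no configured script counts as a healthy script status,
  // and the node is healthy only when both checks pass.
  static boolean nodeHealthy(Boolean scriptHealthy, boolean disksHealthy) {
    return (scriptHealthy == null || scriptHealthy) && disksHealthy;
  }

  public static void main(String[] args) {
    System.out.println(mergeHealthReports("script failed", "1/4 local-dirs are bad"));
    System.out.println(nodeHealthy(null, true)); // true: no script configured, disks fine
  }
}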
3,212
31.13
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.io.FileUtils; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.DiskChecker; /** * Manages a list of local storage directories. */ public class DirectoryCollection { private static final Log LOG = LogFactory.getLog(DirectoryCollection.class); /** * The enum defines disk failure type. */ public enum DiskErrorCause { DISK_FULL, OTHER } static class DiskErrorInformation { DiskErrorCause cause; String message; DiskErrorInformation(DiskErrorCause cause, String message) { this.cause = cause; this.message = message; } } /** * The interface provides a callback when localDirs is changed. */ public interface DirsChangeListener { void onDirsChanged(); } /** * Returns a merged list which contains all the elements of l1 and l2 * @param l1 the first list to be included * @param l2 the second list to be included * @return a new list containing all the elements of the first and second list */ static List<String> concat(List<String> l1, List<String> l2) { List<String> ret = new ArrayList<String>(l1.size() + l2.size()); ret.addAll(l1); ret.addAll(l2); return ret; } // Good local storage directories private List<String> localDirs; private List<String> errorDirs; private List<String> fullDirs; private int numFailures; private float diskUtilizationPercentageCutoff; private long diskUtilizationSpaceCutoff; private int goodDirsDiskUtilizationPercentage; private Set<DirsChangeListener> dirsChangeListeners; /** * Create collection for the directories specified. No check for free space. * * @param dirs * directories to be monitored */ public DirectoryCollection(String[] dirs) { this(dirs, 100.0F, 0); } /** * Create collection for the directories specified. Users must specify the * maximum percentage of disk utilization allowed. Minimum amount of disk * space is not checked. * * @param dirs * directories to be monitored * @param utilizationPercentageCutOff * percentage of disk that can be used before the dir is taken out of * the good dirs list * */ public DirectoryCollection(String[] dirs, float utilizationPercentageCutOff) { this(dirs, utilizationPercentageCutOff, 0); } /** * Create collection for the directories specified. 
Users must specify the * minimum amount of free space that must be available for the dir to be used. * * @param dirs * directories to be monitored * @param utilizationSpaceCutOff * minimum space, in MB, that must be available on the disk for the * dir to be marked as good * */ public DirectoryCollection(String[] dirs, long utilizationSpaceCutOff) { this(dirs, 100.0F, utilizationSpaceCutOff); } /** * Create collection for the directories specified. Users must specify the * maximum percentage of disk utilization allowed and the minimum amount of * free space that must be available for the dir to be used. If either check * fails the dir is removed from the good dirs list. * * @param dirs * directories to be monitored * @param utilizationPercentageCutOff * percentage of disk that can be used before the dir is taken out of * the good dirs list * @param utilizationSpaceCutOff * minimum space, in MB, that must be available on the disk for the * dir to be marked as good * */ public DirectoryCollection(String[] dirs, float utilizationPercentageCutOff, long utilizationSpaceCutOff) { localDirs = new CopyOnWriteArrayList<String>(dirs); errorDirs = new CopyOnWriteArrayList<String>(); fullDirs = new CopyOnWriteArrayList<String>(); diskUtilizationPercentageCutoff = utilizationPercentageCutOff < 0.0F ? 0.0F : (utilizationPercentageCutOff > 100.0F ? 100.0F : utilizationPercentageCutOff); diskUtilizationSpaceCutoff = utilizationSpaceCutOff < 0 ? 0 : utilizationSpaceCutOff; dirsChangeListeners = new HashSet<DirsChangeListener>(); } synchronized void registerDirsChangeListener( DirsChangeListener listener) { if (dirsChangeListeners.add(listener)) { listener.onDirsChanged(); } } synchronized void deregisterDirsChangeListener( DirsChangeListener listener) { dirsChangeListeners.remove(listener); } /** * @return the current valid directories */ synchronized List<String> getGoodDirs() { return Collections.unmodifiableList(localDirs); } /** * @return the failed directories */ synchronized List<String> getFailedDirs() { return Collections.unmodifiableList( DirectoryCollection.concat(errorDirs, fullDirs)); } /** * @return the directories that have used all disk space */ synchronized List<String> getFullDirs() { return fullDirs; } /** * @return total the number of directory failures seen till now */ synchronized int getNumFailures() { return numFailures; } /** * Create any non-existent directories and parent directories, updating the * list of valid directories if necessary. * @param localFs local file system to use * @param perm absolute permissions to use for any directories created * @return true if there were no errors, false if at least one error occurred */ synchronized boolean createNonExistentDirs(FileContext localFs, FsPermission perm) { boolean failed = false; for (final String dir : localDirs) { try { createDir(localFs, new Path(dir), perm); } catch (IOException e) { LOG.warn("Unable to create directory " + dir + " error " + e.getMessage() + ", removing from the list of valid directories."); localDirs.remove(dir); errorDirs.add(dir); numFailures++; failed = true; } } return !failed; } /** * Check the health of current set of local directories(good and failed), * updating the list of valid directories if necessary. * * @return <em>true</em> if there is a new disk-failure identified in this * checking or a failed directory passes the disk check <em>false</em> * otherwise. 
*/ synchronized boolean checkDirs() { boolean setChanged = false; Set<String> preCheckGoodDirs = new HashSet<String>(localDirs); Set<String> preCheckFullDirs = new HashSet<String>(fullDirs); Set<String> preCheckOtherErrorDirs = new HashSet<String>(errorDirs); List<String> failedDirs = DirectoryCollection.concat(errorDirs, fullDirs); List<String> allLocalDirs = DirectoryCollection.concat(localDirs, failedDirs); Map<String, DiskErrorInformation> dirsFailedCheck = testDirs(allLocalDirs); localDirs.clear(); errorDirs.clear(); fullDirs.clear(); for (Map.Entry<String, DiskErrorInformation> entry : dirsFailedCheck .entrySet()) { String dir = entry.getKey(); DiskErrorInformation errorInformation = entry.getValue(); switch (entry.getValue().cause) { case DISK_FULL: fullDirs.add(entry.getKey()); break; case OTHER: errorDirs.add(entry.getKey()); break; } if (preCheckGoodDirs.contains(dir)) { LOG.warn("Directory " + dir + " error, " + errorInformation.message + ", removing from list of valid directories"); setChanged = true; numFailures++; } } for (String dir : allLocalDirs) { if (!dirsFailedCheck.containsKey(dir)) { localDirs.add(dir); if (preCheckFullDirs.contains(dir) || preCheckOtherErrorDirs.contains(dir)) { setChanged = true; LOG.info("Directory " + dir + " passed disk check, adding to list of valid directories."); } } } Set<String> postCheckFullDirs = new HashSet<String>(fullDirs); Set<String> postCheckOtherDirs = new HashSet<String>(errorDirs); for (String dir : preCheckFullDirs) { if (postCheckOtherDirs.contains(dir)) { LOG.warn("Directory " + dir + " error " + dirsFailedCheck.get(dir).message); } } for (String dir : preCheckOtherErrorDirs) { if (postCheckFullDirs.contains(dir)) { LOG.warn("Directory " + dir + " error " + dirsFailedCheck.get(dir).message); } } setGoodDirsDiskUtilizationPercentage(); if (setChanged) { for (DirsChangeListener listener : dirsChangeListeners) { listener.onDirsChanged(); } } return setChanged; } Map<String, DiskErrorInformation> testDirs(List<String> dirs) { HashMap<String, DiskErrorInformation> ret = new HashMap<String, DiskErrorInformation>(); for (final String dir : dirs) { String msg; try { File testDir = new File(dir); DiskChecker.checkDir(testDir); if (isDiskUsageOverPercentageLimit(testDir)) { msg = "used space above threshold of " + diskUtilizationPercentageCutoff + "%"; ret.put(dir, new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg)); continue; } else if (isDiskFreeSpaceUnderLimit(testDir)) { msg = "free space below limit of " + diskUtilizationSpaceCutoff + "MB"; ret.put(dir, new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg)); continue; } // create a random dir to make sure fs isn't in read-only mode verifyDirUsingMkdir(testDir); } catch (IOException ie) { ret.put(dir, new DiskErrorInformation(DiskErrorCause.OTHER, ie.getMessage())); } } return ret; } /** * Function to test whether a dir is working correctly by actually creating a * random directory. 
* * @param dir * the dir to test */ private void verifyDirUsingMkdir(File dir) throws IOException { String randomDirName = RandomStringUtils.randomAlphanumeric(5); File target = new File(dir, randomDirName); int i = 0; while (target.exists()) { randomDirName = RandomStringUtils.randomAlphanumeric(5) + i; target = new File(dir, randomDirName); i++; } try { DiskChecker.checkDir(target); } finally { FileUtils.deleteQuietly(target); } } private boolean isDiskUsageOverPercentageLimit(File dir) { float freePercentage = 100 * (dir.getUsableSpace() / (float) dir.getTotalSpace()); float usedPercentage = 100.0F - freePercentage; return (usedPercentage > diskUtilizationPercentageCutoff || usedPercentage >= 100.0F); } private boolean isDiskFreeSpaceUnderLimit(File dir) { long freeSpace = dir.getUsableSpace() / (1024 * 1024); return freeSpace < this.diskUtilizationSpaceCutoff; } private void createDir(FileContext localFs, Path dir, FsPermission perm) throws IOException { if (dir == null) { return; } try { localFs.getFileStatus(dir); } catch (FileNotFoundException e) { createDir(localFs, dir.getParent(), perm); localFs.mkdir(dir, perm, false); if (!perm.equals(perm.applyUMask(localFs.getUMask()))) { localFs.setPermission(dir, perm); } } } public float getDiskUtilizationPercentageCutoff() { return diskUtilizationPercentageCutoff; } public void setDiskUtilizationPercentageCutoff( float diskUtilizationPercentageCutoff) { this.diskUtilizationPercentageCutoff = diskUtilizationPercentageCutoff < 0.0F ? 0.0F : (diskUtilizationPercentageCutoff > 100.0F ? 100.0F : diskUtilizationPercentageCutoff); } public long getDiskUtilizationSpaceCutoff() { return diskUtilizationSpaceCutoff; } public void setDiskUtilizationSpaceCutoff(long diskUtilizationSpaceCutoff) { diskUtilizationSpaceCutoff = diskUtilizationSpaceCutoff < 0 ? 0 : diskUtilizationSpaceCutoff; this.diskUtilizationSpaceCutoff = diskUtilizationSpaceCutoff; } private void setGoodDirsDiskUtilizationPercentage() { long totalSpace = 0; long usableSpace = 0; for (String dir : localDirs) { File f = new File(dir); if (!f.isDirectory()) { continue; } totalSpace += f.getTotalSpace(); usableSpace += f.getUsableSpace(); } if (totalSpace != 0) { long tmp = ((totalSpace - usableSpace) * 100) / totalSpace; if (Integer.MIN_VALUE < tmp && Integer.MAX_VALUE > tmp) { goodDirsDiskUtilizationPercentage = (int) tmp; } } else { // got no good dirs goodDirsDiskUtilizationPercentage = 0; } } public int getGoodDirsDiskUtilizationPercentage() { return goodDirsDiskUtilizationPercentage; } }
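To make the two disk-full checks above concrete, a small self-contained sketch; the threshold values and class name are examples, not the NodeManager defaults (in the real class they come from configuration):

import java.io.File;

public class DiskCutoffSketch {
  public static void main(String[] args) {
    float percentageCutoff = 90.0F; // example: dir counts as full above 90% used
    long spaceCutoffMb = 1024L;     // example: dir counts as full below 1 GB free
    File dir = new File(args.length > 0 ? args[0] : ".");

    // Mirrors isDiskUsageOverPercentageLimit(): used% derived from usable/total space.
    float freePercentage = 100 * (dir.getUsableSpace() / (float) dir.getTotalSpace());
    float usedPercentage = 100.0F - freePercentage;
    boolean overPercentage = usedPercentage > percentageCutoff || usedPercentage >= 100.0F;

    // Mirrors isDiskFreeSpaceUnderLimit(): free space converted to MB.
    long freeSpaceMb = dir.getUsableSpace() / (1024 * 1024);
    boolean underSpace = freeSpaceMb < spaceCutoffMb;

    System.out.println(dir.getAbsolutePath() + " marked DISK_FULL? "
        + (overPercentage || underSpace)
        + " (used=" + usedPercentage + "%, free=" + freeSpaceMb + "MB)");
  }
}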
14,374
30.524123
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.io.IOException; import java.net.ConnectException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Random; import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionUtil; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.api.ResourceManagerConstants; import org.apache.hadoop.yarn.server.api.ResourceTracker; import org.apache.hadoop.yarn.server.api.ServerRMProxy; import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; import 
org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.api.records.ResourceUtilization; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider; import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; import org.apache.hadoop.yarn.util.YarnVersionInfo; import com.google.common.annotations.VisibleForTesting; public class NodeStatusUpdaterImpl extends AbstractService implements NodeStatusUpdater { public static final String YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS = YarnConfiguration.NM_PREFIX + "duration-to-track-stopped-containers"; private static final Log LOG = LogFactory.getLog(NodeStatusUpdaterImpl.class); private final Object heartbeatMonitor = new Object(); private final Context context; private final Dispatcher dispatcher; private NodeId nodeId; private long nextHeartBeatInterval; private ResourceTracker resourceTracker; private Resource totalResource; private int httpPort; private String nodeManagerVersionId; private String minimumResourceManagerVersion; private volatile boolean isStopped; private boolean tokenKeepAliveEnabled; private long tokenRemovalDelayMs; /** Keeps track of when the next keep alive request should be sent for an app*/ private Map<ApplicationId, Long> appTokenKeepAliveMap = new HashMap<ApplicationId, Long>(); private Random keepAliveDelayRandom = new Random(); // It will be used to track recently stopped containers on node manager, this // is to avoid the misleading no-such-container exception messages on NM, when // the AM finishes it informs the RM to stop the may-be-already-completed // containers. private final Map<ContainerId, Long> recentlyStoppedContainers; // Save the reported completed containers in case of lost heartbeat responses. // These completed containers will be sent again till a successful response. private final Map<ContainerId, ContainerStatus> pendingCompletedContainers; // Duration for which to track recently stopped container. 
private long durationToTrackStoppedContainers; private boolean logAggregationEnabled; private final List<LogAggregationReport> logAggregationReportForAppsTempList; private final NodeHealthCheckerService healthChecker; private final NodeManagerMetrics metrics; private Runnable statusUpdaterRunnable; private Thread statusUpdater; private long rmIdentifier = ResourceManagerConstants.RM_INVALID_IDENTIFIER; private boolean registeredWithRM = false; Set<ContainerId> pendingContainersToRemove = new HashSet<ContainerId>(); private final NodeLabelsProvider nodeLabelsProvider; private final boolean hasNodeLabelsProvider; public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) { this(context, dispatcher, healthChecker, metrics, null); } public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, NodeLabelsProvider nodeLabelsProvider) { super(NodeStatusUpdaterImpl.class.getName()); this.healthChecker = healthChecker; this.nodeLabelsProvider = nodeLabelsProvider; this.hasNodeLabelsProvider = (nodeLabelsProvider != null); this.context = context; this.dispatcher = dispatcher; this.metrics = metrics; this.recentlyStoppedContainers = new LinkedHashMap<ContainerId, Long>(); this.pendingCompletedContainers = new HashMap<ContainerId, ContainerStatus>(); this.logAggregationReportForAppsTempList = new ArrayList<LogAggregationReport>(); } @Override protected void serviceInit(Configuration conf) throws Exception { int memoryMb = NodeManagerHardwareUtils.getContainerMemoryMB(conf); float vMemToPMem = conf.getFloat( YarnConfiguration.NM_VMEM_PMEM_RATIO, YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO); int virtualMemoryMb = (int)Math.ceil(memoryMb * vMemToPMem); int virtualCores = NodeManagerHardwareUtils.getVCores(conf); LOG.info("Nodemanager resources: memory set to " + memoryMb + "MB."); LOG.info("Nodemanager resources: vcores set to " + virtualCores + "."); this.totalResource = Resource.newInstance(memoryMb, virtualCores); metrics.addResource(totalResource); this.tokenKeepAliveEnabled = isTokenKeepAliveEnabled(conf); this.tokenRemovalDelayMs = conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS); this.minimumResourceManagerVersion = conf.get( YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION, YarnConfiguration.DEFAULT_NM_RESOURCEMANAGER_MINIMUM_VERSION); // Default duration to track stopped containers on nodemanager is 10Min. // This should not be assigned very large value as it will remember all the // containers stopped during that time. 
durationToTrackStoppedContainers = conf.getLong(YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS, 600000); if (durationToTrackStoppedContainers < 0) { String message = "Invalid configuration for " + YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS + " default " + "value is 10Min(600000)."; LOG.error(message); throw new YarnException(message); } if (LOG.isDebugEnabled()) { LOG.debug(YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS + " :" + durationToTrackStoppedContainers); } super.serviceInit(conf); LOG.info("Initialized nodemanager with :" + " physical-memory=" + memoryMb + " virtual-memory=" + virtualMemoryMb + " virtual-cores=" + virtualCores); this.logAggregationEnabled = conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED); } @Override protected void serviceStart() throws Exception { // NodeManager is the last service to start, so NodeId is available. this.nodeId = this.context.getNodeId(); LOG.info("Node ID assigned is : " + this.nodeId); this.httpPort = this.context.getHttpPort(); this.nodeManagerVersionId = YarnVersionInfo.getVersion(); try { // Registration has to be in start so that ContainerManager can get the // perNM tokens needed to authenticate ContainerTokens. this.resourceTracker = getRMClient(); registerWithRM(); super.serviceStart(); startStatusUpdater(); } catch (Exception e) { String errorMessage = "Unexpected error starting NodeStatusUpdater"; LOG.error(errorMessage, e); throw new YarnRuntimeException(e); } } @Override protected void serviceStop() throws Exception { // the isStopped check is for avoiding multiple unregistrations. if (this.registeredWithRM && !this.isStopped && !isNMUnderSupervisionWithRecoveryEnabled() && !context.getDecommissioned()) { unRegisterNM(); } // Interrupt the updater. this.isStopped = true; stopRMProxy(); super.serviceStop(); } private boolean isNMUnderSupervisionWithRecoveryEnabled() { Configuration config = getConfig(); return config.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED) && config.getBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED); } private void unRegisterNM() { RecordFactory recordFactory = RecordFactoryPBImpl.get(); UnRegisterNodeManagerRequest request = recordFactory .newRecordInstance(UnRegisterNodeManagerRequest.class); request.setNodeId(this.nodeId); try { resourceTracker.unRegisterNodeManager(request); LOG.info("Successfully Unregistered the Node " + this.nodeId + " with ResourceManager."); } catch (Exception e) { LOG.warn("Unregistration of the Node " + this.nodeId + " failed.", e); } } protected void rebootNodeStatusUpdaterAndRegisterWithRM() { // Interrupt the updater. 
this.isStopped = true; try { statusUpdater.join(); registerWithRM(); statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater"); this.isStopped = false; statusUpdater.start(); LOG.info("NodeStatusUpdater thread is reRegistered and restarted"); } catch (Exception e) { String errorMessage = "Unexpected error rebooting NodeStatusUpdater"; LOG.error(errorMessage, e); throw new YarnRuntimeException(e); } } @VisibleForTesting protected void stopRMProxy() { if(this.resourceTracker != null) { RPC.stopProxy(this.resourceTracker); } } @Private protected boolean isTokenKeepAliveEnabled(Configuration conf) { return conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED) && UserGroupInformation.isSecurityEnabled(); } @VisibleForTesting protected ResourceTracker getRMClient() throws IOException { Configuration conf = getConfig(); return ServerRMProxy.createRMProxy(conf, ResourceTracker.class); } @VisibleForTesting protected void registerWithRM() throws YarnException, IOException { List<NMContainerStatus> containerReports = getNMContainerStatuses(); Set<NodeLabel> nodeLabels = null; if (hasNodeLabelsProvider) { nodeLabels = nodeLabelsProvider.getNodeLabels(); nodeLabels = (null == nodeLabels) ? CommonNodeLabelsManager.EMPTY_NODELABEL_SET : nodeLabels; } RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(nodeId, httpPort, totalResource, nodeManagerVersionId, containerReports, getRunningApplications(), nodeLabels); if (containerReports != null) { LOG.info("Registering with RM using containers :" + containerReports); } RegisterNodeManagerResponse regNMResponse = resourceTracker.registerNodeManager(request); this.rmIdentifier = regNMResponse.getRMIdentifier(); // if the Resource Manager instructs NM to shutdown. if (NodeAction.SHUTDOWN.equals(regNMResponse.getNodeAction())) { String message = "Message from ResourceManager: " + regNMResponse.getDiagnosticsMessage(); throw new YarnRuntimeException( "Received SHUTDOWN signal from ResourceManager, registration of NodeManager failed, " + message); } // if ResourceManager version is too old then shutdown if (!minimumResourceManagerVersion.equals("NONE")){ if (minimumResourceManagerVersion.equals("EqualToNM")){ minimumResourceManagerVersion = nodeManagerVersionId; } String rmVersion = regNMResponse.getRMVersion(); if (rmVersion == null) { String message = "The ResourceManager did not return a version. " + "Valid version cannot be checked."; throw new YarnRuntimeException("Shutting down the Node Manager. " + message); } if (VersionUtil.compareVersions(rmVersion,minimumResourceManagerVersion) < 0) { String message = "The Resource Manager's version (" + rmVersion +") is less than the minimum " + "allowed version " + minimumResourceManagerVersion; throw new YarnRuntimeException("Shutting down the Node Manager on RM " + "version error, " + message); } } this.registeredWithRM = true; MasterKey masterKey = regNMResponse.getContainerTokenMasterKey(); // do this now so that it's set before we start heartbeating to RM // It is expected that status updater is started by this point and // RM gives the shared secret in registration during // StatusUpdater#start().
if (masterKey != null) { this.context.getContainerTokenSecretManager().setMasterKey(masterKey); } masterKey = regNMResponse.getNMTokenMasterKey(); if (masterKey != null) { this.context.getNMTokenSecretManager().setMasterKey(masterKey); } StringBuilder successfullRegistrationMsg = new StringBuilder(); successfullRegistrationMsg.append("Registered with ResourceManager as ") .append(this.nodeId).append(" with total resource of ") .append(this.totalResource); if (regNMResponse.getAreNodeLabelsAcceptedByRM()) { successfullRegistrationMsg .append(" and with following Node label(s) : {") .append(StringUtils.join(",", nodeLabels)).append("}"); } else if (hasNodeLabelsProvider) { //case where provider is set but RM did not accept the Node Labels LOG.error(regNMResponse.getDiagnosticsMessage()); } LOG.info(successfullRegistrationMsg); LOG.info("Notifying ContainerManager to unblock new container-requests"); ((ContainerManagerImpl) this.context.getContainerManager()) .setBlockNewContainerRequests(false); } private List<ApplicationId> createKeepAliveApplicationList() { if (!tokenKeepAliveEnabled) { return Collections.emptyList(); } List<ApplicationId> appList = new ArrayList<ApplicationId>(); for (Iterator<Entry<ApplicationId, Long>> i = this.appTokenKeepAliveMap.entrySet().iterator(); i.hasNext();) { Entry<ApplicationId, Long> e = i.next(); ApplicationId appId = e.getKey(); Long nextKeepAlive = e.getValue(); if (!this.context.getApplications().containsKey(appId)) { // Remove if the application has finished. i.remove(); } else if (System.currentTimeMillis() > nextKeepAlive) { // KeepAlive list for the next hearbeat. appList.add(appId); trackAppForKeepAlive(appId); } } return appList; } private NodeStatus getNodeStatus(int responseId) throws IOException { NodeHealthStatus nodeHealthStatus = this.context.getNodeHealthStatus(); nodeHealthStatus.setHealthReport(healthChecker.getHealthReport()); nodeHealthStatus.setIsNodeHealthy(healthChecker.isHealthy()); nodeHealthStatus.setLastHealthReportTime(healthChecker .getLastHealthReportTime()); if (LOG.isDebugEnabled()) { LOG.debug("Node's health-status : " + nodeHealthStatus.getIsNodeHealthy() + ", " + nodeHealthStatus.getHealthReport()); } List<ContainerStatus> containersStatuses = getContainerStatuses(); ResourceUtilization containersUtilization = getContainersUtilization(); NodeStatus nodeStatus = NodeStatus.newInstance(nodeId, responseId, containersStatuses, createKeepAliveApplicationList(), nodeHealthStatus, containersUtilization); return nodeStatus; } /** * Get the aggregated utilization of the containers in this node. * @return Resource utilization of all the containers. */ private ResourceUtilization getContainersUtilization() { ContainerManagerImpl containerManager = (ContainerManagerImpl) this.context.getContainerManager(); ContainersMonitor containersMonitor = containerManager.getContainersMonitor(); return containersMonitor.getContainersUtilization(); } // Iterate through the NMContext and clone and get all the containers' // statuses. If it's a completed container, add into the // recentlyStoppedContainers collections. 
@VisibleForTesting protected List<ContainerStatus> getContainerStatuses() throws IOException { List<ContainerStatus> containerStatuses = new ArrayList<ContainerStatus>(); for (Container container : this.context.getContainers().values()) { ContainerId containerId = container.getContainerId(); ApplicationId applicationId = containerId.getApplicationAttemptId() .getApplicationId(); org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus = container.cloneAndGetContainerStatus(); if (containerStatus.getState() == ContainerState.COMPLETE) { if (isApplicationStopped(applicationId)) { if (LOG.isDebugEnabled()) { LOG.debug(applicationId + " is completing, " + " remove " + containerId + " from NM context."); } context.getContainers().remove(containerId); pendingCompletedContainers.put(containerId, containerStatus); } else { if (!isContainerRecentlyStopped(containerId)) { pendingCompletedContainers.put(containerId, containerStatus); // Adding to finished containers cache. Cache will keep it around at // least for #durationToTrackStoppedContainers duration. In the // subsequent call to stop container it will get removed from cache. addCompletedContainer(containerId); } } } else { containerStatuses.add(containerStatus); } } containerStatuses.addAll(pendingCompletedContainers.values()); if (LOG.isDebugEnabled()) { LOG.debug("Sending out " + containerStatuses.size() + " container statuses: " + containerStatuses); } return containerStatuses; } private List<ApplicationId> getRunningApplications() { List<ApplicationId> runningApplications = new ArrayList<ApplicationId>(); runningApplications.addAll(this.context.getApplications().keySet()); return runningApplications; } // These NMContainerStatus are sent on NM registration and used by YARN only. private List<NMContainerStatus> getNMContainerStatuses() throws IOException { List<NMContainerStatus> containerStatuses = new ArrayList<NMContainerStatus>(); for (Container container : this.context.getContainers().values()) { ContainerId containerId = container.getContainerId(); ApplicationId applicationId = containerId.getApplicationAttemptId() .getApplicationId(); if (!this.context.getApplications().containsKey(applicationId)) { context.getContainers().remove(containerId); continue; } NMContainerStatus status = container.getNMContainerStatus(); containerStatuses.add(status); if (status.getContainerState() == ContainerState.COMPLETE) { // Adding to finished containers cache. Cache will keep it around at // least for #durationToTrackStoppedContainers duration. In the // subsequent call to stop container it will get removed from cache. 
addCompletedContainer(containerId); } } LOG.info("Sending out " + containerStatuses.size() + " NM container statuses: " + containerStatuses); return containerStatuses; } private boolean isApplicationStopped(ApplicationId applicationId) { if (!this.context.getApplications().containsKey(applicationId)) { return true; } ApplicationState applicationState = this.context.getApplications().get( applicationId).getApplicationState(); if (applicationState == ApplicationState.FINISHING_CONTAINERS_WAIT || applicationState == ApplicationState.APPLICATION_RESOURCES_CLEANINGUP || applicationState == ApplicationState.FINISHED) { return true; } else { return false; } } @Override public void addCompletedContainer(ContainerId containerId) { synchronized (recentlyStoppedContainers) { removeVeryOldStoppedContainersFromCache(); if (!recentlyStoppedContainers.containsKey(containerId)) { recentlyStoppedContainers.put(containerId, System.currentTimeMillis() + durationToTrackStoppedContainers); } } } @VisibleForTesting @Private public void removeOrTrackCompletedContainersFromContext( List<ContainerId> containerIds) throws IOException { Set<ContainerId> removedContainers = new HashSet<ContainerId>(); pendingContainersToRemove.addAll(containerIds); Iterator<ContainerId> iter = pendingContainersToRemove.iterator(); while (iter.hasNext()) { ContainerId containerId = iter.next(); // remove the container only if the container is at DONE state Container nmContainer = context.getContainers().get(containerId); if (nmContainer == null) { iter.remove(); } else if (nmContainer.getContainerState().equals( org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.DONE)) { context.getContainers().remove(containerId); removedContainers.add(containerId); iter.remove(); } } if (!removedContainers.isEmpty()) { LOG.info("Removed completed containers from NM context: " + removedContainers); } pendingCompletedContainers.clear(); } private void trackAppsForKeepAlive(List<ApplicationId> appIds) { if (tokenKeepAliveEnabled && appIds != null && appIds.size() > 0) { for (ApplicationId appId : appIds) { trackAppForKeepAlive(appId); } } } private void trackAppForKeepAlive(ApplicationId appId) { // Next keepAlive request for app between 0.7 & 0.9 of when the token will // likely expire. 
long nextTime = System.currentTimeMillis() + (long) (0.7 * tokenRemovalDelayMs + (0.2 * tokenRemovalDelayMs * keepAliveDelayRandom.nextInt(100))/100); appTokenKeepAliveMap.put(appId, nextTime); } @Override public void sendOutofBandHeartBeat() { synchronized (this.heartbeatMonitor) { this.heartbeatMonitor.notify(); } } public boolean isContainerRecentlyStopped(ContainerId containerId) { synchronized (recentlyStoppedContainers) { return recentlyStoppedContainers.containsKey(containerId); } } @Override public void clearFinishedContainersFromCache() { synchronized (recentlyStoppedContainers) { recentlyStoppedContainers.clear(); } } @Private @VisibleForTesting public void removeVeryOldStoppedContainersFromCache() { synchronized (recentlyStoppedContainers) { long currentTime = System.currentTimeMillis(); Iterator<ContainerId> i = recentlyStoppedContainers.keySet().iterator(); while (i.hasNext()) { ContainerId cid = i.next(); if (recentlyStoppedContainers.get(cid) < currentTime) { if (!context.getContainers().containsKey(cid)) { i.remove(); try { context.getNMStateStore().removeContainer(cid); } catch (IOException e) { LOG.error("Unable to remove container " + cid + " in store", e); } } } else { break; } } } } @Override public long getRMIdentifier() { return this.rmIdentifier; } private static Map<ApplicationId, Credentials> parseCredentials( Map<ApplicationId, ByteBuffer> systemCredentials) throws IOException { Map<ApplicationId, Credentials> map = new HashMap<ApplicationId, Credentials>(); for (Map.Entry<ApplicationId, ByteBuffer> entry : systemCredentials.entrySet()) { Credentials credentials = new Credentials(); DataInputByteBuffer buf = new DataInputByteBuffer(); ByteBuffer buffer = entry.getValue(); buffer.rewind(); buf.reset(buffer); credentials.readTokenStorageStream(buf); map.put(entry.getKey(), credentials); } if (LOG.isDebugEnabled()) { for (Map.Entry<ApplicationId, Credentials> entry : map.entrySet()) { LOG.debug("Retrieved credentials form RM for " + entry.getKey() + ": " + entry.getValue().getAllTokens()); } } return map; } protected void startStatusUpdater() { statusUpdaterRunnable = new Runnable() { @Override @SuppressWarnings("unchecked") public void run() { int lastHeartbeatID = 0; Set<NodeLabel> lastUpdatedNodeLabelsToRM = null; if (hasNodeLabelsProvider) { lastUpdatedNodeLabelsToRM = nodeLabelsProvider.getNodeLabels(); lastUpdatedNodeLabelsToRM = (null == lastUpdatedNodeLabelsToRM) ? CommonNodeLabelsManager.EMPTY_NODELABEL_SET : lastUpdatedNodeLabelsToRM; } while (!isStopped) { // Send heartbeat try { NodeHeartbeatResponse response = null; Set<NodeLabel> nodeLabelsForHeartbeat = null; NodeStatus nodeStatus = getNodeStatus(lastHeartbeatID); if (hasNodeLabelsProvider) { nodeLabelsForHeartbeat = nodeLabelsProvider.getNodeLabels(); // if the provider returns null then consider empty labels are set nodeLabelsForHeartbeat = (nodeLabelsForHeartbeat == null) ? 
CommonNodeLabelsManager.EMPTY_NODELABEL_SET : nodeLabelsForHeartbeat; if (!areNodeLabelsUpdated(nodeLabelsForHeartbeat, lastUpdatedNodeLabelsToRM)) { // if nodelabels have not changed then no need to send nodeLabelsForHeartbeat = null; } } NodeHeartbeatRequest request = NodeHeartbeatRequest.newInstance(nodeStatus, NodeStatusUpdaterImpl.this.context .getContainerTokenSecretManager().getCurrentKey(), NodeStatusUpdaterImpl.this.context .getNMTokenSecretManager().getCurrentKey(), nodeLabelsForHeartbeat); if (logAggregationEnabled) { // pull log aggregation status for application running in this NM List<LogAggregationReport> logAggregationReports = getLogAggregationReportsForApps(context .getLogAggregationStatusForApps()); if (logAggregationReports != null && !logAggregationReports.isEmpty()) { request.setLogAggregationReportsForApps(logAggregationReports); } } response = resourceTracker.nodeHeartbeat(request); //get next heartbeat interval from response nextHeartBeatInterval = response.getNextHeartBeatInterval(); updateMasterKeys(response); if (response.getNodeAction() == NodeAction.SHUTDOWN) { LOG.warn("Received SHUTDOWN signal from ResourceManager as part of heartbeat," + " hence shutting down."); LOG.warn("Message from ResourceManager: " + response.getDiagnosticsMessage()); context.setDecommissioned(true); dispatcher.getEventHandler().handle( new NodeManagerEvent(NodeManagerEventType.SHUTDOWN)); break; } if (response.getNodeAction() == NodeAction.RESYNC) { LOG.warn("Node is out of sync with ResourceManager," + " hence resyncing."); LOG.warn("Message from ResourceManager: " + response.getDiagnosticsMessage()); // Invalidate the RMIdentifier while resync NodeStatusUpdaterImpl.this.rmIdentifier = ResourceManagerConstants.RM_INVALID_IDENTIFIER; dispatcher.getEventHandler().handle( new NodeManagerEvent(NodeManagerEventType.RESYNC)); pendingCompletedContainers.clear(); break; } if (response.getAreNodeLabelsAcceptedByRM()) { lastUpdatedNodeLabelsToRM = nodeLabelsForHeartbeat; LOG.info("Node Labels {" + StringUtils.join(",", nodeLabelsForHeartbeat) + "} were accepted by RM "); } else if (nodeLabelsForHeartbeat != null) { // case where NodeLabelsProvider is set and updated labels were // sent to RM and RM rejected the labels LOG.error(response.getDiagnosticsMessage()); } // Explicitly put this method after checking the resync response. We // don't want to remove the completed containers before resync // because these completed containers will be reported back to RM // when NM re-registers with RM.
// Only remove the cleanedup containers that are acked removeOrTrackCompletedContainersFromContext(response .getContainersToBeRemovedFromNM()); logAggregationReportForAppsTempList.clear(); lastHeartbeatID = response.getResponseId(); List<ContainerId> containersToCleanup = response .getContainersToCleanup(); if (!containersToCleanup.isEmpty()) { dispatcher.getEventHandler().handle( new CMgrCompletedContainersEvent(containersToCleanup, CMgrCompletedContainersEvent.Reason.BY_RESOURCEMANAGER)); } List<ApplicationId> appsToCleanup = response.getApplicationsToCleanup(); //Only start tracking for keepAlive on FINISH_APP trackAppsForKeepAlive(appsToCleanup); if (!appsToCleanup.isEmpty()) { dispatcher.getEventHandler().handle( new CMgrCompletedAppsEvent(appsToCleanup, CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER)); } Map<ApplicationId, ByteBuffer> systemCredentials = response.getSystemCredentialsForApps(); if (systemCredentials != null && !systemCredentials.isEmpty()) { ((NMContext) context) .setSystemCrendentialsForApps(parseCredentials(systemCredentials)); } } catch (ConnectException e) { //catch and throw the exception if tried MAX wait time to connect RM dispatcher.getEventHandler().handle( new NodeManagerEvent(NodeManagerEventType.SHUTDOWN)); throw new YarnRuntimeException(e); } catch (Throwable e) { // TODO Better error handling. Thread can die with the rest of the // NM still running. LOG.error("Caught exception in status-updater", e); } finally { synchronized (heartbeatMonitor) { nextHeartBeatInterval = nextHeartBeatInterval <= 0 ? YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS : nextHeartBeatInterval; try { heartbeatMonitor.wait(nextHeartBeatInterval); } catch (InterruptedException e) { // Do Nothing } } } } } /** * Caller should take care of sending non null nodelabels for both * arguments * * @param nodeLabelsNew * @param nodeLabelsOld * @return if the New node labels are diff from the older one. */ private boolean areNodeLabelsUpdated(Set<NodeLabel> nodeLabelsNew, Set<NodeLabel> nodeLabelsOld) { if (nodeLabelsNew.size() != nodeLabelsOld.size() || !nodeLabelsOld.containsAll(nodeLabelsNew)) { return true; } return false; } private void updateMasterKeys(NodeHeartbeatResponse response) { // See if the master-key has rolled over MasterKey updatedMasterKey = response.getContainerTokenMasterKey(); if (updatedMasterKey != null) { // Will be non-null only on roll-over on RM side context.getContainerTokenSecretManager().setMasterKey(updatedMasterKey); } updatedMasterKey = response.getNMTokenMasterKey(); if (updatedMasterKey != null) { context.getNMTokenSecretManager().setMasterKey(updatedMasterKey); } } }; statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater"); statusUpdater.start(); } private List<LogAggregationReport> getLogAggregationReportsForApps( ConcurrentLinkedQueue<LogAggregationReport> lastestLogAggregationStatus) { LogAggregationReport status; while ((status = lastestLogAggregationStatus.poll()) != null) { this.logAggregationReportForAppsTempList.add(status); } List<LogAggregationReport> reports = new ArrayList<LogAggregationReport>(); reports.addAll(logAggregationReportForAppsTempList); return reports; } }
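One detail worth isolating from the class above is the keep-alive scheduling in trackAppForKeepAlive: the next keep-alive lands between 0.7 and roughly 0.9 of the token-removal delay. A hedged sketch with an example delay value (the real value comes from the RM_NM_EXPIRY_INTERVAL_MS configuration):

import java.util.Random;

public class KeepAliveDelaySketch {
  public static void main(String[] args) {
    long tokenRemovalDelayMs = 600000L; // example: 10-minute expiry interval
    Random keepAliveDelayRandom = new Random();
    // 0.7 * delay plus a random 0..0.198 * delay, i.e. the 0.7-0.9 window
    // described in the comment above.
    long nextTime = System.currentTimeMillis()
        + (long) (0.7 * tokenRemovalDelayMs
            + (0.2 * tokenRemovalDelayMs * keepAliveDelayRandom.nextInt(100)) / 100);
    System.out.println("next keep-alive scheduled at " + nextTime + " ("
        + (nextTime - System.currentTimeMillis()) + " ms from now)");
  }
}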
36,453
40.805046
100
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import org.apache.hadoop.service.AbstractService; public class NodeResourceMonitorImpl extends AbstractService implements NodeResourceMonitor { public NodeResourceMonitorImpl() { super(NodeResourceMonitorImpl.class.getName()); } }
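NodeResourceMonitorImpl above is an empty stub. For context, a hedged sketch of what a fleshed-out implementation's lifecycle hooks might look like; the monitoring thread body, class name, and sampling interval are hypothetical, and only the serviceInit/serviceStart/serviceStop override points come from the AbstractService API used throughout these files:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;

public class SketchNodeResourceMonitor extends AbstractService {
  private Thread monitorThread;
  private volatile boolean stopped;

  public SketchNodeResourceMonitor() {
    super(SketchNodeResourceMonitor.class.getName());
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    // A real monitor would read its sampling interval from conf here.
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    monitorThread = new Thread(new Runnable() {
      @Override
      public void run() {
        while (!stopped) {
          // Hypothetical: sample node-level resource utilization here.
          try {
            Thread.sleep(3000);
          } catch (InterruptedException e) {
            return;
          }
        }
      }
    }, "Node Resource Monitor");
    monitorThread.start();
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    stopped = true;
    if (monitorThread != null) {
      monitorThread.interrupt();
      monitorThread.join();
    }
    super.serviceStop();
  }
}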
1,090
34.193548
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; public enum ContainerManagerEventType { FINISH_APPS, FINISH_CONTAINERS, }
922
35.92
74
java
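These two values distinguish the cleanup events the ContainerManager receives: CMgrCompletedAppsEvent and CMgrCompletedContainersEvent each pass one of them to the ContainerManagerEvent constructor. A hypothetical handler sketch showing the dispatch, assuming ContainerManagerEvent exposes its type via getType() as YARN AbstractEvent subclasses do; the handler class itself is invented for illustration:

import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent;

public class FinishEventHandlerSketch {
  public void handle(ContainerManagerEvent event) {
    switch (event.getType()) {
    case FINISH_APPS:
      CMgrCompletedAppsEvent appsEvent = (CMgrCompletedAppsEvent) event;
      // ... clean up application-level state for appsEvent.getAppsToCleanup() ...
      break;
    case FINISH_CONTAINERS:
      // ... clean up per-container state ...
      break;
    default:
      break;
    }
  }
}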
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.api.records.ContainerId; public interface NodeStatusUpdater extends Service { /** * Schedule a heartbeat to the ResourceManager outside of the normal, * periodic heartbeating process. This is typically called when the state * of containers on the node has changed to notify the RM sooner. */ void sendOutofBandHeartBeat(); /** * Get the ResourceManager identifier received during registration * @return the ResourceManager ID */ long getRMIdentifier(); /** * Query if a container has recently completed * @param containerId the container ID * @return true if the container has recently completed */ public boolean isContainerRecentlyStopped(ContainerId containerId); /** * Add a container to the list of containers that have recently completed * @param containerId the ID of the completed container */ public void addCompletedContainer(ContainerId containerId); /** * Clear the list of recently completed containers */ public void clearFinishedContainersFromCache(); }
1,963
33.45614
75
java
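The isContainerRecentlyStopped / addCompletedContainer / clearFinishedContainersFromCache trio describes a short-lived cache of completed container IDs, used so duplicate launch requests for a just-finished container can be rejected. A standalone sketch of such a cache; the retention period, map type, and use of long keys are illustrative, and the real NodeStatusUpdaterImpl may differ:

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

public class RecentlyStoppedCacheSketch {
  private static final long RETENTION_MS = 10 * 60 * 1000L; // illustrative
  private final LinkedHashMap<Long, Long> finishTimes = new LinkedHashMap<Long, Long>();

  public synchronized void addCompletedContainer(long containerId) {
    finishTimes.put(containerId, System.currentTimeMillis());
  }

  public synchronized boolean isContainerRecentlyStopped(long containerId) {
    evictExpired();
    return finishTimes.containsKey(containerId);
  }

  public synchronized void clearCache() {
    finishTimes.clear();
  }

  private void evictExpired() {
    long cutoff = System.currentTimeMillis() - RETENTION_MS;
    Iterator<Map.Entry<Long, Long>> it = finishTimes.entrySet().iterator();
    // LinkedHashMap preserves insertion order, so eviction can stop at the
    // first entry that is still fresh.
    while (it.hasNext() && it.next().getValue() < cutoff) {
      it.remove();
    }
  }
}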
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.net.InetAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; /** * Manages NodeManager audit logs. * * Audit log entries are written as tab-separated key=value pairs. */ public class NMAuditLogger { private static final Log LOG = LogFactory.getLog(NMAuditLogger.class); static enum Keys {USER, OPERATION, TARGET, RESULT, IP, DESCRIPTION, APPID, CONTAINERID} public static class AuditConstants { static final String SUCCESS = "SUCCESS"; static final String FAILURE = "FAILURE"; static final String KEY_VAL_SEPARATOR = "="; static final char PAIR_SEPARATOR = '\t'; // Some commonly used descriptions public static final String START_CONTAINER = "Start Container Request"; public static final String STOP_CONTAINER = "Stop Container Request"; public static final String FINISH_SUCCESS_CONTAINER = "Container Finished - Succeeded"; public static final String FINISH_FAILED_CONTAINER = "Container Finished - Failed"; public static final String FINISH_KILLED_CONTAINER = "Container Finished - Killed"; } /** * A helper API for creating an audit log for a successful event. */ static String createSuccessLog(String user, String operation, String target, ApplicationId appId, ContainerId containerId) { StringBuilder b = new StringBuilder(); start(Keys.USER, user, b); addRemoteIP(b); add(Keys.OPERATION, operation, b); add(Keys.TARGET, target, b); add(Keys.RESULT, AuditConstants.SUCCESS, b); if (appId != null) { add(Keys.APPID, appId.toString(), b); } if (containerId != null) { add(Keys.CONTAINERID, containerId.toString(), b); } return b.toString(); } /** * Create a readable and parseable audit log string for a successful event. * * @param user User who made the service request. * @param operation Operation requested by the user. * @param target The target on which the operation is being performed. * @param appId Application Id in which the operation was performed. * @param containerId Container Id in which the operation was performed. * * <br><br> * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter * and hence the value fields should not contain tabs ('\t'). */ public static void logSuccess(String user, String operation, String target, ApplicationId appId, ContainerId containerId) { if (LOG.isInfoEnabled()) { LOG.info(createSuccessLog(user, operation, target, appId, containerId)); } } /** * Create a readable and parseable audit log string for a successful event. * * @param user User who made the service request.
* @param operation Operation requested by the user. * @param target The target on which the operation is being performed. * * <br><br> * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter * and hence the value fields should not contain tabs ('\t'). */ public static void logSuccess(String user, String operation, String target) { if (LOG.isInfoEnabled()) { LOG.info(createSuccessLog(user, operation, target, null, null)); } } /** * A helper API for creating an audit log for a failure event. * This is factored out for testing purposes. */ static String createFailureLog(String user, String operation, String target, String description, ApplicationId appId, ContainerId containerId) { StringBuilder b = new StringBuilder(); start(Keys.USER, user, b); addRemoteIP(b); add(Keys.OPERATION, operation, b); add(Keys.TARGET, target, b); add(Keys.RESULT, AuditConstants.FAILURE, b); add(Keys.DESCRIPTION, description, b); if (appId != null) { add(Keys.APPID, appId.toString(), b); } if (containerId != null) { add(Keys.CONTAINERID, containerId.toString(), b); } return b.toString(); } /** * Create a readable and parseable audit log string for a failed event. * * @param user User who made the service request. * @param operation Operation requested by the user. * @param target The target on which the operation is being performed. * @param description Some additional information as to why the operation * failed. * @param appId ApplicationId in which the operation was performed. * @param containerId Container Id in which the operation was performed. * * <br><br> * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter * and hence the value fields should not contain tabs ('\t'). */ public static void logFailure(String user, String operation, String target, String description, ApplicationId appId, ContainerId containerId) { if (LOG.isWarnEnabled()) { LOG.warn(createFailureLog(user, operation, target, description, appId, containerId)); } } /** * Create a readable and parseable audit log string for a failed event. * * @param user User who made the service request. * @param operation Operation requested by the user. * @param target The target on which the operation is being performed. * @param description Some additional information as to why the operation * failed. * * <br><br> * Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter * and hence the value fields should not contain tabs ('\t'). */ public static void logFailure(String user, String operation, String target, String description) { if (LOG.isWarnEnabled()) { LOG.warn(createFailureLog(user, operation, target, description, null, null)); } } /** * A helper method to add the remote IP address. */ static void addRemoteIP(StringBuilder b) { InetAddress ip = Server.getRemoteIp(); // ip address can be null for testcases if (ip != null) { add(Keys.IP, ip.getHostAddress(), b); } } /** * Adds the first key-val pair to the passed builder in the following format * key=value */ static void start(Keys key, String value, StringBuilder b) { b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); } /** * Appends the key-val pair to the passed builder in the following format * <pair-delim>key=value */ static void add(Keys key, String value, StringBuilder b) { b.append(AuditConstants.PAIR_SEPARATOR).append(key.name()) .append(AuditConstants.KEY_VAL_SEPARATOR).append(value); } }
7,557
36.415842
91
java
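A short usage sketch for the logger above. The IDs are made up; the record factory methods (ApplicationId.newInstance, ApplicationAttemptId.newInstance, ContainerId.newContainerId) are the standard YARN factories in this era of the codebase. The resulting log line is tab-separated key=value pairs, e.g. USER=alice, OPERATION=Start Container Request, TARGET=ContainerManagerImpl, RESULT=SUCCESS, and so on:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger;
import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants;

public class AuditLogDemo {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(1234567890123L, 42);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);

    // INFO-level entry with RESULT=SUCCESS
    NMAuditLogger.logSuccess("alice", AuditConstants.START_CONTAINER,
        "ContainerManagerImpl", appId, containerId);

    // WARN-level entry with RESULT=FAILURE and a DESCRIPTION field
    NMAuditLogger.logFailure("alice", AuditConstants.STOP_CONTAINER,
        "ContainerManagerImpl", "Container not found", appId, containerId);
  }
}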
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.util.List; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent; import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEventType; public class CMgrCompletedAppsEvent extends ContainerManagerEvent { private final List<ApplicationId> appsToCleanup; private final Reason reason; public CMgrCompletedAppsEvent(List<ApplicationId> appsToCleanup, Reason reason) { super(ContainerManagerEventType.FINISH_APPS); this.appsToCleanup = appsToCleanup; this.reason = reason; } public List<ApplicationId> getAppsToCleanup() { return this.appsToCleanup; } public Reason getReason() { return reason; } public static enum Reason { /** * Application is killed as NodeManager is shut down */ ON_SHUTDOWN, /** * Application is killed by ResourceManager */ BY_RESOURCEMANAGER } }
1,786
29.810345
83
java
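A sketch of how this event is produced; it mirrors the status updater's handling of getApplicationsToCleanup() shown earlier in this section. The dispatcher parameter is assumed to be a YARN org.apache.hadoop.yarn.event.Dispatcher already available in scope, and the application ID is fabricated:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;

public class FinishAppsEventSketch {
  // Ask the ContainerManager to clean up one application's state.
  static void askContainerManagerToCleanup(Dispatcher dispatcher) {
    List<ApplicationId> appsToCleanup = Collections.singletonList(
        ApplicationId.newInstance(System.currentTimeMillis(), 1));
    dispatcher.getEventHandler().handle(
        new CMgrCompletedAppsEvent(appsToCleanup,
            CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER));
  }
}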
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import static java.util.concurrent.TimeUnit.SECONDS; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.FutureTask; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; public class DeletionService extends AbstractService { static final Log LOG = LogFactory.getLog(DeletionService.class); private int debugDelay; private final ContainerExecutor exec; private ScheduledThreadPoolExecutor sched; private static final FileContext lfs = getLfs(); private final NMStateStoreService stateStore; private AtomicInteger nextTaskId = new AtomicInteger(0); static final FileContext getLfs() { try { return FileContext.getLocalFSFileContext(); } catch (UnsupportedFileSystemException e) { throw new RuntimeException(e); } } public DeletionService(ContainerExecutor exec) { this(exec, new NMNullStateStoreService()); } public DeletionService(ContainerExecutor exec, NMStateStoreService stateStore) { super(DeletionService.class.getName()); this.exec = exec; this.debugDelay = 0; this.stateStore = stateStore; } /** * Delete the path(s) as this user. * @param user The user to delete as, or the JVM user if null * @param subDir the sub directory name * @param baseDirs the base directories which contain the subDir */ public void delete(String user, Path subDir, Path...
baseDirs) { // TODO if parent owned by NM, rename within parent inline if (debugDelay != -1) { List<Path> baseDirList = null; if (baseDirs != null && baseDirs.length != 0) { baseDirList = Arrays.asList(baseDirs); } FileDeletionTask task = new FileDeletionTask(this, user, subDir, baseDirList); recordDeletionTaskInStateStore(task); sched.schedule(task, debugDelay, TimeUnit.SECONDS); } } public void scheduleFileDeletionTask(FileDeletionTask fileDeletionTask) { if (debugDelay != -1) { recordDeletionTaskInStateStore(fileDeletionTask); sched.schedule(fileDeletionTask, debugDelay, TimeUnit.SECONDS); } } @Override protected void serviceInit(Configuration conf) throws Exception { ThreadFactory tf = new ThreadFactoryBuilder() .setNameFormat("DeletionService #%d") .build(); if (conf != null) { sched = new DelServiceSchedThreadPoolExecutor( conf.getInt(YarnConfiguration.NM_DELETE_THREAD_COUNT, YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT), tf); debugDelay = conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0); } else { sched = new DelServiceSchedThreadPoolExecutor( YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT, tf); } sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); sched.setKeepAliveTime(60L, SECONDS); if (stateStore.canRecover()) { recover(stateStore.loadDeletionServiceState()); } super.serviceInit(conf); } @Override protected void serviceStop() throws Exception { if (sched != null) { sched.shutdown(); boolean terminated = false; try { terminated = sched.awaitTermination(10, SECONDS); } catch (InterruptedException e) { } if (terminated != true) { sched.shutdownNow(); } } super.serviceStop(); } /** * Determine if the service has completely stopped. * Used only by unit tests * @return true if service has completely stopped */ @Private public boolean isTerminated() { return getServiceState() == STATE.STOPPED && sched.isTerminated(); } private static class DelServiceSchedThreadPoolExecutor extends ScheduledThreadPoolExecutor { public DelServiceSchedThreadPoolExecutor(int corePoolSize, ThreadFactory threadFactory) { super(corePoolSize, threadFactory); } @Override protected void afterExecute(Runnable task, Throwable exception) { if (task instanceof FutureTask<?>) { FutureTask<?> futureTask = (FutureTask<?>) task; if (!futureTask.isCancelled()) { try { futureTask.get(); } catch (ExecutionException ee) { exception = ee.getCause(); } catch (InterruptedException ie) { exception = ie; } } } if (exception != null) { LOG.error("Exception during execution of task in DeletionService", exception); } } } public static class FileDeletionTask implements Runnable { public static final int INVALID_TASK_ID = -1; private int taskId; private final String user; private final Path subDir; private final List<Path> baseDirs; private final AtomicInteger numberOfPendingPredecessorTasks; private final Set<FileDeletionTask> successorTaskSet; private final DeletionService delService; // By default all tasks will start as success=true; however if any of // the dependent task fails then it will be marked as false in // fileDeletionTaskFinished(). 
private boolean success; private FileDeletionTask(DeletionService delService, String user, Path subDir, List<Path> baseDirs) { this(INVALID_TASK_ID, delService, user, subDir, baseDirs); } private FileDeletionTask(int taskId, DeletionService delService, String user, Path subDir, List<Path> baseDirs) { this.taskId = taskId; this.delService = delService; this.user = user; this.subDir = subDir; this.baseDirs = baseDirs; this.successorTaskSet = new HashSet<FileDeletionTask>(); this.numberOfPendingPredecessorTasks = new AtomicInteger(0); success = true; } /** * increments and returns pending predecessor task count */ public int incrementAndGetPendingPredecessorTasks() { return numberOfPendingPredecessorTasks.incrementAndGet(); } /** * decrements and returns pending predecessor task count */ public int decrementAndGetPendingPredecessorTasks() { return numberOfPendingPredecessorTasks.decrementAndGet(); } @VisibleForTesting public String getUser() { return this.user; } @VisibleForTesting public Path getSubDir() { return this.subDir; } @VisibleForTesting public List<Path> getBaseDirs() { return this.baseDirs; } public synchronized void setSuccess(boolean success) { this.success = success; } public synchronized boolean getSucess() { return this.success; } public synchronized FileDeletionTask[] getSuccessorTasks() { FileDeletionTask[] successors = new FileDeletionTask[successorTaskSet.size()]; return successorTaskSet.toArray(successors); } @Override public void run() { if (LOG.isDebugEnabled()) { LOG.debug(this); } boolean error = false; if (null == user) { if (baseDirs == null || baseDirs.size() == 0) { LOG.debug("NM deleting absolute path : " + subDir); try { lfs.delete(subDir, true); } catch (IOException e) { error = true; LOG.warn("Failed to delete " + subDir); } } else { for (Path baseDir : baseDirs) { Path del = subDir == null? baseDir : new Path(baseDir, subDir); LOG.debug("NM deleting path : " + del); try { lfs.delete(del, true); } catch (IOException e) { error = true; LOG.warn("Failed to delete " + subDir); } } } } else { try { LOG.debug("Deleting path: [" + subDir + "] as user: [" + user + "]"); if (baseDirs == null || baseDirs.size() == 0) { delService.exec.deleteAsUser(new DeletionAsUserContext.Builder() .setUser(user) .setSubDir(subDir) .build()); } else { delService.exec.deleteAsUser(new DeletionAsUserContext.Builder() .setUser(user) .setSubDir(subDir) .setBasedirs(baseDirs.toArray(new Path[0])) .build()); } } catch (IOException e) { error = true; LOG.warn("Failed to delete as user " + user, e); } catch (InterruptedException e) { error = true; LOG.warn("Failed to delete as user " + user, e); } } if (error) { setSuccess(!error); } fileDeletionTaskFinished(); } @Override public String toString() { StringBuffer sb = new StringBuffer("\nFileDeletionTask : "); sb.append(" user : ").append(this.user); sb.append(" subDir : ").append( subDir == null ? "null" : subDir.toString()); sb.append(" baseDir : "); if (baseDirs == null || baseDirs.size() == 0) { sb.append("null"); } else { for (Path baseDir : baseDirs) { sb.append(baseDir.toString()).append(','); } } return sb.toString(); } /** * If there is a task dependency between say tasks 1,2,3 such that * task2 and task3 can be started only after task1 then we should define * task2 and task3 as successor tasks for task1. 
* Note: task dependencies should be defined prior to scheduling the predecessor task. * @param successorTask the task that must wait for this task to finish */ public synchronized void addFileDeletionTaskDependency( FileDeletionTask successorTask) { if (successorTaskSet.add(successorTask)) { successorTask.incrementAndGetPendingPredecessorTasks(); } } /* * This is called when: * 1) the current file deletion task has run and finished, or * 2) a predecessor task invokes it directly after a failure has already * marked this task's success = false. */ private synchronized void fileDeletionTaskFinished() { try { delService.stateStore.removeDeletionTask(taskId); } catch (IOException e) { LOG.error("Unable to remove deletion task " + taskId + " from state store", e); } Iterator<FileDeletionTask> successorTaskI = this.successorTaskSet.iterator(); while (successorTaskI.hasNext()) { FileDeletionTask successorTask = successorTaskI.next(); if (!success) { successorTask.setSuccess(success); } int count = successorTask.decrementAndGetPendingPredecessorTasks(); if (count == 0) { if (successorTask.getSucess()) { successorTask.delService.scheduleFileDeletionTask(successorTask); } else { successorTask.fileDeletionTaskFinished(); } } } } } /** * Helper method to create a file deletion task. To be used only if we need * a way to define dependencies between deletion tasks. * @param user user on whose behalf this task is supposed to run * @param subDir sub directory as required in * {@link DeletionService#delete(String, Path, Path...)} * @param baseDirs base directories as required in * {@link DeletionService#delete(String, Path, Path...)} */ public FileDeletionTask createFileDeletionTask(String user, Path subDir, Path[] baseDirs) { return new FileDeletionTask(this, user, subDir, Arrays.asList(baseDirs)); } private void recover(RecoveredDeletionServiceState state) throws IOException { List<DeletionServiceDeleteTaskProto> taskProtos = state.getTasks(); Map<Integer, DeletionTaskRecoveryInfo> idToInfoMap = new HashMap<Integer, DeletionTaskRecoveryInfo>(taskProtos.size()); Set<Integer> successorTasks = new HashSet<Integer>(); for (DeletionServiceDeleteTaskProto proto : taskProtos) { DeletionTaskRecoveryInfo info = parseTaskProto(proto); idToInfoMap.put(info.task.taskId, info); nextTaskId.set(Math.max(nextTaskId.get(), info.task.taskId)); successorTasks.addAll(info.successorTaskIds); } // restore the task dependencies and schedule the deletion tasks that // have no predecessors final long now = System.currentTimeMillis(); for (DeletionTaskRecoveryInfo info : idToInfoMap.values()) { for (Integer successorId : info.successorTaskIds) { DeletionTaskRecoveryInfo successor = idToInfoMap.get(successorId); if (successor != null) { info.task.addFileDeletionTaskDependency(successor.task); } else { LOG.error("Unable to locate dependency task for deletion task " + info.task.taskId + " at " + info.task.getSubDir()); } } if (!successorTasks.contains(info.task.taskId)) { long msecTilDeletion = info.deletionTimestamp - now; sched.schedule(info.task, msecTilDeletion, TimeUnit.MILLISECONDS); } } } private DeletionTaskRecoveryInfo parseTaskProto( DeletionServiceDeleteTaskProto proto) throws IOException { int taskId = proto.getId(); String user = proto.hasUser() ?
proto.getUser() : null; Path subdir = null; List<Path> basePaths = null; if (proto.hasSubdir()) { subdir = new Path(proto.getSubdir()); } List<String> basedirs = proto.getBasedirsList(); if (basedirs != null && basedirs.size() > 0) { basePaths = new ArrayList<Path>(basedirs.size()); for (String basedir : basedirs) { basePaths.add(new Path(basedir)); } } FileDeletionTask task = new FileDeletionTask(taskId, this, user, subdir, basePaths); return new DeletionTaskRecoveryInfo(task, proto.getSuccessorIdsList(), proto.getDeletionTime()); } private int generateTaskId() { // get the next ID but avoid an invalid ID int taskId = nextTaskId.incrementAndGet(); while (taskId == FileDeletionTask.INVALID_TASK_ID) { taskId = nextTaskId.incrementAndGet(); } return taskId; } private void recordDeletionTaskInStateStore(FileDeletionTask task) { if (!stateStore.canRecover()) { // optimize the case where we aren't really recording return; } if (task.taskId != FileDeletionTask.INVALID_TASK_ID) { return; // task already recorded } task.taskId = generateTaskId(); FileDeletionTask[] successors = task.getSuccessorTasks(); // store successors first to ensure task IDs have been generated for them for (FileDeletionTask successor : successors) { recordDeletionTaskInStateStore(successor); } DeletionServiceDeleteTaskProto.Builder builder = DeletionServiceDeleteTaskProto.newBuilder(); builder.setId(task.taskId); if (task.getUser() != null) { builder.setUser(task.getUser()); } if (task.getSubDir() != null) { builder.setSubdir(task.getSubDir().toString()); } builder.setDeletionTime(System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(debugDelay, TimeUnit.SECONDS)); if (task.getBaseDirs() != null) { for (Path dir : task.getBaseDirs()) { builder.addBasedirs(dir.toString()); } } for (FileDeletionTask successor : successors) { builder.addSuccessorIds(successor.taskId); } try { stateStore.storeDeletionTask(task.taskId, builder.build()); } catch (IOException e) { LOG.error("Unable to store deletion task " + task.taskId + " for " + task.getSubDir(), e); } } private static class DeletionTaskRecoveryInfo { FileDeletionTask task; List<Integer> successorTaskIds; long deletionTimestamp; public DeletionTaskRecoveryInfo(FileDeletionTask task, List<Integer> successorTaskIds, long deletionTimestamp) { this.task = task; this.successorTaskIds = successorTaskIds; this.deletionTimestamp = deletionTimestamp; } } }
18,080
33.771154
108
java
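The createFileDeletionTask / addFileDeletionTaskDependency / scheduleFileDeletionTask methods above form the public surface for ordered deletions: a successor runs only after all of its predecessors finish successfully. A hedged usage sketch; the paths, user name, and the delService instance are placeholders:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;

public class OrderedDeletionSketch {
  // delService is assumed to be an initialized, started DeletionService.
  static void deleteInOrder(DeletionService delService) {
    Path[] baseDirs = { new Path("/tmp/nm-local-dir") }; // placeholder path
    FileDeletionTask first =
        delService.createFileDeletionTask("alice", new Path("usercache/alice"), baseDirs);
    FileDeletionTask second =
        delService.createFileDeletionTask("alice", new Path("filecache"), baseDirs);

    // "second" runs only after "first" finishes successfully.
    first.addFileDeletionTaskDependency(second);

    // Schedule only the predecessor; the successor is scheduled automatically
    // once its pending-predecessor count drops to zero.
    delService.scheduleFileDeletionTask(first);
  }
}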
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; import org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler; import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler; import 
org.apache.hadoop.yarn.util.ConverterUtils; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.regex.Pattern; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; /** Container execution for Linux. Provides Linux-specific localization * mechanisms and resource management via cgroups, and can switch between multiple * container runtimes, e.g. the standard "process tree" runtime, Docker, etc. */ public class LinuxContainerExecutor extends ContainerExecutor { private static final Log LOG = LogFactory .getLog(LinuxContainerExecutor.class); private String nonsecureLocalUser; private Pattern nonsecureLocalUserPattern; private String containerExecutorExe; private LCEResourcesHandler resourcesHandler; private boolean containerSchedPriorityIsSet = false; private int containerSchedPriorityAdjustment = 0; private boolean containerLimitUsers; private ResourceHandler resourceHandlerChain; private LinuxContainerRuntime linuxContainerRuntime; public LinuxContainerExecutor() { } // created primarily for testing public LinuxContainerExecutor(LinuxContainerRuntime linuxContainerRuntime) { this.linuxContainerRuntime = linuxContainerRuntime; } @Override public void setConf(Configuration conf) { super.setConf(conf); containerExecutorExe = getContainerExecutorExecutablePath(conf); resourcesHandler = ReflectionUtils.newInstance( conf.getClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER, DefaultLCEResourcesHandler.class, LCEResourcesHandler.class), conf); resourcesHandler.setConf(conf); if (conf.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) != null) { containerSchedPriorityIsSet = true; containerSchedPriorityAdjustment = conf .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY); } nonsecureLocalUser = conf.get( YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER); nonsecureLocalUserPattern = Pattern.compile( conf.get(YarnConfiguration.NM_NONSECURE_MODE_USER_PATTERN_KEY, YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_USER_PATTERN)); containerLimitUsers = conf.getBoolean( YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LIMIT_USERS); if (!containerLimitUsers) { LOG.warn(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS + ": impersonation without authentication enabled"); } } void verifyUsernamePattern(String user) { if (!UserGroupInformation.isSecurityEnabled() && !nonsecureLocalUserPattern.matcher(user).matches()) { throw new IllegalArgumentException("Invalid user name '" + user + "'," + " it must match '" + nonsecureLocalUserPattern.pattern() + "'"); } } String getRunAsUser(String user) { if (UserGroupInformation.isSecurityEnabled() || !containerLimitUsers) { return user; } else { return nonsecureLocalUser; } } protected String getContainerExecutorExecutablePath(Configuration conf) { String yarnHomeEnvVar = System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key()); File hadoopBin = new File(yarnHomeEnvVar, "bin"); String defaultPath = new File(hadoopBin, "container-executor").getAbsolutePath(); return null == conf ?
defaultPath : conf.get(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, defaultPath); } protected void addSchedPriorityCommand(List<String> command) { if (containerSchedPriorityIsSet) { command.addAll(Arrays.asList("nice", "-n", Integer.toString(containerSchedPriorityAdjustment))); } } @Override public void init() throws IOException { // Send command to executor which will just start up, // verify configuration/permissions and exit List<String> command = new ArrayList<String>( Arrays.asList(containerExecutorExe, "--checksetup")); String[] commandArray = command.toArray(new String[command.size()]); ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray); if (LOG.isDebugEnabled()) { LOG.debug("checkLinuxExecutorSetup: " + Arrays.toString(commandArray)); } try { shExec.execute(); } catch (ExitCodeException e) { int exitCode = shExec.getExitCode(); LOG.warn("Exit code from container executor initialization is : " + exitCode, e); logOutput(shExec.getOutput()); throw new IOException("Linux container executor not configured properly" + " (error=" + exitCode + ")", e); } Configuration conf = super.getConf(); try { resourceHandlerChain = ResourceHandlerModule .getConfiguredResourceHandlerChain(conf); if (resourceHandlerChain != null) { resourceHandlerChain.bootstrap(conf); } } catch (ResourceHandlerException e) { LOG.error("Failed to bootstrap configured resource subsystems! ", e); throw new IOException("Failed to bootstrap configured resource subsystems!"); } try { if (linuxContainerRuntime == null) { LinuxContainerRuntime runtime = new DelegatingLinuxContainerRuntime(); runtime.initialize(conf); this.linuxContainerRuntime = runtime; } } catch (ContainerExecutionException e) { throw new IOException("Failed to initialize linux container runtime(s)!"); } resourcesHandler.init(this); } @Override public void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens(); InetSocketAddress nmAddr = ctx.getNmAddr(); String user = ctx.getUser(); String appId = ctx.getAppId(); String locId = ctx.getLocId(); LocalDirsHandlerService dirsHandler = ctx.getDirsHandler(); List<String> localDirs = dirsHandler.getLocalDirs(); List<String> logDirs = dirsHandler.getLogDirs(); verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); List<String> command = new ArrayList<String>(); addSchedPriorityCommand(command); command.addAll(Arrays.asList(containerExecutorExe, runAsUser, user, Integer.toString(PrivilegedOperation.RunAsUserCommand.INITIALIZE_CONTAINER.getValue()), appId, nmPrivateContainerTokensPath.toUri().getPath().toString(), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, localDirs), StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR, logDirs))); File jvm = // use same jvm as parent new File(new File(System.getProperty("java.home"), "bin"), "java"); command.add(jvm.toString()); command.add("-classpath"); command.add(System.getProperty("java.class.path")); String javaLibPath = System.getProperty("java.library.path"); if (javaLibPath != null) { command.add("-Djava.library.path=" + javaLibPath); } command.addAll(ContainerLocalizer.getJavaOpts(getConf())); buildMainArgs(command, user, appId, locId, nmAddr, localDirs); String[] commandArray = command.toArray(new String[command.size()]); ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray); if (LOG.isDebugEnabled()) { LOG.debug("initApplication: " + Arrays.toString(commandArray)); } try { shExec.execute(); 
if (LOG.isDebugEnabled()) { logOutput(shExec.getOutput()); } } catch (ExitCodeException e) { int exitCode = shExec.getExitCode(); LOG.warn("Exit code from container " + locId + " startLocalizer is : " + exitCode, e); logOutput(shExec.getOutput()); throw new IOException("Application " + appId + " initialization failed" + " (exitCode=" + exitCode + ") with output: " + shExec.getOutput(), e); } } @VisibleForTesting public void buildMainArgs(List<String> command, String user, String appId, String locId, InetSocketAddress nmAddr, List<String> localDirs) { ContainerLocalizer.buildMainArgs(command, user, appId, locId, nmAddr, localDirs); } @Override public int launchContainer(ContainerStartContext ctx) throws IOException { Container container = ctx.getContainer(); Path nmPrivateContainerScriptPath = ctx.getNmPrivateContainerScriptPath(); Path nmPrivateTokensPath = ctx.getNmPrivateTokensPath(); String user = ctx.getUser(); String appId = ctx.getAppId(); Path containerWorkDir = ctx.getContainerWorkDir(); List<String> localDirs = ctx.getLocalDirs(); List<String> logDirs = ctx.getLogDirs(); Map<Path, List<String>> localizedResources = ctx.getLocalizedResources(); verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); ContainerId containerId = container.getContainerId(); String containerIdStr = ConverterUtils.toString(containerId); resourcesHandler.preExecute(containerId, container.getResource()); String resourcesOptions = resourcesHandler.getResourcesOption( containerId); String tcCommandFile = null; try { if (resourceHandlerChain != null) { List<PrivilegedOperation> ops = resourceHandlerChain .preStart(container); if (ops != null) { List<PrivilegedOperation> resourceOps = new ArrayList<>(); resourceOps.add(new PrivilegedOperation (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, resourcesOptions)); for (PrivilegedOperation op : ops) { switch (op.getOperationType()) { case ADD_PID_TO_CGROUP: resourceOps.add(op); break; case TC_MODIFY_STATE: tcCommandFile = op.getArguments().get(0); break; default: LOG.warn("PrivilegedOperation type unsupported in launch: " + op.getOperationType()); } } if (resourceOps.size() > 1) { //squash resource operations try { PrivilegedOperation operation = PrivilegedOperationExecutor .squashCGroupOperations(resourceOps); resourcesOptions = operation.getArguments().get(0); } catch (PrivilegedOperationException e) { LOG.error("Failed to squash cgroup operations!", e); throw new ResourceHandlerException("Failed to squash cgroup operations!"); } } } } } catch (ResourceHandlerException e) { LOG.error("ResourceHandlerChain.preStart() failed!", e); throw new IOException("ResourceHandlerChain.preStart() failed!"); } try { Path pidFilePath = getPidFilePath(containerId); if (pidFilePath != null) { List<String> prefixCommands= new ArrayList<>(); ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext .Builder(container); addSchedPriorityCommand(prefixCommands); if (prefixCommands.size() > 0) { builder.setExecutionAttribute(CONTAINER_LAUNCH_PREFIX_COMMANDS, prefixCommands); } builder.setExecutionAttribute(LOCALIZED_RESOURCES, localizedResources) .setExecutionAttribute(RUN_AS_USER, runAsUser) .setExecutionAttribute(USER, user) .setExecutionAttribute(APPID, appId) .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr) .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir) .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, nmPrivateContainerScriptPath) .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath) 
.setExecutionAttribute(PID_FILE_PATH, pidFilePath) .setExecutionAttribute(LOCAL_DIRS, localDirs) .setExecutionAttribute(LOG_DIRS, logDirs) .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions); if (tcCommandFile != null) { builder.setExecutionAttribute(TC_COMMAND_FILE, tcCommandFile); } linuxContainerRuntime.launchContainer(builder.build()); } else { LOG.info("Container was marked as inactive. Returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } } catch (ContainerExecutionException e) { int exitCode = e.getExitCode(); LOG.warn("Exit code from container " + containerId + " is : " + exitCode); // 143 (SIGTERM) and 137 (SIGKILL) exit codes mean the container was // terminated/killed forcefully. In all other cases, log the // output if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) { LOG.warn("Exception from container-launch with container ID: " + containerId + " and exit code: " + exitCode, e); StringBuilder builder = new StringBuilder(); builder.append("Exception from container-launch.\n"); builder.append("Container id: " + containerId + "\n"); builder.append("Exit code: " + exitCode + "\n"); if (!Optional.fromNullable(e.getErrorOutput()).or("").isEmpty()) { builder.append("Exception message: " + e.getErrorOutput() + "\n"); } builder.append("Stack trace: " + StringUtils.stringifyException(e) + "\n"); if (!e.getOutput().isEmpty()) { builder.append("Shell output: " + e.getOutput() + "\n"); } String diagnostics = builder.toString(); logOutput(diagnostics); container.handle(new ContainerDiagnosticsUpdateEvent(containerId, diagnostics)); } else { container.handle(new ContainerDiagnosticsUpdateEvent(containerId, "Container killed on request. Exit code is " + exitCode)); } return exitCode; } finally { resourcesHandler.postExecute(containerId); try { if (resourceHandlerChain != null) { resourceHandlerChain.postComplete(containerId); } } catch (ResourceHandlerException e) { LOG.warn("ResourceHandlerChain.postComplete failed for " + "containerId: " + containerId + ". Exception: " + e); } } return 0; } @Override public int reacquireContainer(ContainerReacquisitionContext ctx) throws IOException, InterruptedException { ContainerId containerId = ctx.getContainerId(); try { // Resource handler chain needs to reacquire container state // as well if (resourceHandlerChain != null) { try { resourceHandlerChain.reacquireContainer(containerId); } catch (ResourceHandlerException e) { LOG.warn("ResourceHandlerChain.reacquireContainer failed for " + "containerId: " + containerId + " Exception: " + e); } } return super.reacquireContainer(ctx); } finally { resourcesHandler.postExecute(containerId); if (resourceHandlerChain != null) { try { resourceHandlerChain.postComplete(containerId); } catch (ResourceHandlerException e) { LOG.warn("ResourceHandlerChain.postComplete failed for " + "containerId: " + containerId + " Exception: " + e); } } } } @Override public boolean signalContainer(ContainerSignalContext ctx) throws IOException { Container container = ctx.getContainer(); String user = ctx.getUser(); String pid = ctx.getPid(); Signal signal = ctx.getSignal(); verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); ContainerRuntimeContext runtimeContext = new ContainerRuntimeContext .Builder(container) .setExecutionAttribute(RUN_AS_USER, runAsUser) .setExecutionAttribute(USER, user) .setExecutionAttribute(PID, pid) .setExecutionAttribute(SIGNAL, signal) .build(); try { linuxContainerRuntime.signalContainer(runtimeContext); } catch (ContainerExecutionException e) { int retCode = e.getExitCode(); if (retCode == PrivilegedOperation.ResultCode.INVALID_CONTAINER_PID.getValue()) { return false; } LOG.warn("Error in signalling container " + pid + " with " + signal + "; exit = " + retCode, e); logOutput(e.getOutput()); throw new IOException("Problem signalling container " + pid + " with " + signal + "; output: " + e.getOutput() + " and exitCode: " + retCode, e); } return true; } @Override public void deleteAsUser(DeletionAsUserContext ctx) { String user = ctx.getUser(); Path dir = ctx.getSubDir(); List<Path> baseDirs = ctx.getBasedirs(); verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); String dirString = dir == null ? "" : dir.toUri().getPath(); List<String> command = new ArrayList<String>( Arrays.asList(containerExecutorExe, runAsUser, user, Integer.toString(PrivilegedOperation. RunAsUserCommand.DELETE_AS_USER.getValue()), dirString)); List<String> pathsToDelete = new ArrayList<String>(); if (baseDirs == null || baseDirs.size() == 0) { LOG.info("Deleting absolute path : " + dir); pathsToDelete.add(dirString); } else { for (Path baseDir : baseDirs) { Path del = dir == null ?
baseDir : new Path(baseDir, dir); LOG.info("Deleting path : " + del); pathsToDelete.add(del.toString()); command.add(baseDir.toUri().getPath()); } } String[] commandArray = command.toArray(new String[command.size()]); ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray); if (LOG.isDebugEnabled()) { LOG.debug("deleteAsUser: " + Arrays.toString(commandArray)); } try { shExec.execute(); if (LOG.isDebugEnabled()) { logOutput(shExec.getOutput()); } } catch (IOException e) { int exitCode = shExec.getExitCode(); LOG.error("DeleteAsUser for " + StringUtils.join(" ", pathsToDelete) + " returned with exit code: " + exitCode, e); LOG.error("Output from LinuxContainerExecutor's deleteAsUser follows:"); logOutput(shExec.getOutput()); } } @Override public boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException { String user = ctx.getUser(); String pid = ctx.getPid(); Container container = ctx.getContainer(); // Send a test signal to the process as the user to see if it's alive return signalContainer(new ContainerSignalContext.Builder() .setContainer(container) .setUser(user) .setPid(pid) .setSignal(Signal.NULL) .build()); } public void mountCgroups(List<String> cgroupKVs, String hierarchy) throws IOException { List<String> command = new ArrayList<String>( Arrays.asList(containerExecutorExe, "--mount-cgroups", hierarchy)); command.addAll(cgroupKVs); String[] commandArray = command.toArray(new String[command.size()]); ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray); if (LOG.isDebugEnabled()) { LOG.debug("mountCgroups: " + Arrays.toString(commandArray)); } try { shExec.execute(); } catch (IOException e) { int ret_code = shExec.getExitCode(); LOG.warn("Exception in LinuxContainerExecutor mountCgroups ", e); logOutput(shExec.getOutput()); throw new IOException("Problem mounting cgroups " + cgroupKVs + "; exit code = " + ret_code + " and output: " + shExec.getOutput(), e); } } }
23,880
39.75256
120
java
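verifyUsernamePattern above enforces the user-name regex only when Kerberos security is off, and getRunAsUser maps every user to the configured nonsecure local user when user limiting is enabled. A standalone sketch of that decision logic; the pattern and user names are illustrative, not the YARN defaults:

import java.util.regex.Pattern;

public class RunAsUserSketch {
  private final boolean securityEnabled;
  private final boolean limitUsers;
  private final String nonsecureLocalUser;    // e.g. "nobody" (illustrative)
  private final Pattern nonsecureUserPattern; // illustrative, not the YARN default

  public RunAsUserSketch(boolean securityEnabled, boolean limitUsers,
      String nonsecureLocalUser, String patternRegex) {
    this.securityEnabled = securityEnabled;
    this.limitUsers = limitUsers;
    this.nonsecureLocalUser = nonsecureLocalUser;
    this.nonsecureUserPattern = Pattern.compile(patternRegex);
  }

  // Mirrors verifyUsernamePattern: only enforced in nonsecure mode.
  void verifyUsernamePattern(String user) {
    if (!securityEnabled && !nonsecureUserPattern.matcher(user).matches()) {
      throw new IllegalArgumentException("Invalid user name '" + user + "'");
    }
  }

  // Mirrors getRunAsUser: in nonsecure, user-limited mode every container
  // runs as the single configured local user.
  String getRunAsUser(String user) {
    return (securityEnabled || !limitUsers) ? user : nonsecureLocalUser;
  }
}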
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ResourceView.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; public interface ResourceView { long getVmemAllocatedForContainers(); boolean isVmemCheckEnabled(); long getPmemAllocatedForContainers(); boolean isPmemCheckEnabled(); long getVCoresAllocatedForContainers(); }
1,069
31.424242
74
java
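ResourceView is a read-only view of what the node has handed out to containers. A trivial fixed-value implementation (all numbers invented here) is often enough when exercising consumers of this interface in tests:

import org.apache.hadoop.yarn.server.nodemanager.ResourceView;

public class FixedResourceView implements ResourceView {
  @Override public long getVmemAllocatedForContainers() { return 8L * 1024 * 1024 * 1024; }
  @Override public boolean isVmemCheckEnabled() { return true; }
  @Override public long getPmemAllocatedForContainers() { return 4L * 1024 * 1024 * 1024; }
  @Override public boolean isPmemCheckEnabled() { return true; }
  @Override public long getVCoresAllocatedForContainers() { return 8; }
}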
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.io.File; import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.Timer; import java.util.TimerTask; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.DirsChangeListener; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; /** * The class which provides functionality of checking the health of the local * directories of a node. This specifically manages nodemanager-local-dirs and * nodemanager-log-dirs by periodically checking their health. */ public class LocalDirsHandlerService extends AbstractService { private static Log LOG = LogFactory.getLog(LocalDirsHandlerService.class); /** Timer used to schedule disk health monitoring code execution */ private Timer dirsHandlerScheduler; private long diskHealthCheckInterval; private boolean isDiskHealthCheckerEnabled; /** * Minimum fraction of disks to be healthy for the node to be healthy in * terms of disks. This applies to nm-local-dirs and nm-log-dirs. */ private float minNeededHealthyDisksFactor; private MonitoringTimerTask monitoringTimerTask; /** Local dirs to store localized files in */ private DirectoryCollection localDirs = null; /** storage for container logs*/ private DirectoryCollection logDirs = null; /** * Everybody should go through this LocalDirAllocator object for read/write * of any local path corresponding to {@link YarnConfiguration#NM_LOCAL_DIRS} * instead of creating his/her own LocalDirAllocator objects */ private LocalDirAllocator localDirsAllocator; /** * Everybody should go through this LocalDirAllocator object for read/write * of any local path corresponding to {@link YarnConfiguration#NM_LOG_DIRS} * instead of creating his/her own LocalDirAllocator objects */ private LocalDirAllocator logDirsAllocator; /** when disk health checking code was last run */ private long lastDisksCheckTime; private static String FILE_SCHEME = "file"; private NodeManagerMetrics nodeManagerMetrics = null; /** * Class which is used by the {@link Timer} class to periodically execute the * disks' health checker code. 
*/ private final class MonitoringTimerTask extends TimerTask { public MonitoringTimerTask(Configuration conf) throws YarnRuntimeException { float maxUsableSpacePercentagePerDisk = conf.getFloat( YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE); long minFreeSpacePerDiskMB = conf.getLong(YarnConfiguration.NM_MIN_PER_DISK_FREE_SPACE_MB, YarnConfiguration.DEFAULT_NM_MIN_PER_DISK_FREE_SPACE_MB); localDirs = new DirectoryCollection( validatePaths(conf .getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS)), maxUsableSpacePercentagePerDisk, minFreeSpacePerDiskMB); logDirs = new DirectoryCollection( validatePaths(conf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)), maxUsableSpacePercentagePerDisk, minFreeSpacePerDiskMB); localDirsAllocator = new LocalDirAllocator( YarnConfiguration.NM_LOCAL_DIRS); logDirsAllocator = new LocalDirAllocator(YarnConfiguration.NM_LOG_DIRS); } @Override public void run() { checkDirs(); } } public LocalDirsHandlerService() { this(null); } public LocalDirsHandlerService(NodeManagerMetrics nodeManagerMetrics) { super(LocalDirsHandlerService.class.getName()); this.nodeManagerMetrics = nodeManagerMetrics; } /** * Method which initializes the timertask and its interval time. * */ @Override protected void serviceInit(Configuration config) throws Exception { // Clone the configuration as we may do modifications to dirs-list Configuration conf = new Configuration(config); diskHealthCheckInterval = conf.getLong( YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS); monitoringTimerTask = new MonitoringTimerTask(conf); isDiskHealthCheckerEnabled = conf.getBoolean( YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true); minNeededHealthyDisksFactor = conf.getFloat( YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION); lastDisksCheckTime = System.currentTimeMillis(); super.serviceInit(conf); FileContext localFs; try { localFs = FileContext.getLocalFSFileContext(config); } catch (IOException e) { throw new YarnRuntimeException("Unable to get the local filesystem", e); } FsPermission perm = new FsPermission((short)0755); boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm); createSucceeded &= logDirs.createNonExistentDirs(localFs, perm); if (!createSucceeded) { updateDirsAfterTest(); } // Check the disk health immediately to weed out bad directories // before other init code attempts to use them. checkDirs(); } /** * Method used to start the disk health monitoring, if enabled. */ @Override protected void serviceStart() throws Exception { if (isDiskHealthCheckerEnabled) { dirsHandlerScheduler = new Timer("DiskHealthMonitor-Timer", true); dirsHandlerScheduler.scheduleAtFixedRate(monitoringTimerTask, diskHealthCheckInterval, diskHealthCheckInterval); } super.serviceStart(); } /** * Method used to terminate the disk health monitoring service. 
*/ @Override protected void serviceStop() throws Exception { if (dirsHandlerScheduler != null) { dirsHandlerScheduler.cancel(); } super.serviceStop(); } public void registerLocalDirsChangeListener(DirsChangeListener listener) { localDirs.registerDirsChangeListener(listener); } public void registerLogDirsChangeListener(DirsChangeListener listener) { logDirs.registerDirsChangeListener(listener); } public void deregisterLocalDirsChangeListener(DirsChangeListener listener) { localDirs.deregisterDirsChangeListener(listener); } public void deregisterLogDirsChangeListener(DirsChangeListener listener) { logDirs.deregisterDirsChangeListener(listener); } /** * @return the good/valid local directories based on disks' health */ public List<String> getLocalDirs() { return localDirs.getGoodDirs(); } /** * @return the good/valid log directories based on disks' health */ public List<String> getLogDirs() { return logDirs.getGoodDirs(); } /** * @return the local directories which have no disk space */ public List<String> getDiskFullLocalDirs() { return localDirs.getFullDirs(); } /** * @return the log directories that have no disk space */ public List<String> getDiskFullLogDirs() { return logDirs.getFullDirs(); } /** * Function to get the local dirs which should be considered for reading * existing files on disk. Contains the good local dirs and the local dirs * that have reached the disk space limit * * @return the local dirs which should be considered for reading */ public List<String> getLocalDirsForRead() { return DirectoryCollection.concat(localDirs.getGoodDirs(), localDirs.getFullDirs()); } /** * Function to get the local dirs which should be considered when cleaning up * resources. Contains the good local dirs and the local dirs that have reached * the disk space limit * * @return the local dirs which should be considered for cleaning up */ public List<String> getLocalDirsForCleanup() { return DirectoryCollection.concat(localDirs.getGoodDirs(), localDirs.getFullDirs()); } /** * Function to get the log dirs which should be considered for reading * existing files on disk. Contains the good log dirs and the log dirs that * have reached the disk space limit * * @return the log dirs which should be considered for reading */ public List<String> getLogDirsForRead() { return DirectoryCollection.concat(logDirs.getGoodDirs(), logDirs.getFullDirs()); } /** * Function to get the log dirs which should be considered when cleaning up * resources. Contains the good log dirs and the log dirs that have reached * the disk space limit * * @return the log dirs which should be considered for cleaning up */ public List<String> getLogDirsForCleanup() { return DirectoryCollection.concat(logDirs.getGoodDirs(), logDirs.getFullDirs()); } /** * Function to generate a report on the state of the disks. 
* * @param listGoodDirs * flag to determine whether the report should report the state of * good dirs or failed dirs * @return the health report of nm-local-dirs and nm-log-dirs */ public String getDisksHealthReport(boolean listGoodDirs) { if (!isDiskHealthCheckerEnabled) { return ""; } StringBuilder report = new StringBuilder(); List<String> failedLocalDirsList = localDirs.getFailedDirs(); List<String> failedLogDirsList = logDirs.getFailedDirs(); List<String> goodLocalDirsList = localDirs.getGoodDirs(); List<String> goodLogDirsList = logDirs.getGoodDirs(); int numLocalDirs = goodLocalDirsList.size() + failedLocalDirsList.size(); int numLogDirs = goodLogDirsList.size() + failedLogDirsList.size(); if (!listGoodDirs) { if (!failedLocalDirsList.isEmpty()) { report.append(failedLocalDirsList.size() + "/" + numLocalDirs + " local-dirs are bad: " + StringUtils.join(",", failedLocalDirsList) + "; "); } if (!failedLogDirsList.isEmpty()) { report.append(failedLogDirsList.size() + "/" + numLogDirs + " log-dirs are bad: " + StringUtils.join(",", failedLogDirsList)); } } else { report.append(goodLocalDirsList.size() + "/" + numLocalDirs + " local-dirs are good: " + StringUtils.join(",", goodLocalDirsList) + "; "); report.append(goodLogDirsList.size() + "/" + numLogDirs + " log-dirs are good: " + StringUtils.join(",", goodLogDirsList)); } return report.toString(); } /** * The minimum fraction of number of disks needed to be healthy for a node to * be considered healthy in terms of disks is configured using * {@link YarnConfiguration#NM_MIN_HEALTHY_DISKS_FRACTION}, with a default * value of {@link YarnConfiguration#DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION}. * @return <em>false</em> if either (a) more than the allowed percentage of * nm-local-dirs failed or (b) more than the allowed percentage of * nm-log-dirs failed. */ public boolean areDisksHealthy() { if (!isDiskHealthCheckerEnabled) { return true; } int goodDirs = getLocalDirs().size(); int failedDirs = localDirs.getFailedDirs().size(); int totalConfiguredDirs = goodDirs + failedDirs; if (goodDirs/(float)totalConfiguredDirs < minNeededHealthyDisksFactor) { return false; // Not enough healthy local-dirs } goodDirs = getLogDirs().size(); failedDirs = logDirs.getFailedDirs().size(); totalConfiguredDirs = goodDirs + failedDirs; if (goodDirs/(float)totalConfiguredDirs < minNeededHealthyDisksFactor) { return false; // Not enough healthy log-dirs } return true; } public long getLastDisksCheckTime() { return lastDisksCheckTime; } /** * Set good local dirs and good log dirs in the configuration so that the * LocalDirAllocator objects will use this updated configuration only. */ private void updateDirsAfterTest() { Configuration conf = getConfig(); List<String> localDirs = getLocalDirs(); conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, localDirs.toArray(new String[localDirs.size()])); List<String> logDirs = getLogDirs(); conf.setStrings(YarnConfiguration.NM_LOG_DIRS, logDirs.toArray(new String[logDirs.size()])); if (!areDisksHealthy()) { // Just log. LOG.error("Most of the disks failed. 
" + getDisksHealthReport(false)); } } private void logDiskStatus(boolean newDiskFailure, boolean diskTurnedGood) { if (newDiskFailure) { String report = getDisksHealthReport(false); LOG.info("Disk(s) failed: " + report); } if (diskTurnedGood) { String report = getDisksHealthReport(true); LOG.info("Disk(s) turned good: " + report); } } private void checkDirs() { boolean disksStatusChange = false; Set<String> failedLocalDirsPreCheck = new HashSet<String>(localDirs.getFailedDirs()); Set<String> failedLogDirsPreCheck = new HashSet<String>(logDirs.getFailedDirs()); if (localDirs.checkDirs()) { disksStatusChange = true; } if (logDirs.checkDirs()) { disksStatusChange = true; } Set<String> failedLocalDirsPostCheck = new HashSet<String>(localDirs.getFailedDirs()); Set<String> failedLogDirsPostCheck = new HashSet<String>(logDirs.getFailedDirs()); boolean disksFailed = false; boolean disksTurnedGood = false; disksFailed = disksTurnedBad(failedLocalDirsPreCheck, failedLocalDirsPostCheck); disksTurnedGood = disksTurnedGood(failedLocalDirsPreCheck, failedLocalDirsPostCheck); // skip check if we have new failed or good local dirs since we're going to // log anyway if (!disksFailed) { disksFailed = disksTurnedBad(failedLogDirsPreCheck, failedLogDirsPostCheck); } if (!disksTurnedGood) { disksTurnedGood = disksTurnedGood(failedLogDirsPreCheck, failedLogDirsPostCheck); } logDiskStatus(disksFailed, disksTurnedGood); if (disksStatusChange) { updateDirsAfterTest(); } updateMetrics(); lastDisksCheckTime = System.currentTimeMillis(); } private boolean disksTurnedBad(Set<String> preCheckFailedDirs, Set<String> postCheckDirs) { boolean disksFailed = false; for (String dir : postCheckDirs) { if (!preCheckFailedDirs.contains(dir)) { disksFailed = true; break; } } return disksFailed; } private boolean disksTurnedGood(Set<String> preCheckDirs, Set<String> postCheckDirs) { boolean disksTurnedGood = false; for (String dir : preCheckDirs) { if (!postCheckDirs.contains(dir)) { disksTurnedGood = true; break; } } return disksTurnedGood; } private Path getPathToRead(String pathStr, List<String> dirs) throws IOException { // remove the leading slash from the path (to make sure that the uri // resolution results in a valid path on the dir being checked) if (pathStr.startsWith("/")) { pathStr = pathStr.substring(1); } FileSystem localFS = FileSystem.getLocal(getConfig()); for (String dir : dirs) { try { Path tmpDir = new Path(dir); File tmpFile = tmpDir.isAbsolute() ? 
new File(localFS.makeQualified(tmpDir).toUri()) : new File(dir); Path file = new Path(tmpFile.getPath(), pathStr); if (localFS.exists(file)) { return file; } } catch (IOException ie) { // ignore LOG.warn("Failed to find " + pathStr + " at " + dir, ie); } } throw new IOException("Could not find " + pathStr + " in any of" + " the directories"); } public Path getLocalPathForWrite(String pathStr) throws IOException { return localDirsAllocator.getLocalPathForWrite(pathStr, getConfig()); } public Path getLocalPathForWrite(String pathStr, long size, boolean checkWrite) throws IOException { return localDirsAllocator.getLocalPathForWrite(pathStr, size, getConfig(), checkWrite); } public Path getLogPathForWrite(String pathStr, boolean checkWrite) throws IOException { return logDirsAllocator.getLocalPathForWrite(pathStr, LocalDirAllocator.SIZE_UNKNOWN, getConfig(), checkWrite); } public Path getLogPathToRead(String pathStr) throws IOException { return getPathToRead(pathStr, getLogDirsForRead()); } public static String[] validatePaths(String[] paths) { ArrayList<String> validPaths = new ArrayList<String>(); for (int i = 0; i < paths.length; ++i) { try { URI uriPath = (new Path(paths[i])).toUri(); if (uriPath.getScheme() == null || uriPath.getScheme().equals(FILE_SCHEME)) { validPaths.add(new Path(uriPath.getPath()).toString()); } else { LOG.warn(paths[i] + " is not a valid path. Path should be with " + FILE_SCHEME + " scheme or without scheme"); throw new YarnRuntimeException(paths[i] + " is not a valid path. Path should be with " + FILE_SCHEME + " scheme or without scheme"); } } catch (IllegalArgumentException e) { LOG.warn(e.getMessage()); throw new YarnRuntimeException(paths[i] + " is not a valid path. Path should be with " + FILE_SCHEME + " scheme or without scheme"); } } String[] arrValidPaths = new String[validPaths.size()]; validPaths.toArray(arrValidPaths); return arrValidPaths; } protected void updateMetrics() { if (nodeManagerMetrics != null) { nodeManagerMetrics.setBadLocalDirs(localDirs.getFailedDirs().size()); nodeManagerMetrics.setBadLogDirs(logDirs.getFailedDirs().size()); nodeManagerMetrics.setGoodLocalDirsDiskUtilizationPerc( localDirs.getGoodDirsDiskUtilizationPercentage()); nodeManagerMetrics.setGoodLogDirsDiskUtilizationPerc( logDirs.getGoodDirsDiskUtilizationPercentage()); } } }
19,426
33.753131
88
java
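The health gate in areDisksHealthy() above reduces to one ratio test per directory type: the node stays healthy while good-dirs / total-dirs meets the configured minimum fraction. Below is a minimal standalone sketch of that test; the class and method names are illustrative, not part of the NodeManager API, and 0.25f mirrors YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION. It assumes at least one directory is configured, as the real service does.

// Sketch only (not NM code): the fraction check behind areDisksHealthy().
public class DiskHealthFractionSketch {

  // Mirrors: goodDirs / (float) totalConfiguredDirs < minNeededHealthyDisksFactor
  // => unhealthy. Healthy when the good fraction meets the minimum.
  static boolean healthy(int goodDirs, int failedDirs, float minHealthyFraction) {
    int totalConfiguredDirs = goodDirs + failedDirs;
    return goodDirs / (float) totalConfiguredDirs >= minHealthyFraction;
  }

  public static void main(String[] args) {
    // With a 0.25 minimum fraction, 1 good dir out of 4 still passes...
    System.out.println(healthy(1, 3, 0.25f)); // true
    // ...while 0 good dirs out of 4 fails the node.
    System.out.println(healthy(0, 4, 0.25f)); // false
  }
}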
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.util.List; import org.apache.hadoop.yarn.api.records.ContainerId; public class CMgrCompletedContainersEvent extends ContainerManagerEvent { private final List<ContainerId> containersToCleanup; private final Reason reason; public CMgrCompletedContainersEvent(List<ContainerId> containersToCleanup, Reason reason) { super(ContainerManagerEventType.FINISH_CONTAINERS); this.containersToCleanup = containersToCleanup; this.reason = reason; } public List<ContainerId> getContainersToCleanup() { return this.containersToCleanup; } public Reason getReason() { return reason; } public enum Reason { /** * Container is killed as the NodeManager is shutting down */ ON_SHUTDOWN, /** * Container is killed as the NodeManager is re-syncing with the * ResourceManager */ ON_NODEMANAGER_RESYNC, /** * Container is killed on request by the ResourceManager */ BY_RESOURCEMANAGER } }
1,870
28.234375
76
java
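For context, a hedged sketch of how such an event is typically fired: a caller hands the ContainerManager a batch of completed containers plus the Reason through the YARN dispatcher. The helper class below and its wiring are hypothetical; only CMgrCompletedContainersEvent and its Reason enum come from the file above, and the code assumes it sits in (or imports from) the org.apache.hadoop.yarn.server.nodemanager package.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.Dispatcher;

// Hypothetical helper (not NM code): dispatch a FINISH_CONTAINERS cleanup
// request for containers the ResourceManager reported as completed.
class CompletedContainersCleanup {
  static void requestCleanup(Dispatcher dispatcher,
      List<ContainerId> completed) {
    dispatcher.getEventHandler().handle(
        new CMgrCompletedContainersEvent(
            new ArrayList<ContainerId>(completed),
            CMgrCompletedContainersEvent.Reason.BY_RESOURCEMANAGER));
  }
}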
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.io.BufferedReader; import java.io.File; import java.io.FileDescriptor; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; import java.net.InetSocketAddress; import java.net.URISyntaxException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DelegateToFileSystem; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO.Windows; import org.apache.hadoop.io.nativeio.NativeIOException; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; /** * Windows secure container executor (WSCE). * This class offers a secure container executor on Windows, similar to the * LinuxContainerExecutor. As the NM does not run in a highly privileged context, * this class delegates elevated operations to the helper hadoopwintuilsvc, * implemented by the winutils.exe running as a service. * JNI and LRPC are used to communicate with the privileged service. */ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor { private static final Log LOG = LogFactory .getLog(WindowsSecureContainerExecutor.class); public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s"; /** * This class is a container for the JNI Win32 native methods used by WSCE. 
*/ private static class Native { private static boolean nativeLoaded = false; static { if (NativeCodeLoader.isNativeCodeLoaded()) { try { initWsceNative(); nativeLoaded = true; } catch (Throwable t) { LOG.info("Unable to initialize WSCE Native libraries", t); } } } /** Initialize the JNI method ID and class ID cache */ private static native void initWsceNative(); /** * This class contains methods used by the WindowsSecureContainerExecutor * file system operations. */ public static class Elevated { private static final int MOVE_FILE = 1; private static final int COPY_FILE = 2; public static void mkdir(Path dirName) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for mkdir"); } elevatedMkDirImpl(dirName.toString()); } private static native void elevatedMkDirImpl(String dirName) throws IOException; public static void chown(Path fileName, String user, String group) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for chown"); } elevatedChownImpl(fileName.toString(), user, group); } private static native void elevatedChownImpl(String fileName, String user, String group) throws IOException; public static void move(Path src, Path dst, boolean replaceExisting) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for move"); } elevatedCopyImpl(MOVE_FILE, src.toString(), dst.toString(), replaceExisting); } public static void copy(Path src, Path dst, boolean replaceExisting) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for copy"); } elevatedCopyImpl(COPY_FILE, src.toString(), dst.toString(), replaceExisting); } private static native void elevatedCopyImpl(int operation, String src, String dst, boolean replaceExisting) throws IOException; public static void chmod(Path fileName, int mode) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for chmod"); } elevatedChmodImpl(fileName.toString(), mode); } private static native void elevatedChmodImpl(String path, int mode) throws IOException; public static void killTask(String containerName) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for killTask"); } elevatedKillTaskImpl(containerName); } private static native void elevatedKillTaskImpl(String containerName) throws IOException; public static OutputStream create(Path f, boolean append) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for create"); } long desiredAccess = Windows.GENERIC_WRITE; long shareMode = 0L; long creationDisposition = append ? 
Windows.OPEN_ALWAYS : Windows.CREATE_ALWAYS; long flags = Windows.FILE_ATTRIBUTE_NORMAL; String fileName = f.toString(); fileName = fileName.replace('/', '\\'); long hFile = elevatedCreateImpl( fileName, desiredAccess, shareMode, creationDisposition, flags); return new FileOutputStream( WinutilsProcessStub.getFileDescriptorFromHandle(hFile)); } private static native long elevatedCreateImpl(String path, long desiredAccess, long shareMode, long creationDisposition, long flags) throws IOException; public static boolean deleteFile(Path path) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for deleteFile"); } return elevatedDeletePathImpl(path.toString(), false); } public static boolean deleteDirectory(Path path) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for deleteDirectory"); } return elevatedDeletePathImpl(path.toString(), true); } public native static boolean elevatedDeletePathImpl(String path, boolean isDir) throws IOException; } /** * Wraps a process started by the winutils service helper. * */ public static class WinutilsProcessStub extends Process { private final long hProcess; private final long hThread; private boolean disposed = false; private final InputStream stdErr; private final InputStream stdOut; private final OutputStream stdIn; public WinutilsProcessStub(long hProcess, long hThread, long hStdIn, long hStdOut, long hStdErr) { this.hProcess = hProcess; this.hThread = hThread; this.stdIn = new FileOutputStream(getFileDescriptorFromHandle(hStdIn)); this.stdOut = new FileInputStream(getFileDescriptorFromHandle(hStdOut)); this.stdErr = new FileInputStream(getFileDescriptorFromHandle(hStdErr)); } public static native FileDescriptor getFileDescriptorFromHandle(long handle); @Override public native void destroy(); @Override public native int exitValue(); @Override public InputStream getErrorStream() { return stdErr; } @Override public InputStream getInputStream() { return stdOut; } @Override public OutputStream getOutputStream() { return stdIn; } @Override public native int waitFor() throws InterruptedException; public synchronized native void dispose(); public native void resume() throws NativeIOException; } public synchronized static WinutilsProcessStub createTaskAsUser( String cwd, String jobName, String user, String pidFile, String cmdLine) throws IOException { if (!nativeLoaded) { throw new IOException( "Native WSCE libraries are required for createTaskAsUser"); } synchronized(Shell.WindowsProcessLaunchLock) { return createTaskAsUser0(cwd, jobName, user, pidFile, cmdLine); } } private static native WinutilsProcessStub createTaskAsUser0( String cwd, String jobName, String user, String pidFile, String cmdLine) throws NativeIOException; } /** * A shell script wrapper builder for WSCE. * Overwrites the default behavior to remove the creation of the PID file in * the script wrapper. WSCE creates the pid file as part of launching the * task in winutils. */ private class WindowsSecureWrapperScriptBuilder extends LocalWrapperScriptBuilder { public WindowsSecureWrapperScriptBuilder(Path containerWorkDir) { super(containerWorkDir); } @Override protected void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream pout) { pout.format("@call \"%s\"", launchDst); } } /** * This is a skeleton file system used to elevate certain operations. * WSCE has to create container dirs under local/userchache/$user but * this dir itself is owned by $user, with chmod 750. 
As the NM has no * write access, it must delegate the write operations to the privileged * hadoopwintuilsvc. */ private static class ElevatedFileSystem extends DelegateToFileSystem { /** * This overrides certain RawLocalFileSystem operations to be performed by a * privileged process. * */ private static class ElevatedRawLocalFilesystem extends RawLocalFileSystem { @Override protected boolean mkOneDirWithMode(Path path, File p2f, FsPermission permission) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:mkOneDirWithMode: %s %s", path, permission)); } boolean ret = false; // File.mkdir returns false, does not throw. Must mimic it. try { Native.Elevated.mkdir(path); setPermission(path, permission); ret = true; } catch(Throwable e) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:mkOneDirWithMode: %s", org.apache.hadoop.util.StringUtils.stringifyException(e))); } } return ret; } @Override public void setPermission(Path p, FsPermission permission) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:setPermission: %s %s", p, permission)); } Native.Elevated.chmod(p, permission.toShort()); } @Override public void setOwner(Path p, String username, String groupname) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:setOwner: %s %s %s", p, username, groupname)); } Native.Elevated.chown(p, username, groupname); } @Override protected OutputStream createOutputStreamWithMode(Path f, boolean append, FsPermission permission) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:createOutputStreamWithMode: %s %b %s", f, append, permission)); } boolean success = false; OutputStream os = Native.Elevated.create(f, append); try { setPermission(f, permission); success = true; return os; } finally { if (!success) { IOUtils.cleanup(LOG, os); } } } @Override public boolean delete(Path p, boolean recursive) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:delete: %s %b", p, recursive)); } // The super delete uses the FileUtil.fullyDelete, // but we cannot rely on that because we need to use the elevated // operations to remove the files // File f = pathToFile(p); if (!f.exists()) { //no path, return false "nothing to delete" return false; } else if (f.isFile()) { return Native.Elevated.deleteFile(p); } else if (f.isDirectory()) { // This is a best-effort attempt. There are race conditions in that // child files can be created/deleted after we snapped the list. // No need to protect against that case. 
File[] files = FileUtil.listFiles(f); int childCount = files.length; if (recursive) { for(File child:files) { if (delete(new Path(child.getPath()), recursive)) { --childCount; } } } if (childCount == 0) { return Native.Elevated.deleteDirectory(p); } else { throw new IOException("Directory " + f.toString() + " is not empty"); } } else { // This can happen under race conditions if an external agent // is messing with the file type between IFs throw new IOException("Path " + f.toString() + " exists, but is neither a file nor a directory"); } } } protected ElevatedFileSystem() throws IOException, URISyntaxException { super(FsConstants.LOCAL_FS_URI, new ElevatedRawLocalFilesystem(), new Configuration(), FsConstants.LOCAL_FS_URI.getScheme(), false); } } private static class WintuilsProcessStubExecutor implements Shell.CommandExecutor { private Native.WinutilsProcessStub processStub; private StringBuilder output = new StringBuilder(); private int exitCode; private enum State { INIT, RUNNING, COMPLETE } private State state; private final String cwd; private final String jobName; private final String userName; private final String pidFile; private final String cmdLine; public WintuilsProcessStubExecutor( String cwd, String jobName, String userName, String pidFile, String cmdLine) { this.cwd = cwd; this.jobName = jobName; this.userName = userName; this.pidFile = pidFile; this.cmdLine = cmdLine; this.state = State.INIT; } private void assertComplete() throws IOException { if (state != State.COMPLETE) { throw new IOException("Process is not complete"); } } public String getOutput() throws IOException { assertComplete(); return output.toString(); } public int getExitCode() throws IOException { assertComplete(); return exitCode; } public void validateResult() throws IOException { assertComplete(); if (0 != exitCode) { LOG.warn(output.toString()); throw new IOException("Process exit code is: " + exitCode); } } private Thread startStreamReader(final InputStream stream) throws IOException { Thread streamReaderThread = new Thread() { @Override public void run() { try (BufferedReader lines = new BufferedReader( new InputStreamReader(stream, Charset.forName("UTF-8")))) { char[] buf = new char[512]; int nRead; while ((nRead = lines.read(buf, 0, buf.length)) > 0) { output.append(buf, 0, nRead); } } catch (Throwable t) { LOG.error("Error occurred reading the process stdout", t); } } }; streamReaderThread.start(); return streamReaderThread; } public void execute() throws IOException { if (state != State.INIT) { throw new IOException("Process is already started"); } processStub = Native.createTaskAsUser(cwd, jobName, userName, pidFile, cmdLine); state = State.RUNNING; Thread stdOutReader = startStreamReader(processStub.getInputStream()); Thread stdErrReader = startStreamReader(processStub.getErrorStream()); try { processStub.resume(); processStub.waitFor(); stdOutReader.join(); stdErrReader.join(); } catch(InterruptedException ie) { throw new IOException(ie); } exitCode = processStub.exitValue(); state = State.COMPLETE; } @Override public void close() { if (processStub != null) { processStub.dispose(); } } } private String nodeManagerGroup; /** * Permissions for user WSCE dirs. 
*/ static final short DIR_PERM = (short)0750; public WindowsSecureContainerExecutor() throws IOException, URISyntaxException { super(FileContext.getFileContext(new ElevatedFileSystem(), new Configuration())); } @Override public void setConf(Configuration conf) { super.setConf(conf); nodeManagerGroup = conf.get( YarnConfiguration.NM_WINDOWS_SECURE_CONTAINER_GROUP); } @Override protected String[] getRunCommand(String command, String groupId, String userName, Path pidFile, Configuration conf) { File f = new File(command); if (LOG.isDebugEnabled()) { LOG.debug(String.format("getRunCommand: %s exists:%b", command, f.exists())); } return new String[] { Shell.WINUTILS, "task", "createAsUser", groupId, userName, pidFile.toString(), "cmd /c " + command }; } @Override protected LocalWrapperScriptBuilder getLocalWrapperScriptBuilder( String containerIdStr, Path containerWorkDir) { return new WindowsSecureWrapperScriptBuilder(containerWorkDir); } @Override protected void copyFile(Path src, Path dst, String owner) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("copyFile: %s -> %s owner:%s", src.toString(), dst.toString(), owner)); } Native.Elevated.copy(src, dst, true); Native.Elevated.chown(dst, owner, nodeManagerGroup); } @Override protected void createDir(Path dirPath, FsPermission perms, boolean createParent, String owner) throws IOException { // WSCE requires dirs to be 750, not 710 as DCE. // This is similar to how LCE creates dirs // perms = new FsPermission(DIR_PERM); if (LOG.isDebugEnabled()) { LOG.debug(String.format("createDir: %s perm:%s owner:%s", dirPath.toString(), perms.toString(), owner)); } super.createDir(dirPath, perms, createParent, owner); lfs.setOwner(dirPath, owner, nodeManagerGroup); } @Override protected void setScriptExecutable(Path script, String owner) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("setScriptExecutable: %s owner:%s", script.toString(), owner)); } super.setScriptExecutable(script, owner); Native.Elevated.chown(script, owner, nodeManagerGroup); } @Override public Path localizeClasspathJar(Path classPathJar, Path pwd, String owner) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("localizeClasspathJar: %s %s o:%s", classPathJar, pwd, owner)); } createDir(pwd, new FsPermission(DIR_PERM), true, owner); String fileName = classPathJar.getName(); Path dst = new Path(pwd, fileName); Native.Elevated.move(classPathJar, dst, true); Native.Elevated.chown(dst, owner, nodeManagerGroup); return dst; } @Override public void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens(); InetSocketAddress nmAddr = ctx.getNmAddr(); String user = ctx.getUser(); String appId = ctx.getAppId(); String locId = ctx.getLocId(); LocalDirsHandlerService dirsHandler = ctx.getDirsHandler(); List<String> localDirs = dirsHandler.getLocalDirs(); List<String> logDirs = dirsHandler.getLogDirs(); Path classpathJarPrivateDir = dirsHandler.getLocalPathForWrite( ResourceLocalizationService.NM_PRIVATE_DIR); createUserLocalDirs(localDirs, user); createUserCacheDirs(localDirs, user); createAppDirs(localDirs, user, appId); createAppLogDirs(appId, logDirs, user); Path appStorageDir = getWorkingDir(localDirs, user, appId); String tokenFn = String.format( ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId); Path tokenDst = new Path(appStorageDir, tokenFn); copyFile(nmPrivateContainerTokensPath, tokenDst, user); File cwdApp = new 
File(appStorageDir.toString()); if (LOG.isDebugEnabled()) { LOG.debug(String.format("cwdApp: %s", cwdApp)); } List<String> command = new ArrayList<String>(); //use same jvm as parent File jvm = new File( new File(System.getProperty("java.home"), "bin"), "java.exe"); command.add(jvm.toString()); Path cwdPath = new Path(cwdApp.getPath()); // Build a temp classpath jar. See ContainerLaunch.sanitizeEnv(). // Passing CLASSPATH explicitly is *way* too long for command line. String classPath = System.getProperty("java.class.path"); Map<String, String> env = new HashMap<String, String>(System.getenv()); String[] jarCp = FileUtil.createJarWithClassPath(classPath, classpathJarPrivateDir, cwdPath, env); String classPathJar = localizeClasspathJar( new Path(jarCp[0]), cwdPath, user).toString(); command.add("-classpath"); command.add(classPathJar + jarCp[1]); String javaLibPath = System.getProperty("java.library.path"); if (javaLibPath != null) { command.add("-Djava.library.path=" + javaLibPath); } command.addAll(ContainerLocalizer.getJavaOpts(getConf())); ContainerLocalizer.buildMainArgs(command, user, appId, locId, nmAddr, localDirs); String cmdLine = StringUtils.join(command, " "); String localizerPid = String.format(LOCALIZER_PID_FORMAT, locId); WintuilsProcessStubExecutor stubExecutor = new WintuilsProcessStubExecutor( cwdApp.getAbsolutePath(), localizerPid, user, "nul:", cmdLine); try { stubExecutor.execute(); stubExecutor.validateResult(); } finally { stubExecutor.close(); try { killContainer(localizerPid, Signal.KILL); } catch(Throwable e) { LOG.warn(String.format( "An exception occurred during the cleanup of localizer job %s:%n%s", localizerPid, org.apache.hadoop.util.StringUtils.stringifyException(e))); } } } @Override protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, String containerIdStr, String userName, Path pidFile, Resource resource, File wordDir, Map<String, String> environment) throws IOException { return new WintuilsProcessStubExecutor( wordDir.toString(), containerIdStr, userName, pidFile.toString(), "cmd /c " + wrapperScriptPath); } @Override protected void killContainer(String pid, Signal signal) throws IOException { Native.Elevated.killTask(pid); } }
25,208
32.746988
104
java
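One piece of the executor above that rewards a closer look is ElevatedRawLocalFilesystem.delete(): it counts the directory's children and only removes the directory itself once every child was deleted. Here is a plain-java.io.File sketch of that bookkeeping, with File.delete() standing in for the Native.Elevated JNI calls (which require the winutils service); the class name is mine, not part of the source above.

import java.io.File;
import java.io.IOException;

// Sketch only: mirrors the child-count bookkeeping of
// ElevatedRawLocalFilesystem.delete(Path, boolean) without the elevated calls.
public class RecursiveDeleteSketch {

  static boolean delete(File f, boolean recursive) throws IOException {
    if (!f.exists()) {
      return false; // nothing to delete
    }
    if (f.isFile()) {
      return f.delete(); // elevated deleteFile in the real executor
    }
    File[] children = f.listFiles();
    if (children == null) {
      // neither file nor listable directory (e.g. changed underneath us)
      throw new IOException("Path " + f + " is neither a file nor a directory");
    }
    int childCount = children.length;
    if (recursive) {
      for (File child : children) {
        if (delete(child, true)) {
          --childCount; // directory may be removed only when this reaches 0
        }
      }
    }
    if (childCount == 0) {
      return f.delete(); // elevated deleteDirectory in the real executor
    }
    throw new IOException("Directory " + f + " is not empty");
  }
}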
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import org.apache.hadoop.yarn.event.AbstractEvent; public class NodeManagerEvent extends AbstractEvent<NodeManagerEventType>{ public NodeManagerEvent(NodeManagerEventType type) { super(type); } }
1,068
35.862069
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import org.apache.hadoop.yarn.event.AbstractEvent; public class ContainerManagerEvent extends AbstractEvent<ContainerManagerEventType> { public ContainerManagerEvent(ContainerManagerEventType type) { super(type); } }
1,075
34.866667
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import com.google.common.base.Optional; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; import java.util.List; import java.util.Map; import org.apache.commons.lang.math.RandomUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.CommandExecutor; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; public class DefaultContainerExecutor extends ContainerExecutor { private static final Log LOG = LogFactory .getLog(DefaultContainerExecutor.class); private static final int WIN_MAX_PATH = 260; protected final FileContext lfs; public DefaultContainerExecutor() { try { this.lfs = FileContext.getLocalFSFileContext(); } catch (UnsupportedFileSystemException e) { throw new RuntimeException(e); } } DefaultContainerExecutor(FileContext lfs) { this.lfs = lfs; } protected void copyFile(Path src, Path dst, String owner) throws IOException { lfs.util().copy(src, 
dst); } protected void setScriptExecutable(Path script, String owner) throws IOException { lfs.setPermission(script, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION); } @Override public void init() throws IOException { // nothing to do or verify here } @Override public void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { Path nmPrivateContainerTokensPath = ctx.getNmPrivateContainerTokens(); InetSocketAddress nmAddr = ctx.getNmAddr(); String user = ctx.getUser(); String appId = ctx.getAppId(); String locId = ctx.getLocId(); LocalDirsHandlerService dirsHandler = ctx.getDirsHandler(); List<String> localDirs = dirsHandler.getLocalDirs(); List<String> logDirs = dirsHandler.getLogDirs(); createUserLocalDirs(localDirs, user); createUserCacheDirs(localDirs, user); createAppDirs(localDirs, user, appId); createAppLogDirs(appId, logDirs, user); // randomly choose the local directory Path appStorageDir = getWorkingDir(localDirs, user, appId); String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId); Path tokenDst = new Path(appStorageDir, tokenFn); copyFile(nmPrivateContainerTokensPath, tokenDst, user); LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + tokenDst); FileContext localizerFc = FileContext.getFileContext( lfs.getDefaultFileSystem(), getConf()); localizerFc.setUMask(lfs.getUMask()); localizerFc.setWorkingDirectory(appStorageDir); LOG.info("Localizer CWD set to " + appStorageDir + " = " + localizerFc.getWorkingDirectory()); ContainerLocalizer localizer = new ContainerLocalizer(localizerFc, user, appId, locId, getPaths(localDirs), RecordFactoryProvider.getRecordFactory(getConf())); // TODO: DO it over RPC for maintaining similarity? localizer.runLocalization(nmAddr); } @Override public int launchContainer(ContainerStartContext ctx) throws IOException { Container container = ctx.getContainer(); Path nmPrivateContainerScriptPath = ctx.getNmPrivateContainerScriptPath(); Path nmPrivateTokensPath = ctx.getNmPrivateTokensPath(); String user = ctx.getUser(); Path containerWorkDir = ctx.getContainerWorkDir(); List<String> localDirs = ctx.getLocalDirs(); List<String> logDirs = ctx.getLogDirs(); FsPermission dirPerm = new FsPermission(APPDIR_PERM); ContainerId containerId = container.getContainerId(); // create container dirs on all disks String containerIdStr = ConverterUtils.toString(containerId); String appIdStr = ConverterUtils.toString( containerId.getApplicationAttemptId(). 
getApplicationId()); for (String sLocalDir : localDirs) { Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE); Path userdir = new Path(usersdir, user); Path appCacheDir = new Path(userdir, ContainerLocalizer.APPCACHE); Path appDir = new Path(appCacheDir, appIdStr); Path containerDir = new Path(appDir, containerIdStr); createDir(containerDir, dirPerm, true, user); } // Create the container log-dirs on all disks createContainerLogDirs(appIdStr, containerIdStr, logDirs, user); Path tmpDir = new Path(containerWorkDir, YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR); createDir(tmpDir, dirPerm, false, user); // copy container tokens to work dir Path tokenDst = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE); copyFile(nmPrivateTokensPath, tokenDst, user); // copy launch script to work dir Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT); copyFile(nmPrivateContainerScriptPath, launchDst, user); // Create new local launch wrapper script LocalWrapperScriptBuilder sb = getLocalWrapperScriptBuilder( containerIdStr, containerWorkDir); // Fail fast if attempting to launch the wrapper script would fail due to // Windows path length limitation. if (Shell.WINDOWS && sb.getWrapperScriptPath().toString().length() > WIN_MAX_PATH) { throw new IOException(String.format( "Cannot launch container using script at path %s, because it exceeds " + "the maximum supported path length of %d characters. Consider " + "configuring shorter directories in %s.", sb.getWrapperScriptPath(), WIN_MAX_PATH, YarnConfiguration.NM_LOCAL_DIRS)); } Path pidFile = getPidFilePath(containerId); if (pidFile != null) { sb.writeLocalWrapperScript(launchDst, pidFile); } else { LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } // create log dir under app // fork script Shell.CommandExecutor shExec = null; try { setScriptExecutable(launchDst, user); setScriptExecutable(sb.getWrapperScriptPath(), user); shExec = buildCommandExecutor(sb.getWrapperScriptPath().toString(), containerIdStr, user, pidFile, container.getResource(), new File(containerWorkDir.toUri().getPath()), container.getLaunchContext().getEnvironment()); if (isContainerActive(containerId)) { shExec.execute(); } else { LOG.info("Container " + containerIdStr + " was marked as inactive. Returning terminated error"); return ExitCode.TERMINATED.getExitCode(); } } catch (IOException e) { if (null == shExec) { return -1; } int exitCode = shExec.getExitCode(); LOG.warn("Exit code from container " + containerId + " is : " + exitCode); // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the container was // terminated/killed forcefully. 
In all other cases, log the // container-executor's output if (exitCode != ExitCode.FORCE_KILLED.getExitCode() && exitCode != ExitCode.TERMINATED.getExitCode()) { LOG.warn("Exception from container-launch with container ID: " + containerId + " and exit code: " + exitCode , e); StringBuilder builder = new StringBuilder(); builder.append("Exception from container-launch.\n"); builder.append("Container id: " + containerId + "\n"); builder.append("Exit code: " + exitCode + "\n"); if (!Optional.fromNullable(e.getMessage()).or("").isEmpty()) { builder.append("Exception message: " + e.getMessage() + "\n"); } builder.append("Stack trace: " + StringUtils.stringifyException(e) + "\n"); if (!shExec.getOutput().isEmpty()) { builder.append("Shell output: " + shExec.getOutput() + "\n"); } String diagnostics = builder.toString(); logOutput(diagnostics); container.handle(new ContainerDiagnosticsUpdateEvent(containerId, diagnostics)); } else { container.handle(new ContainerDiagnosticsUpdateEvent(containerId, "Container killed on request. Exit code is " + exitCode)); } return exitCode; } finally { if (shExec != null) shExec.close(); } return 0; } protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, String containerIdStr, String user, Path pidFile, Resource resource, File wordDir, Map<String, String> environment) throws IOException { String[] command = getRunCommand(wrapperScriptPath, containerIdStr, user, pidFile, this.getConf(), resource); LOG.info("launchContainer: " + Arrays.toString(command)); return new ShellCommandExecutor( command, wordDir, environment); } protected LocalWrapperScriptBuilder getLocalWrapperScriptBuilder( String containerIdStr, Path containerWorkDir) { return Shell.WINDOWS ? new WindowsLocalWrapperScriptBuilder(containerIdStr, containerWorkDir) : new UnixLocalWrapperScriptBuilder(containerWorkDir); } protected abstract class LocalWrapperScriptBuilder { private final Path wrapperScriptPath; public Path getWrapperScriptPath() { return wrapperScriptPath; } public void writeLocalWrapperScript(Path launchDst, Path pidFile) throws IOException { DataOutputStream out = null; PrintStream pout = null; try { out = lfs.create(wrapperScriptPath, EnumSet.of(CREATE, OVERWRITE)); pout = new PrintStream(out, false, "UTF-8"); writeLocalWrapperScript(launchDst, pidFile, pout); } finally { IOUtils.cleanup(LOG, pout, out); } } protected abstract void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream pout); protected LocalWrapperScriptBuilder(Path containerWorkDir) { this.wrapperScriptPath = new Path(containerWorkDir, Shell.appendScriptExtension("default_container_executor")); } } private final class UnixLocalWrapperScriptBuilder extends LocalWrapperScriptBuilder { private final Path sessionScriptPath; public UnixLocalWrapperScriptBuilder(Path containerWorkDir) { super(containerWorkDir); this.sessionScriptPath = new Path(containerWorkDir, Shell.appendScriptExtension("default_container_executor_session")); } @Override public void writeLocalWrapperScript(Path launchDst, Path pidFile) throws IOException { writeSessionScript(launchDst, pidFile); super.writeLocalWrapperScript(launchDst, pidFile); } @Override public void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream pout) { String exitCodeFile = ContainerLaunch.getExitCodeFile( pidFile.toString()); String tmpFile = exitCodeFile + ".tmp"; pout.println("#!/bin/bash"); pout.println("/bin/bash \"" + sessionScriptPath.toString() + "\""); pout.println("rc=$?"); pout.println("echo $rc > \"" + tmpFile + "\""); 
pout.println("/bin/mv -f \"" + tmpFile + "\" \"" + exitCodeFile + "\""); pout.println("exit $rc"); } private void writeSessionScript(Path launchDst, Path pidFile) throws IOException { DataOutputStream out = null; PrintStream pout = null; try { out = lfs.create(sessionScriptPath, EnumSet.of(CREATE, OVERWRITE)); pout = new PrintStream(out, false, "UTF-8"); // We need to do a move as writing to a file is not atomic // Process reading a file being written to may get garbled data // hence write pid to tmp file first followed by a mv pout.println("#!/bin/bash"); pout.println(); pout.println("echo $$ > " + pidFile.toString() + ".tmp"); pout.println("/bin/mv -f " + pidFile.toString() + ".tmp " + pidFile); String exec = Shell.isSetsidAvailable? "exec setsid" : "exec"; pout.println(exec + " /bin/bash \"" + launchDst.toUri().getPath().toString() + "\""); } finally { IOUtils.cleanup(LOG, pout, out); } lfs.setPermission(sessionScriptPath, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION); } } private final class WindowsLocalWrapperScriptBuilder extends LocalWrapperScriptBuilder { private final String containerIdStr; public WindowsLocalWrapperScriptBuilder(String containerIdStr, Path containerWorkDir) { super(containerWorkDir); this.containerIdStr = containerIdStr; } @Override public void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream pout) { // TODO: exit code script for Windows // On Windows, the pid is the container ID, so that it can also serve as // the name of the job object created by winutils for task management. // Write to temp file followed by atomic move. String normalizedPidFile = new File(pidFile.toString()).getPath(); pout.println("@echo " + containerIdStr + " > " + normalizedPidFile + ".tmp"); pout.println("@move /Y " + normalizedPidFile + ".tmp " + normalizedPidFile); pout.println("@call " + launchDst.toString()); } } @Override public boolean signalContainer(ContainerSignalContext ctx) throws IOException { String user = ctx.getUser(); String pid = ctx.getPid(); Signal signal = ctx.getSignal(); LOG.debug("Sending signal " + signal.getValue() + " to pid " + pid + " as user " + user); if (!containerIsAlive(pid)) { return false; } try { killContainer(pid, signal); } catch (IOException e) { if (!containerIsAlive(pid)) { return false; } throw e; } return true; } @Override public boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException { String pid = ctx.getPid(); return containerIsAlive(pid); } /** * Returns true if the process with the specified pid is alive. * * @param pid String pid * @return boolean true if the process is alive */ @VisibleForTesting public static boolean containerIsAlive(String pid) throws IOException { try { new ShellCommandExecutor(Shell.getCheckProcessIsAliveCommand(pid)) .execute(); // successful execution means process is alive return true; } catch (ExitCodeException e) { // failure (non-zero exit code) means process is not alive return false; } } /** * Send a specified signal to the specified pid * * @param pid the pid of the process [group] to signal. * @param signal signal to send * (for logging). 
*/ protected void killContainer(String pid, Signal signal) throws IOException { new ShellCommandExecutor(Shell.getSignalKillCommand(signal.getValue(), pid)) .execute(); } @Override public void deleteAsUser(DeletionAsUserContext ctx) throws IOException, InterruptedException { Path subDir = ctx.getSubDir(); List<Path> baseDirs = ctx.getBasedirs(); if (baseDirs == null || baseDirs.size() == 0) { LOG.info("Deleting absolute path : " + subDir); if (!lfs.delete(subDir, true)) { //Maybe retry LOG.warn("delete returned false for path: [" + subDir + "]"); } return; } for (Path baseDir : baseDirs) { Path del = subDir == null ? baseDir : new Path(baseDir, subDir); LOG.info("Deleting path : " + del); if (!lfs.delete(del, true)) { LOG.warn("delete returned false for path: [" + del + "]"); } } } /** Permissions for user dir. * $local.dir/usercache/$user */ static final short USER_PERM = (short)0750; /** Permissions for user appcache dir. * $local.dir/usercache/$user/appcache */ static final short APPCACHE_PERM = (short)0710; /** Permissions for user filecache dir. * $local.dir/usercache/$user/filecache */ static final short FILECACHE_PERM = (short)0710; /** Permissions for user app dir. * $local.dir/usercache/$user/appcache/$appId */ static final short APPDIR_PERM = (short)0710; /** Permissions for user log dir. * $logdir/$user/$appId */ static final short LOGDIR_PERM = (short)0710; private long getDiskFreeSpace(Path base) throws IOException { return lfs.getFsStatus(base).getRemaining(); } private Path getApplicationDir(Path base, String user, String appId) { return new Path(getAppcacheDir(base, user), appId); } private Path getUserCacheDir(Path base, String user) { return new Path(new Path(base, ContainerLocalizer.USERCACHE), user); } private Path getAppcacheDir(Path base, String user) { return new Path(getUserCacheDir(base, user), ContainerLocalizer.APPCACHE); } private Path getFileCacheDir(Path base, String user) { return new Path(getUserCacheDir(base, user), ContainerLocalizer.FILECACHE); } protected Path getWorkingDir(List<String> localDirs, String user, String appId) throws IOException { Path appStorageDir = null; long totalAvailable = 0L; long[] availableOnDisk = new long[localDirs.size()]; int i = 0; // randomly choose the app directory // the chance of picking a directory is proportional to // the available space on the directory. // firstly calculate the sum of all available space on these directories for (String localDir : localDirs) { Path curBase = getApplicationDir(new Path(localDir), user, appId); long space = 0L; try { space = getDiskFreeSpace(curBase); } catch (IOException e) { LOG.warn("Unable to get Free Space for " + curBase.toString(), e); } availableOnDisk[i++] = space; totalAvailable += space; } // throw an IOException if totalAvailable is 0. if (totalAvailable <= 0L) { throw new IOException("Not able to find a working directory for " + user); } // make probability to pick a directory proportional to // the available space on the directory. long randomPosition = RandomUtils.nextLong() % totalAvailable; int dir = 0; // skip zero available space directory, // because totalAvailable is greater than 0 and randomPosition // is less than totalAvailable, we can find a valid directory // with nonzero available space. 
while (availableOnDisk[dir] == 0L) { dir++; } while (randomPosition > availableOnDisk[dir]) { randomPosition -= availableOnDisk[dir++]; } appStorageDir = getApplicationDir(new Path(localDirs.get(dir)), user, appId); return appStorageDir; } protected void createDir(Path dirPath, FsPermission perms, boolean createParent, String user) throws IOException { lfs.mkdir(dirPath, perms, createParent); if (!perms.equals(perms.applyUMask(lfs.getUMask()))) { lfs.setPermission(dirPath, perms); } } /** * Initialize the local directories for a particular user. * <ul> * <li>$local.dir/usercache/$user</li> * </ul> */ void createUserLocalDirs(List<String> localDirs, String user) throws IOException { boolean userDirStatus = false; FsPermission userperms = new FsPermission(USER_PERM); for (String localDir : localDirs) { // create $local.dir/usercache/$user and its immediate parent try { createDir(getUserCacheDir(new Path(localDir), user), userperms, true, user); } catch (IOException e) { LOG.warn("Unable to create the user directory : " + localDir, e); continue; } userDirStatus = true; } if (!userDirStatus) { throw new IOException("Not able to initialize user directories " + "in any of the configured local directories for user " + user); } } /** * Initialize the local cache directories for a particular user. * <ul> * <li>$local.dir/usercache/$user</li> * <li>$local.dir/usercache/$user/appcache</li> * <li>$local.dir/usercache/$user/filecache</li> * </ul> */ void createUserCacheDirs(List<String> localDirs, String user) throws IOException { LOG.info("Initializing user " + user); boolean appcacheDirStatus = false; boolean distributedCacheDirStatus = false; FsPermission appCachePerms = new FsPermission(APPCACHE_PERM); FsPermission fileperms = new FsPermission(FILECACHE_PERM); for (String localDir : localDirs) { // create $local.dir/usercache/$user/appcache Path localDirPath = new Path(localDir); final Path appDir = getAppcacheDir(localDirPath, user); try { createDir(appDir, appCachePerms, true, user); appcacheDirStatus = true; } catch (IOException e) { LOG.warn("Unable to create app cache directory : " + appDir, e); } // create $local.dir/usercache/$user/filecache final Path distDir = getFileCacheDir(localDirPath, user); try { createDir(distDir, fileperms, true, user); distributedCacheDirStatus = true; } catch (IOException e) { LOG.warn("Unable to create file cache directory : " + distDir, e); } } if (!appcacheDirStatus) { throw new IOException("Not able to initialize app-cache directories " + "in any of the configured local directories for user " + user); } if (!distributedCacheDirStatus) { throw new IOException( "Not able to initialize distributed-cache directories " + "in any of the configured local directories for user " + user); } } /** * Initialize the local directories for a particular user. 
* <ul> * <li>$local.dir/usercache/$user/appcache/$appid</li> * </ul> * @param localDirs */ void createAppDirs(List<String> localDirs, String user, String appId) throws IOException { boolean initAppDirStatus = false; FsPermission appperms = new FsPermission(APPDIR_PERM); for (String localDir : localDirs) { Path fullAppDir = getApplicationDir(new Path(localDir), user, appId); // create $local.dir/usercache/$user/appcache/$appId try { createDir(fullAppDir, appperms, true, user); initAppDirStatus = true; } catch (IOException e) { LOG.warn("Unable to create app directory " + fullAppDir.toString(), e); } } if (!initAppDirStatus) { throw new IOException("Not able to initialize app directories " + "in any of the configured local directories for app " + appId.toString()); } } /** * Create application log directories on all disks. */ void createAppLogDirs(String appId, List<String> logDirs, String user) throws IOException { boolean appLogDirStatus = false; FsPermission appLogDirPerms = new FsPermission(LOGDIR_PERM); for (String rootLogDir : logDirs) { // create $log.dir/$appid Path appLogDir = new Path(rootLogDir, appId); try { createDir(appLogDir, appLogDirPerms, true, user); } catch (IOException e) { LOG.warn("Unable to create the app-log directory : " + appLogDir, e); continue; } appLogDirStatus = true; } if (!appLogDirStatus) { throw new IOException("Not able to initialize app-log directories " + "in any of the configured local directories for app " + appId); } } /** * Create application log directories on all disks. */ void createContainerLogDirs(String appId, String containerId, List<String> logDirs, String user) throws IOException { boolean containerLogDirStatus = false; FsPermission containerLogDirPerms = new FsPermission(LOGDIR_PERM); for (String rootLogDir : logDirs) { // create $log.dir/$appid/$containerid Path appLogDir = new Path(rootLogDir, appId); Path containerLogDir = new Path(appLogDir, containerId); try { createDir(containerLogDir, containerLogDirPerms, true, user); } catch (IOException e) { LOG.warn("Unable to create the container-log directory : " + appLogDir, e); continue; } containerLogDirStatus = true; } if (!containerLogDirStatus) { throw new IOException( "Not able to initialize container-log directories " + "in any of the configured local directories for container " + containerId); } } /** * @return the list of paths of given local directories */ private static List<Path> getPaths(List<String> dirs) { List<Path> paths = new ArrayList<Path>(dirs.size()); for (int i = 0; i < dirs.size(); i++) { paths.add(new Path(dirs.get(i))); } return paths; } }
27,410
35.162269
108
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import org.apache.hadoop.service.Service; public interface NodeResourceMonitor extends Service { }
945
35.384615
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.NodeHealthScriptRunner; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMLeveldbStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import 
org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import com.google.common.annotations.VisibleForTesting; public class NodeManager extends CompositeService implements EventHandler<NodeManagerEvent> { /** * Priority of the NodeManager shutdown hook. */ public static final int SHUTDOWN_HOOK_PRIORITY = 30; private static final Log LOG = LogFactory.getLog(NodeManager.class); private static long nmStartupTime = System.currentTimeMillis(); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); private JvmPauseMonitor pauseMonitor; private ApplicationACLsManager aclsManager; private NodeHealthCheckerService nodeHealthChecker; private NodeLabelsProvider nodeLabelsProvider; private LocalDirsHandlerService dirsHandler; private Context context; private AsyncDispatcher dispatcher; private ContainerManagerImpl containerManager; private NodeStatusUpdater nodeStatusUpdater; private static CompositeServiceShutdownHook nodeManagerShutdownHook; private NMStateStoreService nmStore = null; private AtomicBoolean isStopping = new AtomicBoolean(false); private boolean rmWorkPreservingRestartEnabled; private boolean shouldExitOnShutdownEvent = false; public NodeManager() { super(NodeManager.class.getName()); } public static long getNMStartupTime() { return nmStartupTime; } protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics, nodeLabelsProvider); } protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker, NodeLabelsProvider nodeLabelsProvider) { return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics, nodeLabelsProvider); } @VisibleForTesting protected NodeLabelsProvider createNodeLabelsProvider( Configuration conf) throws IOException { // TODO as part of YARN-2729 // Need to get the implementation of provider service and return return null; } protected NodeResourceMonitor createNodeResourceMonitor() { return new NodeResourceMonitorImpl(); } protected ContainerManagerImpl createContainerManager(Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler) { return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater, metrics, aclsManager, dirsHandler); } protected WebServer createWebServer(Context nmContext, ResourceView resourceView, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler) { return new WebServer(nmContext, resourceView, aclsManager, dirsHandler); } protected DeletionService createDeletionService(ContainerExecutor exec) { return new DeletionService(exec, nmStore); } protected NMContext createNMContext( NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager, NMStateStoreService stateStore) { return new NMContext(containerTokenSecretManager, nmTokenSecretManager, dirsHandler, aclsManager, stateStore); } protected void doSecureLogin() throws IOException { SecurityUtil.login(getConfig(), YarnConfiguration.NM_KEYTAB, YarnConfiguration.NM_PRINCIPAL); } private void initAndStartRecoveryStore(Configuration conf) throws IOException { boolean recoveryEnabled = conf.getBoolean( 
YarnConfiguration.NM_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED); if (recoveryEnabled) { FileSystem recoveryFs = FileSystem.getLocal(conf); String recoveryDirName = conf.get(YarnConfiguration.NM_RECOVERY_DIR); if (recoveryDirName == null) { throw new IllegalArgumentException("Recovery is enabled but " + YarnConfiguration.NM_RECOVERY_DIR + " is not set."); } Path recoveryRoot = new Path(recoveryDirName); recoveryFs.mkdirs(recoveryRoot, new FsPermission((short)0700)); nmStore = new NMLeveldbStateStoreService(); } else { nmStore = new NMNullStateStoreService(); } nmStore.init(conf); nmStore.start(); } private void stopRecoveryStore() throws IOException { if (null != nmStore) { nmStore.stop(); if (null != context) { if (context.getDecommissioned() && nmStore.canRecover()) { LOG.info("Removing state store due to decommission"); Configuration conf = getConfig(); Path recoveryRoot = new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR)); LOG.info("Removing state store at " + recoveryRoot + " due to decommission"); FileSystem recoveryFs = FileSystem.getLocal(conf); if (!recoveryFs.delete(recoveryRoot, true)) { LOG.warn("Unable to delete " + recoveryRoot); } } } } } private void recoverTokens(NMTokenSecretManagerInNM nmTokenSecretManager, NMContainerTokenSecretManager containerTokenSecretManager) throws IOException { if (nmStore.canRecover()) { nmTokenSecretManager.recover(); containerTokenSecretManager.recover(); } } public static NodeHealthScriptRunner getNodeHealthScriptRunner(Configuration conf) { String nodeHealthScript = conf.get(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_PATH); if(!NodeHealthScriptRunner.shouldRun(nodeHealthScript)) { LOG.info("Node Manager health check script is not available " + "or doesn't have execute permission, so not " + "starting the node health script runner."); return null; } long nmCheckintervalTime = conf.getLong( YarnConfiguration.NM_HEALTH_CHECK_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS); long scriptTimeout = conf.getLong( YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS, YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS); String[] scriptArgs = conf.getStrings( YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_OPTS, new String[] {}); return new NodeHealthScriptRunner(nodeHealthScript, nmCheckintervalTime, scriptTimeout, scriptArgs); } @Override protected void serviceInit(Configuration conf) throws Exception { conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); rmWorkPreservingRestartEnabled = conf.getBoolean(YarnConfiguration .RM_WORK_PRESERVING_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED); initAndStartRecoveryStore(conf); NMContainerTokenSecretManager containerTokenSecretManager = new NMContainerTokenSecretManager(conf, nmStore); NMTokenSecretManagerInNM nmTokenSecretManager = new NMTokenSecretManagerInNM(nmStore); recoverTokens(nmTokenSecretManager, containerTokenSecretManager); this.aclsManager = new ApplicationACLsManager(conf); ContainerExecutor exec = ReflectionUtils.newInstance( conf.getClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class, ContainerExecutor.class), conf); try { exec.init(); } catch (IOException e) { throw new YarnRuntimeException("Failed to initialize container executor", e); } DeletionService del = createDeletionService(exec); addService(del); // NodeManager level dispatcher this.dispatcher = new AsyncDispatcher(); dirsHandler = new LocalDirsHandlerService(metrics); nodeHealthChecker = new NodeHealthCheckerService( 
getNodeHealthScriptRunner(conf), dirsHandler); addService(nodeHealthChecker); this.context = createNMContext(containerTokenSecretManager, nmTokenSecretManager, nmStore); nodeLabelsProvider = createNodeLabelsProvider(conf); if (null == nodeLabelsProvider) { nodeStatusUpdater = createNodeStatusUpdater(context, dispatcher, nodeHealthChecker); } else { addService(nodeLabelsProvider); nodeStatusUpdater = createNodeStatusUpdater(context, dispatcher, nodeHealthChecker, nodeLabelsProvider); } NodeResourceMonitor nodeResourceMonitor = createNodeResourceMonitor(); addService(nodeResourceMonitor); containerManager = createContainerManager(context, exec, del, nodeStatusUpdater, this.aclsManager, dirsHandler); addService(containerManager); ((NMContext) context).setContainerManager(containerManager); WebServer webServer = createWebServer(context, containerManager .getContainersMonitor(), this.aclsManager, dirsHandler); addService(webServer); ((NMContext) context).setWebServer(webServer); dispatcher.register(ContainerManagerEventType.class, containerManager); dispatcher.register(NodeManagerEventType.class, this); addService(dispatcher); pauseMonitor = new JvmPauseMonitor(conf); metrics.getJvmMetrics().setPauseMonitor(pauseMonitor); DefaultMetricsSystem.initialize("NodeManager"); // StatusUpdater should be added last so that it get started last // so that we make sure everything is up before registering with RM. addService(nodeStatusUpdater); super.serviceInit(conf); // TODO add local dirs to del } @Override protected void serviceStart() throws Exception { try { doSecureLogin(); } catch (IOException e) { throw new YarnRuntimeException("Failed NodeManager login", e); } pauseMonitor.start(); super.serviceStart(); } @Override protected void serviceStop() throws Exception { if (isStopping.getAndSet(true)) { return; } try { super.serviceStop(); DefaultMetricsSystem.shutdown(); if (pauseMonitor != null) { pauseMonitor.stop(); } } finally { // YARN-3641: NM's services stop get failed shouldn't block the // release of NMLevelDBStore. 
stopRecoveryStore(); } } public String getName() { return "NodeManager"; } protected void shutDown() { new Thread() { @Override public void run() { try { NodeManager.this.stop(); } catch (Throwable t) { LOG.error("Error while shutting down NodeManager", t); } finally { if (shouldExitOnShutdownEvent && !ShutdownHookManager.get().isShutdownInProgress()) { ExitUtil.terminate(-1); } } } }.start(); } protected void resyncWithRM() { //we do not want to block dispatcher thread here new Thread() { @Override public void run() { try { LOG.info("Notifying ContainerManager to block new container-requests"); containerManager.setBlockNewContainerRequests(true); if (!rmWorkPreservingRestartEnabled) { LOG.info("Cleaning up running containers on resync"); containerManager.cleanupContainersOnNMResync(); } else { LOG.info("Preserving containers on resync"); } ((NodeStatusUpdaterImpl) nodeStatusUpdater) .rebootNodeStatusUpdaterAndRegisterWithRM(); } catch (YarnRuntimeException e) { LOG.fatal("Error while rebooting NodeStatusUpdater.", e); shutDown(); } } }.start(); } public static class NMContext implements Context { private NodeId nodeId = null; protected final ConcurrentMap<ApplicationId, Application> applications = new ConcurrentHashMap<ApplicationId, Application>(); private volatile Map<ApplicationId, Credentials> systemCredentials = new HashMap<ApplicationId, Credentials>(); protected final ConcurrentMap<ContainerId, Container> containers = new ConcurrentSkipListMap<ContainerId, Container>(); private final NMContainerTokenSecretManager containerTokenSecretManager; private final NMTokenSecretManagerInNM nmTokenSecretManager; private ContainerManagementProtocol containerManager; private final LocalDirsHandlerService dirsHandler; private final ApplicationACLsManager aclsManager; private WebServer webServer; private final NodeHealthStatus nodeHealthStatus = RecordFactoryProvider .getRecordFactory(null).newRecordInstance(NodeHealthStatus.class); private final NMStateStoreService stateStore; private boolean isDecommissioned = false; private final ConcurrentLinkedQueue<LogAggregationReport> logAggregationReportForApps; public NMContext(NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager, LocalDirsHandlerService dirsHandler, ApplicationACLsManager aclsManager, NMStateStoreService stateStore) { this.containerTokenSecretManager = containerTokenSecretManager; this.nmTokenSecretManager = nmTokenSecretManager; this.dirsHandler = dirsHandler; this.aclsManager = aclsManager; this.nodeHealthStatus.setIsNodeHealthy(true); this.nodeHealthStatus.setHealthReport("Healthy"); this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis()); this.stateStore = stateStore; this.logAggregationReportForApps = new ConcurrentLinkedQueue< LogAggregationReport>(); } /** * Usable only after ContainerManager is started. 
*/ @Override public NodeId getNodeId() { return this.nodeId; } @Override public int getHttpPort() { return this.webServer.getPort(); } @Override public ConcurrentMap<ApplicationId, Application> getApplications() { return this.applications; } @Override public ConcurrentMap<ContainerId, Container> getContainers() { return this.containers; } @Override public NMContainerTokenSecretManager getContainerTokenSecretManager() { return this.containerTokenSecretManager; } @Override public NMTokenSecretManagerInNM getNMTokenSecretManager() { return this.nmTokenSecretManager; } @Override public NodeHealthStatus getNodeHealthStatus() { return this.nodeHealthStatus; } @Override public ContainerManagementProtocol getContainerManager() { return this.containerManager; } public void setContainerManager(ContainerManagementProtocol containerManager) { this.containerManager = containerManager; } public void setWebServer(WebServer webServer) { this.webServer = webServer; } public void setNodeId(NodeId nodeId) { this.nodeId = nodeId; } @Override public LocalDirsHandlerService getLocalDirsHandler() { return dirsHandler; } @Override public ApplicationACLsManager getApplicationACLsManager() { return aclsManager; } @Override public NMStateStoreService getNMStateStore() { return stateStore; } @Override public boolean getDecommissioned() { return isDecommissioned; } @Override public void setDecommissioned(boolean isDecommissioned) { this.isDecommissioned = isDecommissioned; } @Override public Map<ApplicationId, Credentials> getSystemCredentialsForApps() { return systemCredentials; } public void setSystemCrendentialsForApps( Map<ApplicationId, Credentials> systemCredentials) { this.systemCredentials = systemCredentials; } @Override public ConcurrentLinkedQueue<LogAggregationReport> getLogAggregationStatusForApps() { return this.logAggregationReportForApps; } } /** * @return the node health checker */ public NodeHealthCheckerService getNodeHealthChecker() { return nodeHealthChecker; } private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) { try { // Remove the old hook if we are rebooting. if (hasToReboot && null != nodeManagerShutdownHook) { ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook); } nodeManagerShutdownHook = new CompositeServiceShutdownHook(this); ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook, SHUTDOWN_HOOK_PRIORITY); // System exit should be called only when NodeManager is instantiated from // main() function this.shouldExitOnShutdownEvent = true; this.init(conf); this.start(); } catch (Throwable t) { LOG.fatal("Error starting NodeManager", t); System.exit(-1); } } @Override public void handle(NodeManagerEvent event) { switch (event.getType()) { case SHUTDOWN: shutDown(); break; case RESYNC: resyncWithRM(); break; default: LOG.warn("Invalid shutdown event " + event.getType() + ". Ignoring.");
} } // For testing NodeManager createNewNodeManager() { return new NodeManager(); } // For testing ContainerManagerImpl getContainerManager() { return containerManager; } // For testing Dispatcher getNMDispatcher() { return dispatcher; } @VisibleForTesting public Context getNMContext() { return this.context; } public static void main(String[] args) throws IOException { Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); StringUtils.startupShutdownMessage(NodeManager.class, args, LOG); NodeManager nodeManager = new NodeManager(); Configuration conf = new YarnConfiguration(); new GenericOptionsParser(conf, args); nodeManager.initAndStartNodeManager(conf, false); } @VisibleForTesting @Private public NodeStatusUpdater getNodeStatusUpdater() { return nodeStatusUpdater; } }
21,714
34.13754
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.security; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState; import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.MasterKeyData; /** * The NM maintains only two master-keys. The current key that RM knows and the * key from the previous rolling-interval. 
* */ public class NMContainerTokenSecretManager extends BaseContainerTokenSecretManager { private static final Log LOG = LogFactory .getLog(NMContainerTokenSecretManager.class); private MasterKeyData previousMasterKey; private final TreeMap<Long, List<ContainerId>> recentlyStartedContainerTracker; private final NMStateStoreService stateStore; private String nodeHostAddr; public NMContainerTokenSecretManager(Configuration conf) { this(conf, new NMNullStateStoreService()); } public NMContainerTokenSecretManager(Configuration conf, NMStateStoreService stateStore) { super(conf); recentlyStartedContainerTracker = new TreeMap<Long, List<ContainerId>>(); this.stateStore = stateStore; } public synchronized void recover() throws IOException { RecoveredContainerTokensState state = stateStore.loadContainerTokensState(); MasterKey key = state.getCurrentMasterKey(); if (key != null) { super.currentMasterKey = new MasterKeyData(key, createSecretKey(key.getBytes().array())); } key = state.getPreviousMasterKey(); if (key != null) { previousMasterKey = new MasterKeyData(key, createSecretKey(key.getBytes().array())); } // restore the serial number from the current master key if (super.currentMasterKey != null) { super.serialNo = super.currentMasterKey.getMasterKey().getKeyId() + 1; } for (Entry<ContainerId, Long> entry : state.getActiveTokens().entrySet()) { ContainerId containerId = entry.getKey(); Long expTime = entry.getValue(); List<ContainerId> containerList = recentlyStartedContainerTracker.get(expTime); if (containerList == null) { containerList = new ArrayList<ContainerId>(); recentlyStartedContainerTracker.put(expTime, containerList); } if (!containerList.contains(containerId)) { containerList.add(containerId); } } } private void updateCurrentMasterKey(MasterKeyData key) { super.currentMasterKey = key; try { stateStore.storeContainerTokenCurrentMasterKey(key.getMasterKey()); } catch (IOException e) { LOG.error("Unable to update current master key in state store", e); } } private void updatePreviousMasterKey(MasterKeyData key) { previousMasterKey = key; try { stateStore.storeContainerTokenPreviousMasterKey(key.getMasterKey()); } catch (IOException e) { LOG.error("Unable to update previous master key in state store", e); } } /** * Used by NodeManagers to create a token-secret-manager with the key obtained * from the RM. This can happen during registration or when the RM rolls the * master-key and signals the NM. * * @param masterKeyRecord */ @Private public synchronized void setMasterKey(MasterKey masterKeyRecord) { // Update keys only if the key has changed. if (super.currentMasterKey == null || super.currentMasterKey.getMasterKey() .getKeyId() != masterKeyRecord.getKeyId()) { LOG.info("Rolling master-key for container-tokens, got key with id " + masterKeyRecord.getKeyId()); if (super.currentMasterKey != null) { updatePreviousMasterKey(super.currentMasterKey); } updateCurrentMasterKey(new MasterKeyData(masterKeyRecord, createSecretKey(masterKeyRecord.getBytes().array()))); } } /** * Override of this is to validate ContainerTokens generated by using * different {@link MasterKey}s. 
*/ @Override public synchronized byte[] retrievePassword( ContainerTokenIdentifier identifier) throws SecretManager.InvalidToken { int keyId = identifier.getMasterKeyId(); MasterKeyData masterKeyToUse = null; if (this.previousMasterKey != null && keyId == this.previousMasterKey.getMasterKey().getKeyId()) { // A container-launch has come in with a token generated off the last // master-key masterKeyToUse = this.previousMasterKey; } else if (keyId == super.currentMasterKey.getMasterKey().getKeyId()) { // A container-launch has come in with a token generated off the current // master-key masterKeyToUse = super.currentMasterKey; } if (nodeHostAddr != null && !identifier.getNmHostAddress().equals(nodeHostAddr)) { // Valid container token used for incorrect node. throw new SecretManager.InvalidToken("Given Container " + identifier.getContainerID().toString() + " identifier is not valid for current Node manager. Expected : " + nodeHostAddr + " Found : " + identifier.getNmHostAddress()); } if (masterKeyToUse != null) { return retrievePasswordInternal(identifier, masterKeyToUse); } // Invalid request. Like startContainer() with token generated off // old-master-keys. throw new SecretManager.InvalidToken("Given Container " + identifier.getContainerID().toString() + " seems to have an illegally generated token."); } /** * Container start has gone through. We need to store the containerId in order * to block future container start requests with same container token. This * container token needs to be saved till its container token expires. */ public synchronized void startContainerSuccessful( ContainerTokenIdentifier tokenId) { removeAnyContainerTokenIfExpired(); ContainerId containerId = tokenId.getContainerID(); Long expTime = tokenId.getExpiryTimeStamp(); // We might have multiple containers with same expiration time. if (!recentlyStartedContainerTracker.containsKey(expTime)) { recentlyStartedContainerTracker .put(expTime, new ArrayList<ContainerId>()); } recentlyStartedContainerTracker.get(expTime).add(containerId); try { stateStore.storeContainerToken(containerId, expTime); } catch (IOException e) { LOG.error("Unable to store token for container " + containerId, e); } } protected synchronized void removeAnyContainerTokenIfExpired() { // Trying to remove any container if its container token has expired. Iterator<Entry<Long, List<ContainerId>>> containersI = this.recentlyStartedContainerTracker.entrySet().iterator(); Long currTime = System.currentTimeMillis(); while (containersI.hasNext()) { Entry<Long, List<ContainerId>> containerEntry = containersI.next(); if (containerEntry.getKey() < currTime) { for (ContainerId container : containerEntry.getValue()) { try { stateStore.removeContainerToken(container); } catch (IOException e) { LOG.error("Unable to remove token for container " + container, e); } } containersI.remove(); } else { break; } } } /** * Container will be remembered based on expiration time of the container * token used for starting the container. It is safe to use expiration time * as there is one to many mapping between expiration time and containerId. * @return true if the current token identifier is not present in cache. 
*/ public synchronized boolean isValidStartContainerRequest( ContainerTokenIdentifier containerTokenIdentifier) { removeAnyContainerTokenIfExpired(); Long expTime = containerTokenIdentifier.getExpiryTimeStamp(); List<ContainerId> containers = this.recentlyStartedContainerTracker.get(expTime); if (containers == null || !containers.contains(containerTokenIdentifier.getContainerID())) { return true; } else { return false; } } public synchronized void setNodeId(NodeId nodeId) { nodeHostAddr = nodeId.toString(); LOG.info("Updating node address : " + nodeHostAddr); } }
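// ---------------------------------------------------------------------------
// Editor's note (illustrative addition, not part of the original file): the
// secret manager above accepts container tokens signed with either the
// current master key or the one previous key, so tokens minted just before a
// roll remain verifiable. A dependency-free sketch of that two-key rule:
class TwoKeyValidatorSketch {
  private Integer currentKeyId;
  private Integer previousKeyId;

  // Mirrors setMasterKey(): roll only when the key id actually changes.
  synchronized void setMasterKey(int newKeyId) {
    if (currentKeyId == null || currentKeyId != newKeyId) {
      previousKeyId = currentKeyId;
      currentKeyId = newKeyId;
    }
  }

  // Mirrors retrievePassword(): only the current or immediately previous
  // key id is acceptable; anything older is rejected.
  synchronized boolean isAcceptable(int tokenKeyId) {
    return (currentKeyId != null && tokenKeyId == currentKeyId)
        || (previousKeyId != null && tokenKeyId == previousKeyId);
  }

  public static void main(String[] args) {
    TwoKeyValidatorSketch v = new TwoKeyValidatorSketch();
    v.setMasterKey(1);
    v.setMasterKey(2);                      // key 1 becomes the previous key
    System.out.println(v.isAcceptable(2));  // true  (current)
    System.out.println(v.isAcceptable(1));  // true  (previous)
    v.setMasterKey(3);                      // key 1 is now two rolls old
    System.out.println(v.isAcceptable(1));  // false
  }
}
// ---------------------------------------------------------------------------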
9,872
36.683206
108
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.security; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.security.NMTokenIdentifier; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState; import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; import org.apache.hadoop.yarn.server.security.MasterKeyData; import com.google.common.annotations.VisibleForTesting; public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager { private static final Log LOG = LogFactory .getLog(NMTokenSecretManagerInNM.class); private MasterKeyData previousMasterKey; private final Map<ApplicationAttemptId, MasterKeyData> oldMasterKeys; private final Map<ApplicationId, List<ApplicationAttemptId>> appToAppAttemptMap; private final NMStateStoreService stateStore; private NodeId nodeId; public NMTokenSecretManagerInNM() { this(new NMNullStateStoreService()); } public NMTokenSecretManagerInNM(NMStateStoreService stateStore) { this.oldMasterKeys = new HashMap<ApplicationAttemptId, MasterKeyData>(); appToAppAttemptMap = new HashMap<ApplicationId, List<ApplicationAttemptId>>(); this.stateStore = stateStore; } public synchronized void recover() throws IOException { RecoveredNMTokensState state = stateStore.loadNMTokensState(); MasterKey key = state.getCurrentMasterKey(); if (key != null) { super.currentMasterKey = new MasterKeyData(key, createSecretKey(key.getBytes().array())); } key = state.getPreviousMasterKey(); if (key != null) { previousMasterKey = new MasterKeyData(key, createSecretKey(key.getBytes().array())); } // restore the serial number from the current master key if (super.currentMasterKey != null) { super.serialNo = super.currentMasterKey.getMasterKey().getKeyId() + 1; } for (Map.Entry<ApplicationAttemptId, MasterKey> entry : state.getApplicationMasterKeys().entrySet()) { key = entry.getValue(); oldMasterKeys.put(entry.getKey(), new MasterKeyData(key, createSecretKey(key.getBytes().array()))); } // reconstruct app to app attempts map appToAppAttemptMap.clear(); for (ApplicationAttemptId attempt : oldMasterKeys.keySet()) { 
ApplicationId app = attempt.getApplicationId(); List<ApplicationAttemptId> attempts = appToAppAttemptMap.get(app); if (attempts == null) { attempts = new ArrayList<ApplicationAttemptId>(); appToAppAttemptMap.put(app, attempts); } attempts.add(attempt); } } private void updateCurrentMasterKey(MasterKeyData key) { super.currentMasterKey = key; try { stateStore.storeNMTokenCurrentMasterKey(key.getMasterKey()); } catch (IOException e) { LOG.error("Unable to update current master key in state store", e); } } private void updatePreviousMasterKey(MasterKeyData key) { previousMasterKey = key; try { stateStore.storeNMTokenPreviousMasterKey(key.getMasterKey()); } catch (IOException e) { LOG.error("Unable to update previous master key in state store", e); } } /** * Used by NodeManagers to create a token-secret-manager with the key * obtained from the RM. This can happen during registration or when the RM * rolls the master-key and signal the NM. */ @Private public synchronized void setMasterKey(MasterKey masterKey) { // Update keys only if the key has changed. if (super.currentMasterKey == null || super.currentMasterKey.getMasterKey() .getKeyId() != masterKey.getKeyId()) { LOG.info("Rolling master-key for container-tokens, got key with id " + masterKey.getKeyId()); if (super.currentMasterKey != null) { updatePreviousMasterKey(super.currentMasterKey); } updateCurrentMasterKey(new MasterKeyData(masterKey, createSecretKey(masterKey.getBytes().array()))); } } /** * This method will be used to verify NMTokens generated by different master * keys. */ @Override public synchronized byte[] retrievePassword(NMTokenIdentifier identifier) throws InvalidToken { int keyId = identifier.getKeyId(); ApplicationAttemptId appAttemptId = identifier.getApplicationAttemptId(); /* * MasterKey used for retrieving password will be as follows. 1) By default * older saved master key will be used. 2) If identifier's master key id * matches that of previous master key id then previous key will be used. 3) * If identifier's master key id matches that of current master key id then * current key will be used. */ MasterKeyData oldMasterKey = oldMasterKeys.get(appAttemptId); MasterKeyData masterKeyToUse = oldMasterKey; if (previousMasterKey != null && keyId == previousMasterKey.getMasterKey().getKeyId()) { masterKeyToUse = previousMasterKey; } else if (keyId == currentMasterKey.getMasterKey().getKeyId()) { masterKeyToUse = currentMasterKey; } if (nodeId != null && !identifier.getNodeId().equals(nodeId)) { throw new InvalidToken("Given NMToken for application : " + appAttemptId.toString() + " is not valid for current node manager." 
+ "expected : " + nodeId.toString() + " found : " + identifier.getNodeId().toString()); } if (masterKeyToUse != null) { byte[] password = retrivePasswordInternal(identifier, masterKeyToUse); LOG.debug("NMToken password retrieved successfully!!"); return password; } throw new InvalidToken("Given NMToken for application : " + appAttemptId.toString() + " seems to have been generated illegally."); } public synchronized void appFinished(ApplicationId appId) { List<ApplicationAttemptId> appAttemptList = appToAppAttemptMap.get(appId); if (appAttemptList != null) { LOG.debug("Removing application attempts NMToken keys for application " + appId); for (ApplicationAttemptId appAttemptId : appAttemptList) { removeAppAttemptKey(appAttemptId); } appToAppAttemptMap.remove(appId); } else { LOG.error("No application Attempt for application : " + appId + " started on this NM."); } } /** * This will be called by startContainer. It will add the master key into * the cache used for starting this container. This should be called before * validating the startContainer request. */ public synchronized void appAttemptStartContainer( NMTokenIdentifier identifier) throws org.apache.hadoop.security.token.SecretManager.InvalidToken { ApplicationAttemptId appAttemptId = identifier.getApplicationAttemptId(); if (!appToAppAttemptMap.containsKey(appAttemptId.getApplicationId())) { // First application attempt for the given application appToAppAttemptMap.put(appAttemptId.getApplicationId(), new ArrayList<ApplicationAttemptId>()); } MasterKeyData oldKey = oldMasterKeys.get(appAttemptId); if (oldKey == null) { // This is a new application attempt. appToAppAttemptMap.get(appAttemptId.getApplicationId()).add(appAttemptId); } if (oldKey == null || oldKey.getMasterKey().getKeyId() != identifier.getKeyId()) { // Update key only if it is modified. LOG.debug("NMToken key updated for application attempt : " + identifier.getApplicationAttemptId().toString()); if (identifier.getKeyId() == currentMasterKey.getMasterKey() .getKeyId()) { updateAppAttemptKey(appAttemptId, currentMasterKey); } else if (previousMasterKey != null && identifier.getKeyId() == previousMasterKey.getMasterKey() .getKeyId()) { updateAppAttemptKey(appAttemptId, previousMasterKey); } else { throw new InvalidToken( "Older NMToken should not be used while starting the container."); } } } public synchronized void setNodeId(NodeId nodeId) { LOG.debug("updating nodeId : " + nodeId); this.nodeId = nodeId; } @Private @VisibleForTesting public synchronized boolean isAppAttemptNMTokenKeyPresent(ApplicationAttemptId appAttemptId) { return oldMasterKeys.containsKey(appAttemptId); } @Private @VisibleForTesting public synchronized NodeId getNodeId() { return this.nodeId; } private void updateAppAttemptKey(ApplicationAttemptId attempt, MasterKeyData key) { this.oldMasterKeys.put(attempt, key); try { stateStore.storeNMTokenApplicationMasterKey(attempt, key.getMasterKey()); } catch (IOException e) { LOG.error("Unable to store master key for application " + attempt, e); } } private void removeAppAttemptKey(ApplicationAttemptId attempt) { this.oldMasterKeys.remove(attempt); try { stateStore.removeNMTokenApplicationMasterKey(attempt); } catch (IOException e) { LOG.error("Unable to remove master key for application " + attempt, e); } } }
10,626
36.953571
101
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.security.authorize; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; /** * {@link PolicyProvider} for YARN NodeManager protocols. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class NMPolicyProvider extends PolicyProvider { private static final Service[] nodeManagerServices = new Service[] { new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL, ContainerManagementProtocolPB.class), new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER, LocalizationProtocolPB.class) }; @Override public Service[] getServices() { return nodeManagerServices; } }
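// ---------------------------------------------------------------------------
// Editor's note (illustrative addition, not part of the original file): a
// PolicyProvider is just a table mapping hadoop-policy ACL keys to protocol
// classes, which the RPC layer consults when service-level authorization is
// on. A minimal sketch for a made-up protocol; the ACL key and the use of
// Runnable as a stand-in protocol interface are both hypothetical.
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

class ExamplePolicyProviderSketch extends PolicyProvider {
  private static final Service[] SERVICES = new Service[] {
      // In a real provider this would be the protocol's PB interface.
      new Service("security.example.protocol.acl", Runnable.class)
  };

  @Override
  public Service[] getServices() {
    return SERVICES;
  }
}
// ---------------------------------------------------------------------------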
1,943
37.88
93
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/JAXBContextResolver.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import java.util.Set; import java.util.HashSet; import java.util.Arrays; import com.sun.jersey.api.json.JSONConfiguration; import com.sun.jersey.api.json.JSONJAXBContext; import com.google.inject.Singleton; import javax.ws.rs.ext.ContextResolver; import javax.ws.rs.ext.Provider; import javax.xml.bind.JAXBContext; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.webapp.RemoteExceptionData; @Singleton @Provider public class JAXBContextResolver implements ContextResolver<JAXBContext> { private JAXBContext context; private final Set<Class> types; // you have to specify all the dao classes here private final Class[] cTypes = {AppInfo.class, AppsInfo.class, ContainerInfo.class, ContainersInfo.class, NodeInfo.class, RemoteExceptionData.class}; public JAXBContextResolver() throws Exception { this.types = new HashSet<Class>(Arrays.asList(cTypes)); // sets the json configuration so that the json output looks like // the xml output this.context = new JSONJAXBContext(JSONConfiguration.natural(). rootUnwrapping(false).build(), cTypes); } @Override public JAXBContext getContext(Class<?> objectType) { return (types.contains(objectType)) ? context : null; } }
2,420
36.246154
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import java.util.Map.Entry; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class AllApplicationsPage extends NMView { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); setTitle("Applications running on this node"); set(DATATABLES_ID, "applications"); set(initID(DATATABLES, "applications"), appsTableInit()); setTableStyles(html, "applications"); } private String appsTableInit() { return tableInit(). // Sort by id upon page load append(", aaSorting: [[0, 'asc']]"). // applicationid, applicationstate append(", aoColumns:[null, null]} ").toString(); } @Override protected Class<? extends SubView> content() { return AllApplicationsBlock.class; } public static class AllApplicationsBlock extends HtmlBlock implements YarnWebParams { private final Context nmContext; @Inject public AllApplicationsBlock(Context nmContext) { this.nmContext = nmContext; } @Override protected void render(Block html) { TBODY<TABLE<BODY<Hamlet>>> tableBody = html .body() .table("#applications") .thead() .tr() .td()._("ApplicationId")._() .td()._("ApplicationState")._() ._() ._() .tbody(); for (Entry<ApplicationId, Application> entry : this.nmContext .getApplications().entrySet()) { AppInfo info = new AppInfo(entry.getValue()); tableBody .tr() .td().a(url("application", info.getId()), info.getId())._() .td()._(info.getState()) ._() ._(); } tableBody._()._()._(); } } }
3,532
33.300971
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AggregatedLogsBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
807
43.888889
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout; public class NMView extends TwoColumnLayout { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); } protected void commonPreHead(Page.HTML<_> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } @Override protected Class<? extends SubView> nav() { return NavBlock.class; } }
1,546
34.159091
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import java.io.IOException; import java.io.PrintWriter; import javax.inject.Inject; import javax.inject.Singleton; import javax.servlet.FilterChain; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.webapp.Controller.RequestContext; import com.google.inject.Injector; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; @Singleton public class NMWebAppFilter extends GuiceContainer{ private Injector injector; private Context nmContext; private static final long serialVersionUID = 1L; @Inject public NMWebAppFilter(Injector injector, Context nmContext) { super(injector); this.injector = injector; this.nmContext = nmContext; } @Override public void doFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain) throws IOException, ServletException { String uri = HtmlQuoting.quoteHtmlChars(request.getRequestURI()); String redirectPath = containerLogPageRedirectPath(uri); if (redirectPath != null) { String redirectMsg = "Redirecting to log server" + " : " + redirectPath; PrintWriter out = response.getWriter(); out.println(redirectMsg); response.setHeader("Location", redirectPath); response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT); return; } super.doFilter(request, response, chain); } private String containerLogPageRedirectPath(String uri) { String redirectPath = null; if (!uri.contains("/ws/v1/node") && uri.contains("/containerlogs")) { String[] parts = uri.split("/"); String containerIdStr = parts[3]; String appOwner = parts[4]; if (containerIdStr != null && !containerIdStr.isEmpty()) { ContainerId containerId = null; try { containerId = ContainerId.fromString(containerIdStr); } catch (IllegalArgumentException ex) { return redirectPath; } ApplicationId appId = containerId.getApplicationAttemptId().getApplicationId(); Application app = nmContext.getApplications().get(appId); Configuration nmConf = nmContext.getLocalDirsHandler().getConfig(); if (app == null && nmConf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { String logServerUrl = nmConf.get(YarnConfiguration.YARN_LOG_SERVER_URL); if (logServerUrl != null && !logServerUrl.isEmpty()) { StringBuilder sb = 
new StringBuilder(); sb.append(logServerUrl); sb.append("/"); sb.append(nmContext.getNodeId().toString()); sb.append("/"); sb.append(containerIdStr); sb.append("/"); sb.append(containerIdStr); sb.append("/"); sb.append(appOwner); redirectPath = sb.toString(); } else { injector.getInstance(RequestContext.class).set( ContainerLogsPage.REDIRECT_URL, "false"); } } } } return redirectPath; } }
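// ---------------------------------------------------------------------------
// Editor's note (illustrative addition, not part of the original file):
// containerLogPageRedirectPath() above rewrites a local /containerlogs URI to
// the aggregated log server as
// <yarn.log.server.url>/<nodeId>/<containerId>/<containerId>/<appOwner>.
// A tiny sketch of that layout; every value below is made up.
class LogRedirectSketch {
  static String redirectPath(String logServerUrl, String nodeId,
      String containerIdStr, String appOwner) {
    return logServerUrl + "/" + nodeId + "/" + containerIdStr + "/"
        + containerIdStr + "/" + appOwner;
  }

  public static void main(String[] args) {
    System.out.println(redirectPath(
        "http://historyserver:19888/jobhistory/logs",
        "worker-1:45454",
        "container_1400000000000_0001_01_000002",
        "alice"));
  }
}
// ---------------------------------------------------------------------------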
4,477
36.630252
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.util.StringHelper.pajoin; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApps; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; public class WebServer extends AbstractService { private static final Log LOG = LogFactory.getLog(WebServer.class); private final Context nmContext; private final NMWebApp nmWebApp; private WebApp webApp; private int port; public WebServer(Context nmContext, ResourceView resourceView, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler) { super(WebServer.class.getName()); this.nmContext = nmContext; this.nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler); } @Override protected void serviceStart() throws Exception { String bindAddress = WebAppUtils.getWebAppBindURL(getConfig(), YarnConfiguration.NM_BIND_HOST, WebAppUtils.getNMWebAppURLWithoutScheme(getConfig())); LOG.info("Instantiating NMWebApp at " + bindAddress); try { this.webApp = WebApps .$for("node", Context.class, this.nmContext, "ws") .at(bindAddress) .with(getConfig()) .withHttpSpnegoPrincipalKey( YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY) .withHttpSpnegoKeytabKey( YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) .start(this.nmWebApp); this.port = this.webApp.httpServer().getConnectorAddress(0).getPort(); } catch (Exception e) { String msg = "NMWebapps failed to start."; LOG.error(msg, e); throw new YarnRuntimeException(msg, e); } super.serviceStart(); } public int getPort() { return this.port; } @Override protected void serviceStop() throws Exception { if (this.webApp != null) { LOG.debug("Stopping webapp"); this.webApp.stop(); } super.serviceStop(); } public static class NMWebApp extends WebApp implements YarnWebParams { private final ResourceView resourceView; private final ApplicationACLsManager aclsManager; private final LocalDirsHandlerService dirsHandler; public NMWebApp(ResourceView resourceView, ApplicationACLsManager aclsManager, LocalDirsHandlerService 
dirsHandler) { this.resourceView = resourceView; this.aclsManager = aclsManager; this.dirsHandler = dirsHandler; } @Override public void setup() { bind(NMWebServices.class); bind(GenericExceptionHandler.class); bind(JAXBContextResolver.class); bind(ResourceView.class).toInstance(this.resourceView); bind(ApplicationACLsManager.class).toInstance(this.aclsManager); bind(LocalDirsHandlerService.class).toInstance(dirsHandler); route("/", NMController.class, "info"); route("/node", NMController.class, "node"); route("/allApplications", NMController.class, "allApplications"); route("/allContainers", NMController.class, "allContainers"); route(pajoin("/application", APPLICATION_ID), NMController.class, "application"); route(pajoin("/container", CONTAINER_ID), NMController.class, "container"); route( pajoin("/containerlogs", CONTAINER_ID, APP_OWNER, CONTAINER_LOG_TYPE), NMController.class, "logs"); route("/errors-and-warnings", NMController.class, "errorsAndWarnings"); } @Override protected Class<? extends GuiceContainer> getWebAppFilterClass() { return NMWebAppFilter.class; } } }
5,170
36.201439
80
java
hadoop
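A note for readers of the WebServer record above: the class leans entirely on Hadoop's AbstractService lifecycle (init, start, stop), with the real work in serviceStart/serviceStop. Below is a minimal, self-contained sketch of that lifecycle pattern using a toy service; DemoService and its printouts are illustrative and not part of the NodeManager code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;

// Toy service showing the AbstractService lifecycle WebServer builds on.
public class DemoService extends AbstractService {
  public DemoService() {
    super(DemoService.class.getName());
  }

  @Override
  protected void serviceStart() throws Exception {
    // Real services acquire resources here; WebServer starts its WebApp.
    System.out.println(getName() + " started");
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    // Mirror of serviceStart: release whatever was acquired.
    System.out.println(getName() + " stopped");
    super.serviceStop();
  }

  public static void main(String[] args) {
    DemoService svc = new DemoService();
    svc.init(new Configuration()); // INITED: runs serviceInit
    svc.start();                   // STARTED: runs serviceStart
    svc.stop();                    // STOPPED: runs serviceStop
  }
}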
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.server.webapp.ErrorsAndWarningsBlock; import org.apache.hadoop.yarn.webapp.view.HtmlPage; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class NMErrorsAndWarningsPage extends NMView { @Override protected Class<? extends SubView> content() { return ErrorsAndWarningsBlock.class; } @Override protected void preHead(HtmlPage.Page.HTML<HtmlPage._> html) { commonPreHead(html); String title = "Errors and Warnings in the NodeManager"; setTitle(title); String tableId = "messages"; set(DATATABLES_ID, tableId); set(initID(DATATABLES, tableId), tablesInit()); setTableStyles(html, tableId, ".message {width:50em}", ".count {width:8em}", ".lasttime {width:16em}"); } private String tablesInit() { StringBuilder b = tableInit().append(", aoColumnDefs: ["); b.append("{'sType': 'string', 'aTargets': [ 0 ]}"); b.append(", {'sType': 'string', 'bSearchable': true, 'aTargets': [ 1 ]}"); b.append(", {'sType': 'numeric', 'bSearchable': false, 'aTargets': [ 2 ]}"); b.append(", {'sType': 'date', 'aTargets': [ 3 ] }]"); b.append(", aaSorting: [[3, 'desc']]}"); return b.toString(); } }
2,123
36.928571
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; import com.google.inject.Inject; public class ContainerPage extends NMView implements YarnWebParams { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); setTitle("Container " + $(CONTAINER_ID)); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } @Override protected Class<? extends SubView> content() { return ContainerBlock.class; } public static class ContainerBlock extends HtmlBlock implements YarnWebParams { private final Context nmContext; @Inject public ContainerBlock(Context nmContext) { this.nmContext = nmContext; } @Override protected void render(Block html) { ContainerId containerID; try { containerID = ConverterUtils.toContainerId($(CONTAINER_ID)); } catch (IllegalArgumentException e) { html.p()._("Invalid containerId " + $(CONTAINER_ID))._(); return; } DIV<Hamlet> div = html.div("#content"); Container container = this.nmContext.getContainers().get(containerID); if (container == null) { div.h1("Unknown Container. Container might have completed, " + "please go back to the previous page and retry.")._(); return; } ContainerInfo info = new ContainerInfo(this.nmContext, container); info("Container information") ._("ContainerID", info.getId()) ._("ContainerState", info.getState()) ._("ExitStatus", info.getExitStatus()) ._("Diagnostics", info.getDiagnostics()) ._("User", info.getUser()) ._("TotalMemoryNeeded", info.getMemoryNeeded()) ._("TotalVCoresNeeded", info.getVCoresNeeded()) ._("logs", info.getShortLogLink(), "Link to logs"); html._(InfoBlock.class); } } }
3,416
35.351064
86
java
hadoop
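The ContainerBlock above guards ConverterUtils.toContainerId with a catch for IllegalArgumentException before rendering. A small standalone sketch of that parse-and-guard step; the container ID string below is a made-up placeholder in the standard container_&lt;clusterTs&gt;_&lt;appId&gt;_&lt;attempt&gt;_&lt;container&gt; format.

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;

// Standalone sketch of the ID parsing guarded by ContainerBlock above.
public class ContainerIdParseDemo {
  public static void main(String[] args) {
    try {
      ContainerId id = ConverterUtils.toContainerId(
          "container_1400000000000_0001_01_000001"); // placeholder ID
      System.out.println(id.getApplicationAttemptId());
    } catch (IllegalArgumentException e) {
      // The web page renders "Invalid containerId" in this case.
      System.out.println("Invalid containerId: " + e.getMessage());
    }
  }
}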
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.Charset; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.NotFoundException; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class ContainerLogsPage extends NMView { public static final String REDIRECT_URL = "redirect.url"; @Override protected void preHead(Page.HTML<_> html) { String redirectUrl = $(REDIRECT_URL); if (redirectUrl == null || redirectUrl.isEmpty()) { set(TITLE, join("Logs for ", $(CONTAINER_ID))); } else { if (redirectUrl.equals("false")) { set(TITLE, join("Failed redirect for ", $(CONTAINER_ID))); //Error getting redirect url. Fall through. } } set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } @Override protected Class<? extends SubView> content() { return ContainersLogsBlock.class; } public static class ContainersLogsBlock extends HtmlBlock implements YarnWebParams { private final Context nmContext; @Inject public ContainersLogsBlock(Context context) { this.nmContext = context; } @Override protected void render(Block html) { String redirectUrl = $(REDIRECT_URL); if (redirectUrl !=null && redirectUrl.equals("false")) { html.h1("Failed while trying to construct the redirect url to the log" + " server. Log Server url may not be configured"); //Intentional fallthrough. 
} ContainerId containerId; try { containerId = ConverterUtils.toContainerId($(CONTAINER_ID)); } catch (IllegalArgumentException ex) { html.h1("Invalid container ID: " + $(CONTAINER_ID)); return; } try { if ($(CONTAINER_LOG_TYPE).isEmpty()) { List<File> logFiles = ContainerLogsUtils.getContainerLogDirs(containerId, request().getRemoteUser(), nmContext); printLogFileDirectory(html, logFiles); } else { File logFile = ContainerLogsUtils.getContainerLogFile(containerId, $(CONTAINER_LOG_TYPE), request().getRemoteUser(), nmContext); printLogFile(html, logFile); } } catch (YarnException ex) { html.h1(ex.getMessage()); } catch (NotFoundException ex) { html.h1(ex.getMessage()); } } private void printLogFile(Block html, File logFile) { long start = $("start").isEmpty() ? -4 * 1024 : Long.parseLong($("start")); start = start < 0 ? logFile.length() + start : start; start = start < 0 ? 0 : start; long end = $("end").isEmpty() ? logFile.length() : Long.parseLong($("end")); end = end < 0 ? logFile.length() + end : end; end = end < 0 ? logFile.length() : end; if (start > end) { html.h1("Invalid start and end values. Start: [" + start + "]" + ", end[" + end + "]"); return; } else { FileInputStream logByteStream = null; try { logByteStream = ContainerLogsUtils.openLogFileForRead($(CONTAINER_ID), logFile, nmContext); } catch (IOException ex) { html.h1(ex.getMessage()); return; } try { long toRead = end - start; if (toRead < logFile.length()) { html.p()._("Showing " + toRead + " bytes. Click ") .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), logFile.getName(), "?start=0"), "here"). _(" for full log")._(); } IOUtils.skipFully(logByteStream, start); InputStreamReader reader = new InputStreamReader(logByteStream, Charset.forName("UTF-8")); int bufferSize = 65536; char[] cbuf = new char[bufferSize]; int len = 0; int currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; PRE<Hamlet> pre = html.pre(); while ((len = reader.read(cbuf, 0, currentToRead)) > 0 && toRead > 0) { pre._(new String(cbuf, 0, len)); toRead = toRead - len; currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; } pre._(); reader.close(); } catch (IOException e) { LOG.error( "Exception reading log file " + logFile.getAbsolutePath(), e); html.h1("Exception reading log file. It might be because log " + "file was aggregated : " + logFile.getName()); } finally { if (logByteStream != null) { try { logByteStream.close(); } catch (IOException e) { // Ignore } } } } } private void printLogFileDirectory(Block html, List<File> containerLogsDirs) { // Print out log types in lexical order Collections.sort(containerLogsDirs); boolean foundLogFile = false; for (File containerLogsDir : containerLogsDirs) { File[] logFiles = containerLogsDir.listFiles(); if (logFiles != null) { Arrays.sort(logFiles); for (File logFile : logFiles) { foundLogFile = true; html.p() .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), logFile.getName(), "?start=-4096"), logFile.getName() + " : Total file length is " + logFile.length() + " bytes.")._(); } } } if (!foundLogFile) { html.h1("No logs available for container " + $(CONTAINER_ID)); return; } } } }
7,545
34.42723
83
java
hadoop
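The printLogFile method above normalizes the start/end query parameters before reading: negative offsets count back from the end of the file, and the default window is the last 4 KB of the log. A sketch isolating just that arithmetic (class and method names are illustrative, not NM API):

// Sketch of the byte-window arithmetic used by printLogFile above: negative
// offsets count back from the end of the file, and the default window is the
// last 4 KB of the log.
public class LogWindowDemo {
  static long[] normalize(long fileLength, Long start, Long end) {
    long s = (start == null) ? -4 * 1024 : start;
    s = s < 0 ? fileLength + s : s;
    s = s < 0 ? 0 : s;
    long e = (end == null) ? fileLength : end;
    e = e < 0 ? fileLength + e : e;
    e = e < 0 ? fileLength : e;
    return new long[] { s, e }; // the page still rejects s > e
  }

  public static void main(String[] args) {
    long[] w = normalize(10_000L, null, null);
    System.out.println(w[0] + ".." + w[1]); // 5904..10000 (the last 4 KB)
  }
}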
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import java.util.Date; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; import com.google.inject.Inject; public class NodePage extends NMView { private static final long BYTES_IN_MB = 1024 * 1024; @Override protected void commonPreHead(HTML<_> html) { super.commonPreHead(html); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}"); } @Override protected Class<? extends SubView> content() { return NodeBlock.class; } public static class NodeBlock extends HtmlBlock { private final Context context; private final ResourceView resourceView; @Inject public NodeBlock(Context context, ResourceView resourceView) { this.context = context; this.resourceView = resourceView; } @Override protected void render(Block html) { NodeInfo info = new NodeInfo(this.context, this.resourceView); info("NodeManager information") ._("Total Vmem allocated for Containers", StringUtils.byteDesc(info.getTotalVmemAllocated() * BYTES_IN_MB)) ._("Vmem enforcement enabled", info.isVmemCheckEnabled()) ._("Total Pmem allocated for Container", StringUtils.byteDesc(info.getTotalPmemAllocated() * BYTES_IN_MB)) ._("Pmem enforcement enabled", info.isPmemCheckEnabled()) ._("Total VCores allocated for Containers", String.valueOf(info.getTotalVCoresAllocated())) ._("NodeHealthyStatus", info.getHealthStatus()) ._("LastNodeHealthTime", new Date( info.getLastNodeUpdateTime())) ._("NodeHealthReport", info.getHealthReport()) ._("NodeManager started on", new Date( info.getNMStartupTime())) ._("NodeManager Version:", info.getNMBuildVersion() + " on " + info.getNMVersionBuiltOn()) ._("Hadoop Version:", info.getHadoopBuildVersion() + " on " + info.getHadoopVersionBuiltOn()); html._(InfoBlock.class); } } }
3,435
35.553191
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.util.StringHelper.join; import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.YarnWebParams; import com.google.inject.Inject; public class NMController extends Controller implements YarnWebParams { @Inject public NMController(RequestContext requestContext) { super(requestContext); } @Override // TODO: What is the use of this, given info()? public void index() { setTitle(join("NodeManager - ", $(NM_NODENAME))); } public void info() { render(NodePage.class); } public void node() { render(NodePage.class); } public void allApplications() { render(AllApplicationsPage.class); } public void allContainers() { render(AllContainersPage.class); } public void application() { render(ApplicationPage.class); } public void container() { render(ContainerPage.class); } public void errorsAndWarnings() { render(NMErrorsAndWarningsPage.class); } public void logs() { render(ContainerLogsPage.class); } }

1,914
25.232877
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.OutputStream; import java.util.Map.Entry; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; import javax.ws.rs.core.UriInfo; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.BadRequestException; import org.apache.hadoop.yarn.webapp.NotFoundException; import org.apache.hadoop.yarn.webapp.WebApp; import com.google.inject.Inject; import com.google.inject.Singleton; @Singleton @Path("/ws/v1/node") public class NMWebServices { private Context nmContext; private ResourceView rview; private WebApp webapp; private static RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); private @javax.ws.rs.core.Context HttpServletRequest request; private @javax.ws.rs.core.Context HttpServletResponse response; @javax.ws.rs.core.Context UriInfo uriInfo; @Inject public NMWebServices(final Context nm, final ResourceView view, final WebApp webapp) { this.nmContext = nm; this.rview = view; this.webapp = webapp; } private void init() { //clear content type response.setContentType(null); } @GET @Produces({ 
MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public NodeInfo get() { return getNodeInfo(); } @GET @Path("/info") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public NodeInfo getNodeInfo() { init(); return new NodeInfo(this.nmContext, this.rview); } @GET @Path("/apps") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public AppsInfo getNodeApps(@QueryParam("state") String stateQuery, @QueryParam("user") String userQuery) { init(); AppsInfo allApps = new AppsInfo(); for (Entry<ApplicationId, Application> entry : this.nmContext .getApplications().entrySet()) { AppInfo appInfo = new AppInfo(entry.getValue()); if (stateQuery != null && !stateQuery.isEmpty()) { ApplicationState.valueOf(stateQuery); if (!appInfo.getState().equalsIgnoreCase(stateQuery)) { continue; } } if (userQuery != null) { if (userQuery.isEmpty()) { String msg = "Error: You must specify a non-empty string for the user"; throw new BadRequestException(msg); } if (!appInfo.getUser().toString().equals(userQuery)) { continue; } } allApps.add(appInfo); } return allApps; } @GET @Path("/apps/{appid}") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public AppInfo getNodeApp(@PathParam("appid") String appId) { init(); ApplicationId id = ConverterUtils.toApplicationId(recordFactory, appId); if (id == null) { throw new NotFoundException("app with id " + appId + " not found"); } Application app = this.nmContext.getApplications().get(id); if (app == null) { throw new NotFoundException("app with id " + appId + " not found"); } return new AppInfo(app); } @GET @Path("/containers") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public ContainersInfo getNodeContainers() { init(); ContainersInfo allContainers = new ContainersInfo(); for (Entry<ContainerId, Container> entry : this.nmContext.getContainers() .entrySet()) { if (entry.getValue() == null) { // just skip it continue; } ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue(), uriInfo.getBaseUri().toString(), webapp.name()); allContainers.add(info); } return allContainers; } @GET @Path("/containers/{containerid}") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public ContainerInfo getNodeContainer(@PathParam("containerid") String id) { ContainerId containerId = null; init(); try { containerId = ConverterUtils.toContainerId(id); } catch (Exception e) { throw new BadRequestException("invalid container id, " + id); } Container container = nmContext.getContainers().get(containerId); if (container == null) { throw new NotFoundException("container with id, " + id + ", not found"); } return new ContainerInfo(this.nmContext, container, uriInfo.getBaseUri() .toString(), webapp.name()); } /** * Returns the contents of a container's log file in plain text. * * Only works for containers that are still in the NodeManager's memory, so * logs are no longer available after the corresponding application is no * longer running. 
* * @param containerIdStr * The container ID * @param filename * The name of the log file * @return * The contents of the container's log file */ @GET @Path("/containerlogs/{containerid}/{filename}") @Produces({ MediaType.TEXT_PLAIN }) @Public @Unstable public Response getLogs(@PathParam("containerid") String containerIdStr, @PathParam("filename") String filename) { ContainerId containerId; try { containerId = ConverterUtils.toContainerId(containerIdStr); } catch (IllegalArgumentException ex) { return Response.status(Status.BAD_REQUEST).build(); } File logFile = null; try { logFile = ContainerLogsUtils.getContainerLogFile( containerId, filename, request.getRemoteUser(), nmContext); } catch (NotFoundException ex) { return Response.status(Status.NOT_FOUND).entity(ex.getMessage()).build(); } catch (YarnException ex) { return Response.serverError().entity(ex.getMessage()).build(); } try { final FileInputStream fis = ContainerLogsUtils.openLogFileForRead( containerIdStr, logFile, nmContext); StreamingOutput stream = new StreamingOutput() { @Override public void write(OutputStream os) throws IOException, WebApplicationException { int bufferSize = 65536; byte[] buf = new byte[bufferSize]; int len; while ((len = fis.read(buf, 0, bufferSize)) > 0) { os.write(buf, 0, len); } os.flush(); } }; return Response.ok(stream).build(); } catch (IOException ex) { return Response.serverError().entity(ex.getMessage()).build(); } } }
8,861
33.084615
95
java
hadoop
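Since NMWebServices above is rooted at /ws/v1/node, any HTTP client can exercise it. A hedged sketch using only JDK classes, assuming a NodeManager listening on localhost:8042 (the default webapp port; adjust for your cluster):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Minimal client sketch for the NM web services above. Assumes an NM on
// localhost:8042; the Accept header selects JSON or XML rendering.
public class NMInfoClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8042/ws/v1/node/info");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json"); // or application/xml
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON rendering of NodeInfo
      }
    } finally {
      conn.disconnect();
    }
  }
}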
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; import com.google.inject.Inject; public class ApplicationPage extends NMView implements YarnWebParams { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); set(DATATABLES_ID, "containers"); set(initID(DATATABLES, "containers"), containersTableInit()); setTableStyles(html, "containers"); } private String containersTableInit() { return tableInit().append(",aoColumns:[null]}").toString(); } @Override protected Class<? extends SubView> content() { return ApplicationBlock.class; } public static class ApplicationBlock extends HtmlBlock implements YarnWebParams { private final Context nmContext; private final Configuration conf; private final RecordFactory recordFactory; @Inject public ApplicationBlock(Context nmContext, Configuration conf) { this.conf = conf; this.nmContext = nmContext; this.recordFactory = RecordFactoryProvider.getRecordFactory(this.conf); } @Override protected void render(Block html) { ApplicationId applicationID = ConverterUtils.toApplicationId(this.recordFactory, $(APPLICATION_ID)); Application app = this.nmContext.getApplications().get(applicationID); AppInfo info = new AppInfo(app); info("Application's information") ._("ApplicationId", info.getId()) ._("ApplicationState", info.getState()) ._("User", info.getUser()); TABLE<Hamlet> containersListBody = html._(InfoBlock.class) .table("#containers"); for (String containerIdStr : info.getContainers()) { containersListBody .tr().td() .a(url("container", containerIdStr), containerIdStr) ._()._(); } containersListBody._(); } } }
3,764
37.030303
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.SecureIOUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.NotFoundException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Contains utilities for fetching a user's log file in a secure fashion. */ public class ContainerLogsUtils { public static final Logger LOG = LoggerFactory.getLogger(ContainerLogsUtils.class); /** * Finds the local directories that logs for the given container are stored * on. */ public static List<File> getContainerLogDirs(ContainerId containerId, String remoteUser, Context context) throws YarnException { Container container = context.getContainers().get(containerId); Application application = getApplicationForContainer(containerId, context); checkAccess(remoteUser, application, context); // A null check on container (container == null) with an exception thrown back // is not required here. When a container completes, the NodeManager removes // its information from the NMContext. With log aggregation disabled, the // container log view request is still forwarded to the NM, and the NM serves // it even though it no longer holds the completed container's information. 
if (container != null) { checkState(container.getContainerState()); } return getContainerLogDirs(containerId, context.getLocalDirsHandler()); } static List<File> getContainerLogDirs(ContainerId containerId, LocalDirsHandlerService dirsHandler) throws YarnException { List<String> logDirs = dirsHandler.getLogDirsForRead(); List<File> containerLogDirs = new ArrayList<File>(logDirs.size()); for (String logDir : logDirs) { logDir = new File(logDir).toURI().getPath(); String appIdStr = ConverterUtils.toString(containerId .getApplicationAttemptId().getApplicationId()); File appLogDir = new File(logDir, appIdStr); containerLogDirs.add(new File(appLogDir, containerId.toString())); } return containerLogDirs; } /** * Finds the log file with the given filename for the given container. */ public static File getContainerLogFile(ContainerId containerId, String fileName, String remoteUser, Context context) throws YarnException { Container container = context.getContainers().get(containerId); Application application = getApplicationForContainer(containerId, context); checkAccess(remoteUser, application, context); if (container != null) { checkState(container.getContainerState()); } try { LocalDirsHandlerService dirsHandler = context.getLocalDirsHandler(); String relativeContainerLogDir = ContainerLaunch.getRelativeContainerLogDir( application.getAppId().toString(), containerId.toString()); Path logPath = dirsHandler.getLogPathToRead( relativeContainerLogDir + Path.SEPARATOR + fileName); URI logPathURI = new File(logPath.toString()).toURI(); File logFile = new File(logPathURI.getPath()); return logFile; } catch (IOException e) { LOG.warn("Failed to find log file", e); throw new NotFoundException("Cannot find this log on the local disk."); } } private static Application getApplicationForContainer(ContainerId containerId, Context context) { ApplicationId applicationId = containerId.getApplicationAttemptId() .getApplicationId(); Application application = context.getApplications().get( applicationId); if (application == null) { throw new NotFoundException( "Unknown container. Container either has not started or " + "has already completed or " + "doesn't belong to this node at all."); } return application; } private static void checkAccess(String remoteUser, Application application, Context context) throws YarnException { UserGroupInformation callerUGI = null; if (remoteUser != null) { callerUGI = UserGroupInformation.createRemoteUser(remoteUser); } if (callerUGI != null && !context.getApplicationACLsManager().checkAccess(callerUGI, ApplicationAccessType.VIEW_APP, application.getUser(), application.getAppId())) { throw new YarnException( "User [" + remoteUser + "] is not authorized to view the logs for application " + application.getAppId()); } } private static void checkState(ContainerState state) { if (state == ContainerState.NEW || state == ContainerState.LOCALIZING || state == ContainerState.LOCALIZED) { throw new NotFoundException("Container is not yet running. Current state is " + state); } if (state == ContainerState.LOCALIZATION_FAILED) { throw new NotFoundException("Container wasn't started. 
Localization failed."); } } public static FileInputStream openLogFileForRead(String containerIdStr, File logFile, Context context) throws IOException { ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ApplicationId applicationId = containerId.getApplicationAttemptId() .getApplicationId(); String user = context.getApplications().get( applicationId).getUser(); try { return SecureIOUtils.openForRead(logFile, user, null); } catch (IOException e) { if (e.getMessage().contains( "did not match expected owner '" + user + "'")) { LOG.error( "Exception reading log file " + logFile.getAbsolutePath(), e); throw new IOException("Exception reading log file. Application submitted by '" + user + "' doesn't own requested log file : " + logFile.getName(), e); } else { throw new IOException("Exception reading log file. It might be because log " + "file was aggregated : " + logFile.getName(), e); } } } }
7,871
40.87234
91
java
hadoop
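getContainerLogDirs above resolves each configured NM log directory to a &lt;log-dir&gt;/&lt;appId&gt;/&lt;containerId&gt; subdirectory. A standalone sketch of that layout computation; the directory and IDs are illustrative placeholders:

import java.io.File;
import java.util.Arrays;
import java.util.List;

// Sketch of the log-directory layout getContainerLogDirs above walks:
// each NM log dir holds an <appId>/<containerId> subdirectory per container.
public class LogDirLayoutDemo {
  public static void main(String[] args) {
    List<String> nmLogDirs = Arrays.asList("/var/log/hadoop-yarn/containers");
    String appId = "application_1400000000000_0001";               // placeholder
    String containerId = "container_1400000000000_0001_01_000001"; // placeholder
    for (String logDir : nmLogDirs) {
      File appLogDir = new File(logDir, appId);
      File containerLogDir = new File(appLogDir, containerId);
      System.out.println(containerLogDir); // where stdout/stderr/syslog land
    }
  }
}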
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import java.util.Map.Entry; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class AllContainersPage extends NMView { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); setTitle("All containers running on this node"); set(DATATABLES_ID, "containers"); set(initID(DATATABLES, "containers"), containersTableInit()); setTableStyles(html, "containers"); } private String containersTableInit() { return tableInit(). // containerid, containerid, log-url append(", aoColumns:[null, null, {bSearchable:false}]} ").toString(); } @Override protected Class<? extends SubView> content() { return AllContainersBlock.class; } public static class AllContainersBlock extends HtmlBlock implements YarnWebParams { private final Context nmContext; @Inject public AllContainersBlock(Context nmContext) { this.nmContext = nmContext; } @Override protected void render(Block html) { TBODY<TABLE<BODY<Hamlet>>> tableBody = html.body() .table("#containers") .thead() .tr() .td()._("ContainerId")._() .td()._("ContainerState")._() .td()._("logs")._() ._() ._().tbody(); for (Entry<ContainerId, Container> entry : this.nmContext .getContainers().entrySet()) { ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue()); tableBody .tr() .td().a(url("container", info.getId()), info.getId()) ._() .td()._(info.getState())._() .td() .a(url(info.getShortLogLink()), "logs")._() ._(); } tableBody._()._()._(); } } }
3,535
34.009901
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AggregatedLogsPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
807
43.888889
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class NavBlock extends HtmlBlock implements YarnWebParams { private Configuration conf; @Inject public NavBlock(Configuration conf) { this.conf = conf; } @Override protected void render(Block html) { boolean addErrorsAndWarningsLink = false; Log log = LogFactory.getLog(NMErrorsAndWarningsPage.class); if (log instanceof Log4JLogger) { Log4jWarningErrorMetricsAppender appender = Log4jWarningErrorMetricsAppender.findAppender(); if (appender != null) { addErrorsAndWarningsLink = true; } } String RMWebAppURL = WebAppUtils.getResolvedRMWebAppURLWithScheme(this.conf); Hamlet.UL<Hamlet.DIV<Hamlet>> ul = html .div("#nav") .h3()._("ResourceManager")._() .ul() .li().a(RMWebAppURL, "RM Home")._()._() .h3()._("NodeManager")._() // TODO: Problem if no header like this .ul() .li() .a(url("node"), "Node Information")._() .li() .a(url("allApplications"), "List of Applications") ._() .li() .a(url("allContainers"), "List of Containers")._() ._() .h3("Tools") .ul() .li().a("/conf", "Configuration")._() .li().a("/logs", "Local logs")._() .li().a("/stacks", "Server stacks")._() .li().a("/jmx?qry=Hadoop:*", "Server metrics")._(); if (addErrorsAndWarningsLink) { ul.li().a(url("errors-and-warnings"), "Errors/Warnings")._(); } ul._()._(); } }
2,877
33.261905
98
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.NodeManager; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.util.YarnVersionInfo; @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) public class NodeInfo { private static final long BYTES_IN_MB = 1024 * 1024; protected String healthReport; protected long totalVmemAllocatedContainersMB; protected long totalPmemAllocatedContainersMB; protected long totalVCoresAllocatedContainers; protected boolean vmemCheckEnabled; protected boolean pmemCheckEnabled; protected long lastNodeUpdateTime; protected boolean nodeHealthy; protected String nodeManagerVersion; protected String nodeManagerBuildVersion; protected String nodeManagerVersionBuiltOn; protected String hadoopVersion; protected String hadoopBuildVersion; protected String hadoopVersionBuiltOn; protected String id; protected String nodeHostName; protected long nmStartupTime; public NodeInfo() { } // JAXB needs this public NodeInfo(final Context context, final ResourceView resourceView) { this.id = context.getNodeId().toString(); this.nodeHostName = context.getNodeId().getHost(); this.totalVmemAllocatedContainersMB = resourceView .getVmemAllocatedForContainers() / BYTES_IN_MB; this.vmemCheckEnabled = resourceView.isVmemCheckEnabled(); this.totalPmemAllocatedContainersMB = resourceView .getPmemAllocatedForContainers() / BYTES_IN_MB; this.pmemCheckEnabled = resourceView.isPmemCheckEnabled(); this.totalVCoresAllocatedContainers = resourceView .getVCoresAllocatedForContainers(); this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy(); this.lastNodeUpdateTime = context.getNodeHealthStatus() .getLastHealthReportTime(); this.healthReport = context.getNodeHealthStatus().getHealthReport(); this.nodeManagerVersion = YarnVersionInfo.getVersion(); this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion(); this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate(); this.hadoopVersion = VersionInfo.getVersion(); this.hadoopBuildVersion = VersionInfo.getBuildVersion(); this.hadoopVersionBuiltOn = VersionInfo.getDate(); this.nmStartupTime = NodeManager.getNMStartupTime(); } public String getNodeId() { return this.id; } public String getNodeHostName() { return this.nodeHostName; } public String getNMVersion() { return this.nodeManagerVersion; } public String getNMBuildVersion() { return this.nodeManagerBuildVersion; } public String getNMVersionBuiltOn() { return 
this.nodeManagerVersionBuiltOn; } public String getHadoopVersion() { return this.hadoopVersion; } public String getHadoopBuildVersion() { return this.hadoopBuildVersion; } public String getHadoopVersionBuiltOn() { return this.hadoopVersionBuiltOn; } public boolean getHealthStatus() { return this.nodeHealthy; } public long getLastNodeUpdateTime() { return this.lastNodeUpdateTime; } public String getHealthReport() { return this.healthReport; } public long getTotalVmemAllocated() { return this.totalVmemAllocatedContainersMB; } public long getTotalVCoresAllocated() { return this.totalVCoresAllocatedContainers; } public boolean isVmemCheckEnabled() { return this.vmemCheckEnabled; } public long getTotalPmemAllocated() { return this.totalPmemAllocatedContainersMB; } public boolean isPmemCheckEnabled() { return this.pmemCheckEnabled; } public long getNMStartupTime() { return nmStartupTime; } }
4,773
30.202614
75
java
hadoop
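NodeInfo above is a JAXB bean, which is what lets NMWebServices return it as XML or JSON. A minimal sketch marshalling a default-constructed NodeInfo to XML; in the web services it is built from a live Context and ResourceView instead, so the fields below are defaults:

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo;

// Sketch of how a JAXB-annotated DAO like NodeInfo serializes to XML.
// Uses the no-arg constructor, so field values are defaults.
public class NodeInfoXmlDemo {
  public static void main(String[] args) throws Exception {
    NodeInfo info = new NodeInfo();
    Marshaller m = JAXBContext.newInstance(NodeInfo.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.marshal(info, System.out); // prints <nodeInfo>...</nodeInfo>
  }
}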
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; import java.util.ArrayList; import java.util.Map; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.util.ConverterUtils; @XmlRootElement(name = "app") @XmlAccessorType(XmlAccessType.FIELD) public class AppInfo { protected String id; protected String state; protected String user; protected ArrayList<String> containerids; public AppInfo() { } // JAXB needs this public AppInfo(final Application app) { this.id = ConverterUtils.toString(app.getAppId()); this.state = app.getApplicationState().toString(); this.user = app.getUser(); this.containerids = new ArrayList<String>(); Map<ContainerId, Container> appContainers = app.getContainers(); for (ContainerId containerId : appContainers.keySet()) { String containerIdStr = ConverterUtils.toString(containerId); containerids.add(containerIdStr); } } public String getId() { return this.id; } public String getUser() { return this.user; } public String getState() { return this.state; } public ArrayList<String> getContainers() { return this.containerids; } }
2,337
30.594595
90
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppsInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "apps") @XmlAccessorType(XmlAccessType.FIELD) public class AppsInfo { protected ArrayList<AppInfo> app = new ArrayList<AppInfo>(); public AppsInfo() { } // JAXB needs this public void add(AppInfo appInfo) { app.add(appInfo); } public ArrayList<AppInfo> getApps() { return app; } }
1,378
30.340909
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.util.StringHelper.ujoin; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @XmlRootElement(name = "container") @XmlAccessorType(XmlAccessType.FIELD) public class ContainerInfo { protected String id; protected String state; protected int exitCode; protected String diagnostics; protected String user; protected long totalMemoryNeededMB; protected long totalVCoresNeeded; protected String containerLogsLink; protected String nodeId; @XmlTransient protected String containerLogsShortLink; @XmlTransient protected String exitStatus; public ContainerInfo() { } // JAXB needs this public ContainerInfo(final Context nmContext, final Container container) { this(nmContext, container, "", ""); } public ContainerInfo(final Context nmContext, final Container container, String requestUri, String pathPrefix) { this.id = container.getContainerId().toString(); this.nodeId = nmContext.getNodeId().toString(); ContainerStatus containerData = container.cloneAndGetContainerStatus(); this.exitCode = containerData.getExitStatus(); this.exitStatus = (this.exitCode == ContainerExitStatus.INVALID) ? 
"N/A" : String.valueOf(exitCode); this.state = container.getContainerState().toString(); this.diagnostics = containerData.getDiagnostics(); if (this.diagnostics == null || this.diagnostics.isEmpty()) { this.diagnostics = ""; } this.user = container.getUser(); Resource res = container.getResource(); if (res != null) { this.totalMemoryNeededMB = res.getMemory(); this.totalVCoresNeeded = res.getVirtualCores(); } this.containerLogsShortLink = ujoin("containerlogs", this.id, container.getUser()); if (requestUri == null) { requestUri = ""; } if (pathPrefix == null) { pathPrefix = ""; } this.containerLogsLink = join(requestUri, pathPrefix, this.containerLogsShortLink); } public String getId() { return this.id; } public String getNodeId() { return this.nodeId; } public String getState() { return this.state; } public int getExitCode() { return this.exitCode; } public String getExitStatus() { return this.exitStatus; } public String getDiagnostics() { return this.diagnostics; } public String getUser() { return this.user; } public String getShortLogLink() { return this.containerLogsShortLink; } public String getLogLink() { return this.containerLogsLink; } public long getMemoryNeeded() { return this.totalMemoryNeededMB; } public long getVCoresNeeded() { return this.totalVCoresNeeded; } }
4,113
28.385714
86
java
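The ContainerInfo DAO above is a plain JAXB-annotated bean, so it can be serialized without any web framework. A minimal sketch (the demo class and method names are hypothetical; only ContainerInfo and its annotations come from the code above — note the @XmlTransient fields are skipped by the marshaller):

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

public class ContainerInfoXmlDemo {
  // Marshals the DAO into the <container> element declared by @XmlRootElement.
  public static String toXml(ContainerInfo info) throws Exception {
    JAXBContext ctx = JAXBContext.newInstance(ContainerInfo.class);
    Marshaller m = ctx.createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    StringWriter out = new StringWriter();
    m.marshal(info, out);
    return out.toString();
  }
}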
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainersInfo.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "containers")
@XmlAccessorType(XmlAccessType.FIELD)
public class ContainersInfo {

  protected ArrayList<ContainerInfo> container = new ArrayList<ContainerInfo>();

  public ContainersInfo() {
  } // JAXB needs this

  public void add(ContainerInfo containerInfo) {
    container.add(containerInfo);
  }

  public ArrayList<ContainerInfo> getContainers() {
    return container;
  }
}
1,456
32.113636
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocol.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api;

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;

public interface LocalizationProtocol {
  public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status)
      throws YarnException, IOException;
}
1,269
41.333333
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/ResourceLocalizationSpec.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.URL;

import com.google.common.annotations.VisibleForTesting;

@Private
@VisibleForTesting
public interface ResourceLocalizationSpec {

  public void setResource(LocalResource rsrc);

  public LocalResource getResource();

  public void setDestinationDirectory(URL destinationDirectory);

  public URL getDestinationDirectory();
}
1,359
35.756757
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api;

import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService;

@ProtocolInfo(
    protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB",
    protocolVersion = 1)
public interface LocalizationProtocolPB
    extends LocalizationProtocolService.BlockingInterface {
}
1,218
42.535714
100
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerStatus.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;

import java.util.List;

public interface LocalizerStatus {

  String getLocalizerId();
  void setLocalizerId(String id);

  List<LocalResourceStatus> getResources();
  void addAllResources(List<LocalResourceStatus> resources);
  void addResourceStatus(LocalResourceStatus resource);
  LocalResourceStatus getResourceStatus(int index);
  void removeResource(int index);
  void clearResources();
}
1,259
36.058824
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/ResourceStatusType.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;

public enum ResourceStatusType {
  FETCH_PENDING,
  FETCH_SUCCESS,
  FETCH_FAILURE
}
949
37
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerHeartbeatResponse.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;

import java.util.List;

import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;

public interface LocalizerHeartbeatResponse {

  public LocalizerAction getLocalizerAction();
  public void setLocalizerAction(LocalizerAction action);

  public List<ResourceLocalizationSpec> getResourceSpecs();
  public void setResourceSpecs(List<ResourceLocalizationSpec> rsrcs);
}
1,227
39.933333
74
java
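On the localizer side, a heartbeat response is typically consumed by switching on the action and draining the resource specs. A hedged sketch, assuming it lives in the protocolrecords package (download and shutdown are hypothetical placeholders; the response accessors are the ones declared above):

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;

public class HeartbeatLoopSketch {
  void onHeartbeat(LocalizationProtocol proxy, LocalizerStatus status)
      throws YarnException, IOException {
    LocalizerHeartbeatResponse response = proxy.heartbeat(status);
    switch (response.getLocalizerAction()) {
    case LIVE:
      // Fetch everything the NodeManager handed out in this round.
      for (ResourceLocalizationSpec spec : response.getResourceSpecs()) {
        download(spec.getResource(), spec.getDestinationDirectory());
      }
      break;
    case DIE:
      shutdown();
      break;
    }
  }

  private void download(LocalResource rsrc, URL dest) {
    // hypothetical: fetch rsrc and place it under dest
  }

  private void shutdown() {
    // hypothetical: stop heartbeating and exit
  }
}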
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalizerAction.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;

public enum LocalizerAction {
  LIVE,
  DIE
}
907
38.478261
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/LocalResourceStatus.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords;

import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.URL;

public interface LocalResourceStatus {

  public LocalResource getResource();
  public ResourceStatusType getStatus();
  public URL getLocalPath();
  public long getLocalSize();
  public SerializedException getException();

  public void setResource(LocalResource resource);
  public void setStatus(ResourceStatusType status);
  public void setLocalPath(URL localPath);
  public void setLocalSize(long size);
  public void setException(SerializedException exception);
}
1,500
39.567568
74
java
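Like other YARN protocol records, these interfaces are instantiated through the record factory rather than a concrete class. A minimal sketch, assuming the PB implementations further below are on the classpath so the factory can resolve them (the id and size values are illustrative):

import org.apache.hadoop.yarn.util.Records;

public class LocalizerStatusSketch {
  static LocalizerStatus reportOneSuccess() {
    LocalResourceStatus rsrcStatus = Records.newRecord(LocalResourceStatus.class);
    rsrcStatus.setStatus(ResourceStatusType.FETCH_SUCCESS);
    rsrcStatus.setLocalSize(4096L); // illustrative size in bytes

    LocalizerStatus status = Records.newRecord(LocalizerStatus.class);
    status.setLocalizerId("container_1405051724031_0001_01_000002"); // illustrative id
    status.addResourceStatus(rsrcStatus);
    return status;
  }
}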
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerStatusPBImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProtoOrBuilder; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; public class LocalizerStatusPBImpl extends ProtoBase<LocalizerStatusProto> implements LocalizerStatus { LocalizerStatusProto proto = LocalizerStatusProto.getDefaultInstance(); LocalizerStatusProto.Builder builder = null; boolean viaProto = false; private List<LocalResourceStatus> resources = null; public LocalizerStatusPBImpl() { builder = LocalizerStatusProto.newBuilder(); } public LocalizerStatusPBImpl(LocalizerStatusProto proto) { this.proto = proto; viaProto = true; } public LocalizerStatusProto getProto() { mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } private void mergeLocalToBuilder() { if (this.resources != null) { addResourcesToProto(); } } private void mergeLocalToProto() { if (viaProto) maybeInitBuilder(); mergeLocalToBuilder(); proto = builder.build(); viaProto = true; } private void maybeInitBuilder() { if (viaProto || builder == null) { builder = LocalizerStatusProto.newBuilder(proto); } viaProto = false; } @Override public String getLocalizerId() { LocalizerStatusProtoOrBuilder p = viaProto ? proto : builder; if (!p.hasLocalizerId()) { return null; } return (p.getLocalizerId()); } @Override public List<LocalResourceStatus> getResources() { initResources(); return this.resources; } @Override public void setLocalizerId(String localizerId) { maybeInitBuilder(); if (localizerId == null) { builder.clearLocalizerId(); return; } builder.setLocalizerId(localizerId); } private void initResources() { if (this.resources != null) { return; } LocalizerStatusProtoOrBuilder p = viaProto ? 
proto : builder; List<LocalResourceStatusProto> list = p.getResourcesList(); this.resources = new ArrayList<LocalResourceStatus>(); for (LocalResourceStatusProto c : list) { this.resources.add(convertFromProtoFormat(c)); } } private void addResourcesToProto() { maybeInitBuilder(); builder.clearResources(); if (this.resources == null) return; Iterable<LocalResourceStatusProto> iterable = new Iterable<LocalResourceStatusProto>() { @Override public Iterator<LocalResourceStatusProto> iterator() { return new Iterator<LocalResourceStatusProto>() { Iterator<LocalResourceStatus> iter = resources.iterator(); @Override public boolean hasNext() { return iter.hasNext(); } @Override public LocalResourceStatusProto next() { return convertToProtoFormat(iter.next()); } @Override public void remove() { throw new UnsupportedOperationException(); } }; } }; builder.addAllResources(iterable); } @Override public void addAllResources(List<LocalResourceStatus> resources) { if (resources == null) return; initResources(); this.resources.addAll(resources); } @Override public LocalResourceStatus getResourceStatus(int index) { initResources(); return this.resources.get(index); } @Override public void addResourceStatus(LocalResourceStatus resource) { initResources(); this.resources.add(resource); } @Override public void removeResource(int index) { initResources(); this.resources.remove(index); } @Override public void clearResources() { initResources(); this.resources.clear(); } private LocalResourceStatus convertFromProtoFormat(LocalResourceStatusProto p) { return new LocalResourceStatusPBImpl(p); } private LocalResourceStatusProto convertToProtoFormat(LocalResourceStatus s) { return ((LocalResourceStatusPBImpl)s).getProto(); } }
5,361
26.782383
101
java
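The getProto()/constructor pair above gives each record a cheap wire round-trip; a short sketch of that contract (the localizer id is illustrative):

import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;

public class ProtoRoundTripSketch {
  static void roundTrip() {
    LocalizerStatusPBImpl original = new LocalizerStatusPBImpl();
    original.setLocalizerId("localizer-42"); // illustrative id
    LocalizerStatusProto proto = original.getProto(); // merges local state into the proto
    LocalizerStatusPBImpl copy = new LocalizerStatusPBImpl(proto); // reads via proto until mutated
    assert "localizer-42".equals(copy.getLocalizerId());
  }
}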
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalizerHeartbeatResponsePBImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerActionProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceLocalizationSpecProto; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.ResourceLocalizationSpecPBImpl; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; public class LocalizerHeartbeatResponsePBImpl extends ProtoBase<LocalizerHeartbeatResponseProto> implements LocalizerHeartbeatResponse { LocalizerHeartbeatResponseProto proto = LocalizerHeartbeatResponseProto.getDefaultInstance(); LocalizerHeartbeatResponseProto.Builder builder = null; boolean viaProto = false; private List<ResourceLocalizationSpec> resourceSpecs; public LocalizerHeartbeatResponsePBImpl() { builder = LocalizerHeartbeatResponseProto.newBuilder(); } public LocalizerHeartbeatResponsePBImpl( LocalizerHeartbeatResponseProto proto) { this.proto = proto; viaProto = true; } public LocalizerHeartbeatResponseProto getProto() { mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } private void mergeLocalToBuilder() { if (resourceSpecs != null) { addResourcesToProto(); } } private void mergeLocalToProto() { if (viaProto) maybeInitBuilder(); mergeLocalToBuilder(); proto = builder.build(); viaProto = true; } private void maybeInitBuilder() { if (viaProto || builder == null) { builder = LocalizerHeartbeatResponseProto.newBuilder(proto); } viaProto = false; } @Override public LocalizerAction getLocalizerAction() { LocalizerHeartbeatResponseProtoOrBuilder p = viaProto ? 
proto : builder; if (!p.hasAction()) { return null; } return convertFromProtoFormat(p.getAction()); } @Override public List<ResourceLocalizationSpec> getResourceSpecs() { initResources(); return this.resourceSpecs; } public void setLocalizerAction(LocalizerAction action) { maybeInitBuilder(); if (action == null) { builder.clearAction(); return; } builder.setAction(convertToProtoFormat(action)); } public void setResourceSpecs(List<ResourceLocalizationSpec> rsrcs) { maybeInitBuilder(); if (rsrcs == null) { builder.clearResources(); return; } this.resourceSpecs = rsrcs; } private void initResources() { if (this.resourceSpecs != null) { return; } LocalizerHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder; List<ResourceLocalizationSpecProto> list = p.getResourcesList(); this.resourceSpecs = new ArrayList<ResourceLocalizationSpec>(); for (ResourceLocalizationSpecProto c : list) { this.resourceSpecs.add(convertFromProtoFormat(c)); } } private void addResourcesToProto() { maybeInitBuilder(); builder.clearResources(); if (this.resourceSpecs == null) return; Iterable<ResourceLocalizationSpecProto> iterable = new Iterable<ResourceLocalizationSpecProto>() { @Override public Iterator<ResourceLocalizationSpecProto> iterator() { return new Iterator<ResourceLocalizationSpecProto>() { Iterator<ResourceLocalizationSpec> iter = resourceSpecs.iterator(); @Override public boolean hasNext() { return iter.hasNext(); } @Override public ResourceLocalizationSpecProto next() { ResourceLocalizationSpec resource = iter.next(); return ((ResourceLocalizationSpecPBImpl)resource).getProto(); } @Override public void remove() { throw new UnsupportedOperationException(); } }; } }; builder.addAllResources(iterable); } private ResourceLocalizationSpec convertFromProtoFormat( ResourceLocalizationSpecProto p) { return new ResourceLocalizationSpecPBImpl(p); } private LocalizerActionProto convertToProtoFormat(LocalizerAction a) { return LocalizerActionProto.valueOf(a.name()); } private LocalizerAction convertFromProtoFormat(LocalizerActionProto a) { return LocalizerAction.valueOf(a.name()); } }
5,697
30.832402
112
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/LocalResourceStatusPBImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase; import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto; import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceStatusTypeProto; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType; public class LocalResourceStatusPBImpl extends ProtoBase<LocalResourceStatusProto> implements LocalResourceStatus { LocalResourceStatusProto proto = LocalResourceStatusProto.getDefaultInstance(); LocalResourceStatusProto.Builder builder = null; boolean viaProto = false; private LocalResource resource; private URL localPath; private SerializedException exception; public LocalResourceStatusPBImpl() { builder = LocalResourceStatusProto.newBuilder(); } public LocalResourceStatusPBImpl(LocalResourceStatusProto proto) { this.proto = proto; viaProto = true; } public LocalResourceStatusProto getProto() { mergeLocalToProto(); proto = viaProto ? 
proto : builder.build(); viaProto = true; return proto; } private void mergeLocalToBuilder() { if (this.resource != null && !((LocalResourcePBImpl)this.resource).getProto() .equals(builder.getResource())) { builder.setResource(convertToProtoFormat(this.resource)); } if (this.localPath != null && !((URLPBImpl)this.localPath).getProto() .equals(builder.getLocalPath())) { builder.setLocalPath(convertToProtoFormat(this.localPath)); } if (this.exception != null && !((SerializedExceptionPBImpl)this.exception).getProto() .equals(builder.getException())) { builder.setException(convertToProtoFormat(this.exception)); } } private void mergeLocalToProto() { if (viaProto) maybeInitBuilder(); mergeLocalToBuilder(); proto = builder.build(); viaProto = true; } private void maybeInitBuilder() { if (viaProto || builder == null) { builder = LocalResourceStatusProto.newBuilder(proto); } viaProto = false; } @Override public LocalResource getResource() { LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder; if (this.resource != null) { return this.resource; } if (!p.hasResource()) { return null; } this.resource = convertFromProtoFormat(p.getResource()); return this.resource; } @Override public ResourceStatusType getStatus() { LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder; if (!p.hasStatus()) { return null; } return convertFromProtoFormat(p.getStatus()); } @Override public URL getLocalPath() { LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder; if (this.localPath != null) { return this.localPath; } if (!p.hasLocalPath()) { return null; } this.localPath = convertFromProtoFormat(p.getLocalPath()); return this.localPath; } @Override public long getLocalSize() { LocalResourceStatusProtoOrBuilder p = viaProto ? proto : builder; return (p.getLocalSize()); } @Override public SerializedException getException() { LocalResourceStatusProtoOrBuilder p = viaProto ? 
proto : builder; if (this.exception != null) { return this.exception; } if (!p.hasException()) { return null; } this.exception = convertFromProtoFormat(p.getException()); return this.exception; } @Override public void setResource(LocalResource resource) { maybeInitBuilder(); if (resource == null) builder.clearResource(); this.resource = resource; } @Override public void setStatus(ResourceStatusType status) { maybeInitBuilder(); if (status == null) { builder.clearStatus(); return; } builder.setStatus(convertToProtoFormat(status)); } @Override public void setLocalPath(URL localPath) { maybeInitBuilder(); if (localPath == null) builder.clearLocalPath(); this.localPath = localPath; } @Override public void setLocalSize(long size) { maybeInitBuilder(); builder.setLocalSize(size); } @Override public void setException(SerializedException exception) { maybeInitBuilder(); if (exception == null) builder.clearException(); this.exception = exception; } private LocalResourceProto convertToProtoFormat(LocalResource rsrc) { return ((LocalResourcePBImpl)rsrc).getProto(); } private LocalResourcePBImpl convertFromProtoFormat(LocalResourceProto rsrc) { return new LocalResourcePBImpl(rsrc); } private URLPBImpl convertFromProtoFormat(URLProto p) { return new URLPBImpl(p); } private URLProto convertToProtoFormat(URL t) { return ((URLPBImpl)t).getProto(); } private ResourceStatusTypeProto convertToProtoFormat(ResourceStatusType e) { return ResourceStatusTypeProto.valueOf(e.name()); } private ResourceStatusType convertFromProtoFormat(ResourceStatusTypeProto e) { return ResourceStatusType.valueOf(e.name()); } private SerializedExceptionPBImpl convertFromProtoFormat(SerializedExceptionProto p) { return new SerializedExceptionPBImpl(p); } private SerializedExceptionProto convertToProtoFormat(SerializedException t) { return ((SerializedExceptionPBImpl)t).getProto(); } }
7,007
30.146667
105
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/ResourceLocalizationSpecPBImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase; import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceLocalizationSpecProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceLocalizationSpecProtoOrBuilder; import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec; public class ResourceLocalizationSpecPBImpl extends ProtoBase<ResourceLocalizationSpecProto> implements ResourceLocalizationSpec { private ResourceLocalizationSpecProto proto = ResourceLocalizationSpecProto .getDefaultInstance(); private ResourceLocalizationSpecProto.Builder builder = null; private boolean viaProto; private LocalResource resource = null; private URL destinationDirectory = null; public ResourceLocalizationSpecPBImpl() { builder = ResourceLocalizationSpecProto.newBuilder(); } public ResourceLocalizationSpecPBImpl(ResourceLocalizationSpecProto proto) { this.proto = proto; viaProto = true; } @Override public LocalResource getResource() { ResourceLocalizationSpecProtoOrBuilder p = viaProto ? proto : builder; if (resource != null) { return resource; } if (!p.hasResource()) { return null; } resource = new LocalResourcePBImpl(p.getResource()); return resource; } @Override public void setResource(LocalResource rsrc) { maybeInitBuilder(); resource = rsrc; } @Override public URL getDestinationDirectory() { ResourceLocalizationSpecProtoOrBuilder p = viaProto ? proto : builder; if (destinationDirectory != null) { return destinationDirectory; } if (!p.hasDestinationDirectory()) { return null; } destinationDirectory = new URLPBImpl(p.getDestinationDirectory()); return destinationDirectory; } @Override public void setDestinationDirectory(URL destinationDirectory) { maybeInitBuilder(); this.destinationDirectory = destinationDirectory; } @Override public ResourceLocalizationSpecProto getProto() { mergeLocalToBuilder(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } private synchronized void maybeInitBuilder() { if (builder == null || viaProto) { builder = ResourceLocalizationSpecProto.newBuilder(proto); } viaProto = false; } private void mergeLocalToBuilder() { ResourceLocalizationSpecProtoOrBuilder l = viaProto ? 
proto : builder; if (this.resource != null && !(l.getResource() .equals(((LocalResourcePBImpl) resource).getProto()))) { maybeInitBuilder(); builder.setResource(((LocalResourcePBImpl) resource).getProto()); } if (this.destinationDirectory != null && !(l.getDestinationDirectory() .equals(((URLPBImpl) destinationDirectory).getProto()))) { maybeInitBuilder(); builder.setDestinationDirectory(((URLPBImpl) destinationDirectory) .getProto()); } } }
4,089
33.661017
110
java
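The PBImpl classes above all repeat the same lazy proto/builder state machine. A self-contained toy model of that pattern, for orientation only (FakeProto stands in for a generated protobuf message; nothing here is Hadoop API):

final class FakeProto {
  final String value;
  FakeProto(String value) { this.value = value; }

  static final class Builder {
    String value;
    Builder(FakeProto base) { this.value = base.value; }
    FakeProto build() { return new FakeProto(value); }
  }
}

final class RecordPBModel {
  private FakeProto proto = new FakeProto(null);
  private FakeProto.Builder builder = null;
  private boolean viaProto = false;

  RecordPBModel() { builder = new FakeProto.Builder(proto); }
  RecordPBModel(FakeProto proto) { this.proto = proto; viaProto = true; }

  // Reads consult whichever representation is currently authoritative.
  String getValue() { return viaProto ? proto.value : builder.value; }

  // The first write after reading via proto copies the proto into a builder.
  void setValue(String v) { maybeInitBuilder(); builder.value = v; }

  // Serializing flips authority back to the (immutable) proto.
  FakeProto getProto() {
    proto = viaProto ? proto : builder.build();
    viaProto = true;
    return proto;
  }

  private void maybeInitBuilder() {
    if (viaProto || builder == null) {
      builder = new FakeProto.Builder(proto);
    }
    viaProto = false;
  }
}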
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.service; import java.io.IOException; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; public class LocalizationProtocolPBServiceImpl implements LocalizationProtocolPB { private LocalizationProtocol real; public LocalizationProtocolPBServiceImpl(LocalizationProtocol impl) { this.real = impl; } @Override public LocalizerHeartbeatResponseProto heartbeat(RpcController controller, LocalizerStatusProto proto) throws ServiceException { LocalizerStatusPBImpl request = new LocalizerStatusPBImpl(proto); try { LocalizerHeartbeatResponse response = real.heartbeat(request); return ((LocalizerHeartbeatResponsePBImpl)response).getProto(); } catch (YarnException e) { throw new ServiceException(e); } catch (IOException e) { throw new ServiceException(e); } } }
2,437
41.034483
110
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.client; import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl; import com.google.protobuf.ServiceException; public class LocalizationProtocolPBClientImpl implements LocalizationProtocol, Closeable { private LocalizationProtocolPB proxy; public LocalizationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, ProtobufRpcEngine.class); proxy = (LocalizationProtocolPB)RPC.getProxy( LocalizationProtocolPB.class, clientVersion, addr, conf); } @Override public void close() { if (this.proxy != null) { RPC.stopProxy(this.proxy); } } @Override public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) throws YarnException, IOException { LocalizerStatusProto statusProto = ((LocalizerStatusPBImpl)status).getProto(); try { return new LocalizerHeartbeatResponsePBImpl( proxy.heartbeat(null, statusProto)); } catch (ServiceException e) { RPCUtil.unwrapAndThrowException(e); return null; } } }
2,871
39.450704
126
java
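A hedged construction sketch for the client above (the address and port are illustrative; the client implements Closeable, so try-with-resources releases the RPC proxy):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;

public class LocalizationClientSketch {
  static LocalizerHeartbeatResponse heartbeatOnce(LocalizerStatus status)
      throws Exception {
    Configuration conf = new YarnConfiguration();
    InetSocketAddress addr = new InetSocketAddress("localhost", 8040); // illustrative
    try (LocalizationProtocolPBClientImpl client =
        new LocalizationProtocolPBClientImpl(1L, addr, conf)) { // version 1 per @ProtocolInfo
      return client.heartbeat(status);
    }
  }
}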
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/LCEResourcesHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.util; import java.io.IOException; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; public interface LCEResourcesHandler extends Configurable { void init(LinuxContainerExecutor lce) throws IOException; /** * Called by the LinuxContainerExecutor before launching the executable * inside the container. * @param containerId the id of the container being launched * @param containerResource the node resources the container will be using */ void preExecute(ContainerId containerId, Resource containerResource) throws IOException; /** * Called by the LinuxContainerExecutor after the executable inside the * container has exited (successfully or not). * @param containerId the id of the container which was launched */ void postExecute(ContainerId containerId); String getResourcesOption(ContainerId containerId); }
1,876
36.54
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerBuilderUtils.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.util;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;

public class NodeManagerBuilderUtils {

  public static ResourceLocalizationSpec newResourceLocalizationSpec(
      LocalResource rsrc, Path path) {
    URL local = ConverterUtils.getYarnUrlFromPath(path);
    ResourceLocalizationSpec resourceLocalizationSpec =
        Records.newRecord(ResourceLocalizationSpec.class);
    resourceLocalizationSpec.setDestinationDirectory(local);
    resourceLocalizationSpec.setResource(rsrc);
    return resourceLocalizationSpec;
  }
}
1,651
40.3
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;

public class DefaultLCEResourcesHandler implements LCEResourcesHandler {

  final static Log LOG = LogFactory.getLog(DefaultLCEResourcesHandler.class);

  private Configuration conf;

  public DefaultLCEResourcesHandler() {
  }

  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  public void init(LinuxContainerExecutor lce) {
  }

  /*
   * LCE Resources Handler interface
   */

  public void preExecute(ContainerId containerId, Resource containerResource) {
  }

  public void postExecute(ContainerId containerId) {
  }

  public String getResourcesOption(ContainerId containerId) {
    return "cgroups=none";
  }
}
1,904
28.307692
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.nodemanager.util; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.Writer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.SystemClock; public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { final static Log LOG = LogFactory .getLog(CgroupsLCEResourcesHandler.class); private Configuration conf; private String cgroupPrefix; private boolean cgroupMount; private String cgroupMountPath; private boolean cpuWeightEnabled = true; private boolean strictResourceUsageMode = false; private final String MTAB_FILE = "/proc/mounts"; private final String CGROUPS_FSTYPE = "cgroup"; private final String CONTROLLER_CPU = "cpu"; private final String CPU_PERIOD_US = "cfs_period_us"; private final String CPU_QUOTA_US = "cfs_quota_us"; private final int CPU_DEFAULT_WEIGHT = 1024; // set by kernel private final int MAX_QUOTA_US = 1000 * 1000; private final int MIN_PERIOD_US = 1000; private final Map<String, String> controllerPaths; // Controller -> path private long deleteCgroupTimeout; private long deleteCgroupDelay; // package private for testing purposes Clock clock; private float yarnProcessors; int nodeVCores; public CgroupsLCEResourcesHandler() { this.controllerPaths = new HashMap<String, String>(); clock = new SystemClock(); } @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public Configuration getConf() { return conf; } @VisibleForTesting void initConfig() throws IOException { this.cgroupPrefix = conf.get(YarnConfiguration. 
NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn"); this.cgroupMount = conf.getBoolean(YarnConfiguration. NM_LINUX_CONTAINER_CGROUPS_MOUNT, false); this.cgroupMountPath = conf.get(YarnConfiguration. NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null); this.deleteCgroupTimeout = conf.getLong( YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT); this.deleteCgroupDelay = conf.getLong(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY); // remove extra /'s at end or start of cgroupPrefix if (cgroupPrefix.charAt(0) == '/') { cgroupPrefix = cgroupPrefix.substring(1); } this.strictResourceUsageMode = conf .getBoolean( YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE); int len = cgroupPrefix.length(); if (cgroupPrefix.charAt(len - 1) == '/') { cgroupPrefix = cgroupPrefix.substring(0, len - 1); } } public void init(LinuxContainerExecutor lce) throws IOException { this.init(lce, ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf)); } @VisibleForTesting void init(LinuxContainerExecutor lce, ResourceCalculatorPlugin plugin) throws IOException { initConfig(); // mount cgroups if requested if (cgroupMount && cgroupMountPath != null) { ArrayList<String> cgroupKVs = new ArrayList<String>(); cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" + CONTROLLER_CPU); lce.mountCgroups(cgroupKVs, cgroupPrefix); } initializeControllerPaths(); nodeVCores = NodeManagerHardwareUtils.getVCores(plugin, conf); // cap overall usage to the number of cores allocated to YARN yarnProcessors = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf); int systemProcessors = NodeManagerHardwareUtils.getNodeCPUs(plugin, conf); if (systemProcessors != (int) yarnProcessors) { LOG.info("YARN containers restricted to " + yarnProcessors + " cores"); int[] limits = getOverallLimits(yarnProcessors); updateCgroup(CONTROLLER_CPU, "", CPU_PERIOD_US, String.valueOf(limits[0])); updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(limits[1])); } else if (cpuLimitsExist()) { LOG.info("Removing CPU constraints for YARN containers."); updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(-1)); } } boolean cpuLimitsExist() throws IOException { String path = pathForCgroup(CONTROLLER_CPU, ""); File quotaFile = new File(path, CONTROLLER_CPU + "." + CPU_QUOTA_US); if (quotaFile.exists()) { String contents = FileUtils.readFileToString(quotaFile, "UTF-8"); int quotaUS = Integer.parseInt(contents.trim()); if (quotaUS != -1) { return true; } } return false; } @VisibleForTesting int[] getOverallLimits(float yarnProcessors) { int[] ret = new int[2]; if (yarnProcessors < 0.01f) { throw new IllegalArgumentException("Number of processors can't be <= 0."); } int quotaUS = MAX_QUOTA_US; int periodUS = (int) (MAX_QUOTA_US / yarnProcessors); if (yarnProcessors < 1.0f) { periodUS = MAX_QUOTA_US; quotaUS = (int) (periodUS * yarnProcessors); if (quotaUS < MIN_PERIOD_US) { LOG .warn("The quota calculated for the cgroup was too low. The minimum value is " + MIN_PERIOD_US + ", calculated value is " + quotaUS + ". 
Setting quota to minimum value.");
        quotaUS = MIN_PERIOD_US;
      }
    }

    // cfs_period_us can't be less than 1000 microseconds
    // if the value of periodUS is less than 1000, we can't really use cgroups
    // to limit cpu
    if (periodUS < MIN_PERIOD_US) {
      LOG.warn("The period calculated for the cgroup was too low."
          + " The minimum value is " + MIN_PERIOD_US
          + ", calculated value is " + periodUS
          + ". Using all available CPU.");
      periodUS = MAX_QUOTA_US;
      quotaUS = -1;
    }

    ret[0] = periodUS;
    ret[1] = quotaUS;
    return ret;
  }

  boolean isCpuWeightEnabled() {
    return this.cpuWeightEnabled;
  }

  /*
   * Next four functions are for an individual cgroup.
   */

  private String pathForCgroup(String controller, String groupName) {
    String controllerPath = controllerPaths.get(controller);
    return controllerPath + "/" + cgroupPrefix + "/" + groupName;
  }

  private void createCgroup(String controller, String groupName)
      throws IOException {
    String path = pathForCgroup(controller, groupName);

    if (LOG.isDebugEnabled()) {
      LOG.debug("createCgroup: " + path);
    }

    if (!new File(path).mkdir()) {
      throw new IOException("Failed to create cgroup at " + path);
    }
  }

  private void updateCgroup(String controller, String groupName, String param,
      String value) throws IOException {
    String path = pathForCgroup(controller, groupName);
    param = controller + "." + param;

    if (LOG.isDebugEnabled()) {
      LOG.debug("updateCgroup: " + path + ": " + param + "=" + value);
    }

    PrintWriter pw = null;
    try {
      File file = new File(path + "/" + param);
      Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
      pw = new PrintWriter(w);
      pw.write(value);
    } catch (IOException e) {
      throw new IOException("Unable to set " + param + "=" + value
          + " for cgroup at: " + path, e);
    } finally {
      if (pw != null) {
        boolean hasError = pw.checkError();
        pw.close();
        if (hasError) {
          throw new IOException("Unable to set " + param + "=" + value
              + " for cgroup at: " + path);
        }
        if (pw.checkError()) {
          throw new IOException("Error while closing cgroup file " + path);
        }
      }
    }
  }

  /*
   * Utility routine to print first line from cgroup tasks file
   */
  private void logLineFromTasksFile(File cgf) {
    String str;
    if (LOG.isDebugEnabled()) {
      try (BufferedReader inl =
          new BufferedReader(new InputStreamReader(
              new FileInputStream(cgf + "/tasks"), "UTF-8"))) {
        if ((str = inl.readLine()) != null) {
          LOG.debug("First line in cgroup tasks file: " + cgf + " " + str);
        }
      } catch (IOException e) {
        LOG.warn("Failed to read cgroup tasks file.", e);
      }
    }
  }

  /**
   * If tasks file is empty, delete the cgroup.
   *
   * @param cgf object referring to the cgroup to be deleted
   * @return Boolean indicating whether cgroup was deleted
   */
  @VisibleForTesting
  boolean checkAndDeleteCgroup(File cgf) throws InterruptedException {
    boolean deleted = false;
    try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
      if (in.read() == -1) {
        /*
         * "tasks" file is empty, sleep a bit more and then try to delete the
         * cgroup. Some versions of linux will occasionally panic due to a race
         * condition in this area, hence the paranoia.
         */
        Thread.sleep(deleteCgroupDelay);
        deleted = cgf.delete();
        if (!deleted) {
          LOG.warn("Failed attempt to delete cgroup: " + cgf);
        }
      } else {
        logLineFromTasksFile(cgf);
      }
    } catch (IOException e) {
      LOG.warn("Failed to read cgroup tasks file.", e);
    }
    return deleted;
  }

  @VisibleForTesting
  boolean deleteCgroup(String cgroupPath) {
    boolean deleted = false;

    if (LOG.isDebugEnabled()) {
      LOG.debug("deleteCgroup: " + cgroupPath);
    }
    long start = clock.getTime();
    do {
      try {
        deleted = checkAndDeleteCgroup(new File(cgroupPath));
        if (!deleted) {
          Thread.sleep(deleteCgroupDelay);
        }
      } catch (InterruptedException ex) {
        // NOP
      }
    } while (!deleted && (clock.getTime() - start) < deleteCgroupTimeout);

    if (!deleted) {
      LOG.warn("Unable to delete cgroup at: " + cgroupPath
          + ", tried to delete for " + deleteCgroupTimeout + "ms");
    }
    return deleted;
  }

  /*
   * Next three functions operate on all the resources we are enforcing.
   */

  private void setupLimits(ContainerId containerId,
      Resource containerResource) throws IOException {
    String containerName = containerId.toString();

    if (isCpuWeightEnabled()) {
      int containerVCores = containerResource.getVirtualCores();
      createCgroup(CONTROLLER_CPU, containerName);
      int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores;
      updateCgroup(CONTROLLER_CPU, containerName, "shares",
          String.valueOf(cpuShares));
      if (strictResourceUsageMode) {
        if (nodeVCores != containerVCores) {
          float containerCPU =
              (containerVCores * yarnProcessors) / (float) nodeVCores;
          int[] limits = getOverallLimits(containerCPU);
          updateCgroup(CONTROLLER_CPU, containerName, CPU_PERIOD_US,
              String.valueOf(limits[0]));
          updateCgroup(CONTROLLER_CPU, containerName, CPU_QUOTA_US,
              String.valueOf(limits[1]));
        }
      }
    }
  }

  private void clearLimits(ContainerId containerId) {
    if (isCpuWeightEnabled()) {
      deleteCgroup(pathForCgroup(CONTROLLER_CPU, containerId.toString()));
    }
  }

  /*
   * LCE Resources Handler interface
   */

  public void preExecute(ContainerId containerId, Resource containerResource)
      throws IOException {
    setupLimits(containerId, containerResource);
  }

  public void postExecute(ContainerId containerId) {
    clearLimits(containerId);
  }

  public String getResourcesOption(ContainerId containerId) {
    String containerName = containerId.toString();

    StringBuilder sb = new StringBuilder("cgroups=");

    if (isCpuWeightEnabled()) {
      sb.append(pathForCgroup(CONTROLLER_CPU, containerName) + "/tasks");
      sb.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR);
    }

    if (sb.charAt(sb.length() - 1)
        == PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR) {
      sb.deleteCharAt(sb.length() - 1);
    }

    return sb.toString();
  }

  /* We are looking for entries of the form:
   * none /cgroup/path/mem cgroup rw,memory 0 0
   *
   * Use a simple pattern that splits on the five spaces, and
   * grabs the 2nd, 3rd, and 4th fields.
   */
  private static final Pattern MTAB_FILE_FORMAT = Pattern.compile(
      "^[^\\s]+\\s([^\\s]+)\\s([^\\s]+)\\s([^\\s]+)\\s[^\\s]+\\s[^\\s]+$");

  /*
   * Returns a map: path -> mount options
   * for mounts with type "cgroup". Cgroup controllers will
   * appear in the list of options for a path.
   */
  private Map<String, List<String>> parseMtab() throws IOException {
    Map<String, List<String>> ret = new HashMap<String, List<String>>();
    BufferedReader in = null;

    try {
      FileInputStream fis = new FileInputStream(new File(getMtabFileName()));
      in = new BufferedReader(new InputStreamReader(fis, "UTF-8"));

      for (String str = in.readLine(); str != null; str = in.readLine()) {
        Matcher m = MTAB_FILE_FORMAT.matcher(str);
        boolean mat = m.find();
        if (mat) {
          String path = m.group(1);
          String type = m.group(2);
          String options = m.group(3);

          if (type.equals(CGROUPS_FSTYPE)) {
            List<String> value = Arrays.asList(options.split(","));
            ret.put(path, value);
          }
        }
      }
    } catch (IOException e) {
      throw new IOException("Error while reading " + getMtabFileName(), e);
    } finally {
      IOUtils.cleanup(LOG, in);
    }

    return ret;
  }

  private String findControllerInMtab(String controller,
      Map<String, List<String>> entries) {
    for (Entry<String, List<String>> e : entries.entrySet()) {
      if (e.getValue().contains(controller)) {
        return e.getKey();
      }
    }

    return null;
  }

  private void initializeControllerPaths() throws IOException {
    String controllerPath;
    Map<String, List<String>> parsedMtab = parseMtab();

    // CPU
    controllerPath = findControllerInMtab(CONTROLLER_CPU, parsedMtab);

    if (controllerPath != null) {
      File f = new File(controllerPath + "/" + this.cgroupPrefix);

      if (FileUtil.canWrite(f)) {
        controllerPaths.put(CONTROLLER_CPU, controllerPath);
      } else {
        throw new IOException("Not able to enforce cpu weights; cannot write "
            + "to cgroup at: " + controllerPath);
      }
    } else {
      throw new IOException("Not able to enforce cpu weights; cannot find "
          + "cgroup for cpu controller in " + getMtabFileName());
    }
  }

  @VisibleForTesting
  String getMtabFileName() {
    return MTAB_FILE;
  }

  @VisibleForTesting
  Map<String, String> getControllerPaths() {
    return Collections.unmodifiableMap(controllerPaths);
  }
}
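A standalone sketch of the period/quota arithmetic that getOverallLimits() performs, under the assumption that the handler's constants are MAX_QUOTA_US = 1000 * 1000 and MIN_PERIOD_US = 1000 (defined earlier in the file, outside this excerpt); the class and method names here are hypothetical, and the real method carries additional input validation not shown:

// Hypothetical demo: mirrors the cfs_period_us / cfs_quota_us calculation.
public class CgroupCpuLimitDemo {
  private static final int MAX_QUOTA_US = 1000 * 1000; // assumed constant
  private static final int MIN_PERIOD_US = 1000;       // assumed constant

  static int[] overallLimits(float yarnProcessors) {
    int quotaUS = MAX_QUOTA_US;
    int periodUS = (int) (MAX_QUOTA_US / yarnProcessors);
    if (yarnProcessors < 1.0f) {
      // Less than one full core: cap the period and shrink the quota instead.
      periodUS = MAX_QUOTA_US;
      quotaUS = (int) (periodUS * yarnProcessors);
      if (quotaUS < MIN_PERIOD_US) {
        quotaUS = MIN_PERIOD_US;
      }
    }
    if (periodUS < MIN_PERIOD_US) {
      // Period too small for cgroups to enforce; fall back to unlimited CPU.
      periodUS = MAX_QUOTA_US;
      quotaUS = -1;
    }
    return new int[] {periodUS, quotaUS};
  }

  public static void main(String[] args) {
    // 1.5 cores -> [666666, 1000000]: quota/period ratio is 1.5
    System.out.println(java.util.Arrays.toString(overallLimits(1.5f)));
    // 0.5 cores -> [1000000, 500000]: quota/period ratio is 0.5
    System.out.println(java.util.Arrays.toString(overallLimits(0.5f)));
  }
}

In both cases the enforced CPU share is quota divided by period, which is why a fraction below 1.0 shrinks the quota while a fraction above 1.0 shrinks the period.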
16,836
31.756809
103
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.util;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.util.ConverterUtils;

/**
 * Helper functionality to read the pid from a file.
 */
public class ProcessIdFileReader {

  private static final Log LOG = LogFactory.getLog(ProcessIdFileReader.class);

  /**
   * Get the process id from specified file path.
   * Parses each line to find a valid number
   * and returns the first one found.
   * @return Process Id if obtained from path specified else null
   * @throws IOException
   */
  public static String getProcessId(Path path) throws IOException {
    if (path == null) {
      throw new IOException("Trying to access process id from a null path");
    }

    LOG.debug("Accessing pid from pid file " + path);
    String processId = null;
    BufferedReader bufReader = null;

    try {
      File file = new File(path.toString());
      if (file.exists()) {
        FileInputStream fis = new FileInputStream(file);
        bufReader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));

        while (true) {
          String line = bufReader.readLine();
          if (line == null) {
            break;
          }
          String temp = line.trim();
          if (!temp.isEmpty()) {
            if (Shell.WINDOWS) {
              // On Windows, pid is expected to be a container ID, so find
              // first line that parses successfully as a container ID.
              try {
                ConverterUtils.toContainerId(temp);
                processId = temp;
                break;
              } catch (Exception e) {
                // do nothing
              }
            } else {
              // Otherwise, find first line containing a numeric pid.
              try {
                Long pid = Long.valueOf(temp);
                if (pid > 0) {
                  processId = temp;
                  break;
                }
              } catch (Exception e) {
                // do nothing
              }
            }
          }
        }
      }
    } finally {
      if (bufReader != null) {
        bufReader.close();
      }
    }
    LOG.debug("Got pid " + (processId != null ? processId : "null")
        + " from path " + path);
    return processId;
  }
}
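A minimal usage sketch for the reader above; the temp file stands in for the pid file a container launch script would write, and the contents are made up for illustration:

import java.io.File;
import java.io.FileWriter;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;

public class PidFileDemo {
  public static void main(String[] args) throws Exception {
    // Write a pid file the way a launch script might: blank lines are
    // skipped, and the first positive number wins (on non-Windows platforms).
    File pidFile = File.createTempFile("container", ".pid");
    try (FileWriter w = new FileWriter(pidFile)) {
      w.write("\n");
      w.write("12345\n");
    }
    String pid = ProcessIdFileReader.getProcessId(new Path(pidFile.getPath()));
    System.out.println("pid = " + pid); // prints: pid = 12345
  }
}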
3,382
30.915094
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;

/**
 * Helper class to determine hardware related characteristics such as the
 * number of processors and the amount of memory on the node.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NodeManagerHardwareUtils {

  private static final Log LOG = LogFactory
      .getLog(NodeManagerHardwareUtils.class);

  /**
   * Returns the number of CPUs on the node. This value depends on the
   * configuration setting which decides whether to count logical processors
   * (such as hyperthreads) as cores or not.
   *
   * @param conf
   *          - Configuration object
   * @return Number of CPUs
   */
  public static int getNodeCPUs(Configuration conf) {
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    return NodeManagerHardwareUtils.getNodeCPUs(plugin, conf);
  }

  /**
   * Returns the number of CPUs on the node. This value depends on the
   * configuration setting which decides whether to count logical processors
   * (such as hyperthreads) as cores or not.
   *
   * @param plugin
   *          - ResourceCalculatorPlugin object to determine hardware specs
   * @param conf
   *          - Configuration object
   * @return Number of CPU cores on the node.
   */
  public static int getNodeCPUs(ResourceCalculatorPlugin plugin,
      Configuration conf) {
    int numProcessors = plugin.getNumProcessors();
    boolean countLogicalCores =
        conf.getBoolean(YarnConfiguration.NM_COUNT_LOGICAL_PROCESSORS_AS_CORES,
            YarnConfiguration.DEFAULT_NM_COUNT_LOGICAL_PROCESSORS_AS_CORES);
    if (!countLogicalCores) {
      numProcessors = plugin.getNumCores();
    }
    return numProcessors;
  }

  /**
   * Returns the fraction of CPUs that should be used for YARN containers.
   * The number is derived based on various configuration params such as
   * YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
   *
   * @param conf
   *          - Configuration object
   * @return Fraction of CPUs to be used for YARN containers
   */
  public static float getContainersCPUs(Configuration conf) {
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    return NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
  }

  /**
   * Returns the fraction of CPUs that should be used for YARN containers.
   * The number is derived based on various configuration params such as
   * YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
   *
   * @param plugin
   *          - ResourceCalculatorPlugin object to determine hardware specs
   * @param conf
   *          - Configuration object
   * @return Fraction of CPUs to be used for YARN containers
   */
  public static float getContainersCPUs(ResourceCalculatorPlugin plugin,
      Configuration conf) {
    int numProcessors = getNodeCPUs(plugin, conf);
    int nodeCpuPercentage = getNodeCpuPercentage(conf);

    return (nodeCpuPercentage * numProcessors) / 100.0f;
  }

  /**
   * Gets the percentage of physical CPU that is configured for YARN
   * containers. This is percent {@literal >} 0 and {@literal <=} 100 based on
   * {@link YarnConfiguration#NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT}
   * @param conf Configuration object
   * @return percent {@literal >} 0 and {@literal <=} 100
   */
  public static int getNodeCpuPercentage(Configuration conf) {
    int nodeCpuPercentage =
        Math.min(conf.getInt(
            YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
            YarnConfiguration.DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT),
            100);
    nodeCpuPercentage = Math.max(0, nodeCpuPercentage);

    if (nodeCpuPercentage == 0) {
      String message = "Illegal value for "
          + YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
          + ". Value cannot be less than or equal to 0.";
      throw new IllegalArgumentException(message);
    }
    return nodeCpuPercentage;
  }

  /**
   * Function to return the number of vcores on the system that can be used
   * for YARN containers. If a number is specified in the configuration file,
   * then that number is returned. If nothing is specified:
   * 1. If the OS is an "unknown" OS (one for which we don't have a
   * ResourceCalculatorPlugin implemented), return the default NodeManager
   * cores.
   * 2. If {@link YarnConfiguration#NM_COUNT_LOGICAL_PROCESSORS_AS_CORES} is
   * set to true, return the logical processor count (counting hyperthreads
   * as cores), else return the physical cores count.
   *
   * @param conf
   *          - the configuration for the NodeManager
   * @return the number of cores to be used for YARN containers
   *
   */
  public static int getVCores(Configuration conf) {
    // is this os for which we can determine cores?
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);

    return NodeManagerHardwareUtils.getVCores(plugin, conf);
  }

  /**
   * Function to return the number of vcores on the system that can be used
   * for YARN containers. If a number is specified in the configuration file,
   * then that number is returned. If nothing is specified:
   * 1. If the OS is an "unknown" OS (one for which we don't have a
   * ResourceCalculatorPlugin implemented), return the default NodeManager
   * cores.
   * 2. If {@link YarnConfiguration#NM_COUNT_LOGICAL_PROCESSORS_AS_CORES} is
   * set to true, return the logical processor count (counting hyperthreads
   * as cores), else return the physical cores count.
   *
   * @param plugin
   *          - ResourceCalculatorPlugin object to determine hardware specs
   * @param conf
   *          - the configuration for the NodeManager
   * @return the number of cores to be used for YARN containers
   *
   */
  public static int getVCores(ResourceCalculatorPlugin plugin,
      Configuration conf) {
    int cores;
    boolean hardwareDetectionEnabled =
        conf.getBoolean(
            YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
            YarnConfiguration.DEFAULT_NM_ENABLE_HARDWARE_CAPABILITY_DETECTION);

    String message;
    if (!hardwareDetectionEnabled || plugin == null) {
      cores = conf.getInt(YarnConfiguration.NM_VCORES,
          YarnConfiguration.DEFAULT_NM_VCORES);
      if (cores == -1) {
        cores = YarnConfiguration.DEFAULT_NM_VCORES;
      }
    } else {
      cores = conf.getInt(YarnConfiguration.NM_VCORES, -1);
      if (cores == -1) {
        float physicalCores =
            NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
        float multiplier =
            conf.getFloat(YarnConfiguration.NM_PCORES_VCORES_MULTIPLIER,
                YarnConfiguration.DEFAULT_NM_PCORES_VCORES_MULTIPLIER);
        if (multiplier > 0) {
          float tmp = physicalCores * multiplier;
          if (tmp > 0 && tmp < 1) {
            // on a single core machine - tmp can be between 0 and 1
            cores = 1;
          } else {
            cores = (int) tmp;
          }
        } else {
          message = "Illegal value for "
              + YarnConfiguration.NM_PCORES_VCORES_MULTIPLIER
              + ". Value must be greater than 0.";
          throw new IllegalArgumentException(message);
        }
      }
    }
    if (cores <= 0) {
      message = "Illegal value for " + YarnConfiguration.NM_VCORES
          + ". Value must be greater than 0.";
      throw new IllegalArgumentException(message);
    }

    return cores;
  }

  /**
   * Function to return how much memory we should set aside for YARN
   * containers. If a number is specified in the configuration file, then
   * that number is returned. If nothing is specified:
   * 1. If the OS is an "unknown" OS (one for which we don't have a
   * ResourceCalculatorPlugin implemented), return the default NodeManager
   * physical memory.
   * 2. If the OS has a ResourceCalculatorPlugin implemented, the calculation
   * is 0.8 * (RAM - 2 * JVM-memory), i.e. use 80% of the memory after
   * accounting for memory used by the DataNode and the NodeManager. If the
   * number is less than 1GB, log a warning message.
   *
   * @param conf
   *          - the configuration for the NodeManager
   * @return the amount of memory that will be used for YARN containers in MB.
   */
  public static int getContainerMemoryMB(Configuration conf) {
    return NodeManagerHardwareUtils.getContainerMemoryMB(
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
        conf);
  }

  /**
   * Function to return how much memory we should set aside for YARN
   * containers. If a number is specified in the configuration file, then
   * that number is returned. If nothing is specified:
   * 1. If the OS is an "unknown" OS (one for which we don't have a
   * ResourceCalculatorPlugin implemented), return the default NodeManager
   * physical memory.
   * 2. If the OS has a ResourceCalculatorPlugin implemented, the calculation
   * is 0.8 * (RAM - 2 * JVM-memory), i.e. use 80% of the memory after
   * accounting for memory used by the DataNode and the NodeManager. If the
   * number is less than 1GB, log a warning message.
   *
   * @param plugin
   *          - ResourceCalculatorPlugin object to determine hardware specs
   * @param conf
   *          - the configuration for the NodeManager
   * @return the amount of memory that will be used for YARN containers in MB.
   */
  public static int getContainerMemoryMB(ResourceCalculatorPlugin plugin,
      Configuration conf) {
    int memoryMb;
    boolean hardwareDetectionEnabled = conf.getBoolean(
        YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
        YarnConfiguration.DEFAULT_NM_ENABLE_HARDWARE_CAPABILITY_DETECTION);

    if (!hardwareDetectionEnabled || plugin == null) {
      memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB,
          YarnConfiguration.DEFAULT_NM_PMEM_MB);
      if (memoryMb == -1) {
        memoryMb = YarnConfiguration.DEFAULT_NM_PMEM_MB;
      }
    } else {
      memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, -1);
      if (memoryMb == -1) {
        int physicalMemoryMB =
            (int) (plugin.getPhysicalMemorySize() / (1024 * 1024));
        int hadoopHeapSizeMB =
            (int) (Runtime.getRuntime().maxMemory() / (1024 * 1024));
        int containerPhysicalMemoryMB =
            (int) (0.8f * (physicalMemoryMB - (2 * hadoopHeapSizeMB)));
        int reservedMemoryMB =
            conf.getInt(YarnConfiguration.NM_SYSTEM_RESERVED_PMEM_MB, -1);
        if (reservedMemoryMB != -1) {
          containerPhysicalMemoryMB = physicalMemoryMB - reservedMemoryMB;
        }
        if (containerPhysicalMemoryMB <= 0) {
          LOG.error("Calculated memory for YARN containers is too low."
              + " Node memory is " + physicalMemoryMB
              + " MB, system reserved memory is " + reservedMemoryMB
              + " MB.");
        }
        containerPhysicalMemoryMB = Math.max(containerPhysicalMemoryMB, 0);
        memoryMb = containerPhysicalMemoryMB;
      }
    }
    if (memoryMb <= 0) {
      String message = "Illegal value for " + YarnConfiguration.NM_PMEM_MB
          + ". Value must be greater than 0.";
      throw new IllegalArgumentException(message);
    }

    return memoryMb;
  }
}
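To make the default memory heuristic concrete, here is a small worked calculation; the node size and heap size are made-up inputs and the snippet is independent of the class above:

public class ContainerMemoryDemo {
  public static void main(String[] args) {
    int physicalMemoryMB = 64 * 1024; // assume a 64 GB node
    int hadoopHeapSizeMB = 1024;      // assume a 1 GB NodeManager heap
    // 0.8 * (RAM - 2 * JVM-memory), as in getContainerMemoryMB()
    int containerMemoryMB =
        (int) (0.8f * (physicalMemoryMB - (2 * hadoopHeapSizeMB)));
    // 0.8 * (65536 - 2048) = 50790 MB left for containers
    System.out.println(containerMemoryMB + " MB");
  }
}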
12,556
38.990446
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.metrics;

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.yarn.api.records.Resource;

import com.google.common.annotations.VisibleForTesting;

@Metrics(about="Metrics for node manager", context="yarn")
public class NodeManagerMetrics {
  @Metric MutableCounterInt containersLaunched;
  @Metric MutableCounterInt containersCompleted;
  @Metric MutableCounterInt containersFailed;
  @Metric MutableCounterInt containersKilled;
  @Metric("# of initializing containers")
      MutableGaugeInt containersIniting;
  @Metric MutableGaugeInt containersRunning;
  @Metric("Current allocated memory in GB")
      MutableGaugeInt allocatedGB;
  @Metric("Current # of allocated containers")
      MutableGaugeInt allocatedContainers;
  @Metric MutableGaugeInt availableGB;
  @Metric("Current allocated Virtual Cores")
      MutableGaugeInt allocatedVCores;
  @Metric MutableGaugeInt availableVCores;
  @Metric("Container launch duration")
      MutableRate containerLaunchDuration;
  @Metric("# of bad local dirs") MutableGaugeInt badLocalDirs;
  @Metric("# of bad log dirs") MutableGaugeInt badLogDirs;
  @Metric("Disk utilization % on good local dirs")
      MutableGaugeInt goodLocalDirsDiskUtilizationPerc;
  @Metric("Disk utilization % on good log dirs")
      MutableGaugeInt goodLogDirsDiskUtilizationPerc;

  private JvmMetrics jvmMetrics = null;

  private long allocatedMB;
  private long availableMB;

  public NodeManagerMetrics(JvmMetrics jvmMetrics) {
    this.jvmMetrics = jvmMetrics;
  }

  public static NodeManagerMetrics create() {
    return create(DefaultMetricsSystem.instance());
  }

  static NodeManagerMetrics create(MetricsSystem ms) {
    JvmMetrics jm = JvmMetrics.create("NodeManager", null, ms);
    return ms.register(new NodeManagerMetrics(jm));
  }

  public JvmMetrics getJvmMetrics() {
    return jvmMetrics;
  }

  // Potential instrumentation interface methods

  public void launchedContainer() {
    containersLaunched.incr();
  }

  public void completedContainer() {
    containersCompleted.incr();
  }

  public void failedContainer() {
    containersFailed.incr();
  }

  public void killedContainer() {
    containersKilled.incr();
  }

  public void initingContainer() {
    containersIniting.incr();
  }

  public void endInitingContainer() {
    containersIniting.decr();
  }

  public void runningContainer() {
    containersRunning.incr();
  }

  public void endRunningContainer() {
    containersRunning.decr();
  }

  public void allocateContainer(Resource res) {
    allocatedContainers.incr();
    allocatedMB = allocatedMB + res.getMemory();
    allocatedGB.set((int)Math.ceil(allocatedMB/1024d));
    availableMB = availableMB - res.getMemory();
    availableGB.set((int)Math.floor(availableMB/1024d));
    allocatedVCores.incr(res.getVirtualCores());
    availableVCores.decr(res.getVirtualCores());
  }

  public void releaseContainer(Resource res) {
    allocatedContainers.decr();
    allocatedMB = allocatedMB - res.getMemory();
    allocatedGB.set((int)Math.ceil(allocatedMB/1024d));
    availableMB = availableMB + res.getMemory();
    availableGB.set((int)Math.floor(availableMB/1024d));
    allocatedVCores.decr(res.getVirtualCores());
    availableVCores.incr(res.getVirtualCores());
  }

  public void addResource(Resource res) {
    availableMB = availableMB + res.getMemory();
    // Set, rather than increment, the gauge: availableMB is a running total,
    // so incrementing by its floor would compound on repeated calls.
    availableGB.set((int)Math.floor(availableMB/1024d));
    availableVCores.incr(res.getVirtualCores());
  }

  public void addContainerLaunchDuration(long value) {
    containerLaunchDuration.add(value);
  }

  public void setBadLocalDirs(int badLocalDirs) {
    this.badLocalDirs.set(badLocalDirs);
  }

  public void setBadLogDirs(int badLogDirs) {
    this.badLogDirs.set(badLogDirs);
  }

  public void setGoodLocalDirsDiskUtilizationPerc(
      int goodLocalDirsDiskUtilizationPerc) {
    this.goodLocalDirsDiskUtilizationPerc.set(goodLocalDirsDiskUtilizationPerc);
  }

  public void setGoodLogDirsDiskUtilizationPerc(
      int goodLogDirsDiskUtilizationPerc) {
    this.goodLogDirsDiskUtilizationPerc.set(goodLogDirsDiskUtilizationPerc);
  }

  public int getRunningContainers() {
    return containersRunning.value();
  }

  @VisibleForTesting
  public int getKilledContainers() {
    return containersKilled.value();
  }

  @VisibleForTesting
  public int getFailedContainers() {
    return containersFailed.value();
  }

  @VisibleForTesting
  public int getCompletedContainers() {
    return containersCompleted.value();
  }

  @VisibleForTesting
  public int getBadLogDirs() {
    return badLogDirs.value();
  }

  @VisibleForTesting
  public int getBadLocalDirs() {
    return badLocalDirs.value();
  }

  @VisibleForTesting
  public int getGoodLogDirsDiskUtilizationPerc() {
    return goodLogDirsDiskUtilizationPerc.value();
  }

  @VisibleForTesting
  public int getGoodLocalDirsDiskUtilizationPerc() {
    return goodLocalDirsDiskUtilizationPerc.value();
  }
}
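The ceil/floor asymmetry in the gauges above is easy to miss: allocated GB rounds up while available GB rounds down, so a partial gigabyte is never reported as both allocated and available. A small trace with made-up numbers:

public class GaugeRoundingDemo {
  public static void main(String[] args) {
    long availableMB = 8192; // assume an 8 GB node, nothing allocated yet
    long allocatedMB = 0;

    // Allocate a 1536 MB container, as allocateContainer() would.
    allocatedMB += 1536;
    availableMB -= 1536;

    // allocatedGB = ceil(1536/1024)  = 2
    System.out.println("allocatedGB = " + (int) Math.ceil(allocatedMB / 1024d));
    // availableGB = floor(6656/1024) = 6, so 2 + 6 <= 8 always holds
    System.out.println("availableGB = " + (int) Math.floor(availableMB / 1024d));
  }
}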
6,194
29.367647
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeLabelsProvider.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.nodelabels;

import java.util.Set;

import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.NodeLabel;

/**
 * Abstract service responsible for fetching the node labels.
 *
 */
public abstract class NodeLabelsProvider extends AbstractService {

  public NodeLabelsProvider(String name) {
    super(name);
  }

  /**
   * Provides the labels. A label provider is expected to return the same
   * labels continuously until the labels change. If null is returned then an
   * empty label set is assumed by the caller.
   *
   * @return Set of node label strings applicable for a node
   */
  public abstract Set<NodeLabel> getNodeLabels();
}
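A minimal concrete provider, purely to illustrate the contract above; the "gpu" label is made up, NodeLabel.newInstance(String) is assumed to be available, and real implementations would typically poll a script or configuration instead of returning a fixed set:

import java.util.Collections;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;

/** Hypothetical provider that always reports one fixed label. */
public class StaticNodeLabelsProvider extends NodeLabelsProvider {

  private final Set<NodeLabel> labels;

  public StaticNodeLabelsProvider() {
    super("StaticNodeLabelsProvider");
    // "gpu" is a made-up label name used only for this sketch.
    this.labels = Collections.singleton(NodeLabel.newInstance("gpu"));
  }

  @Override
  public Set<NodeLabel> getNodeLabels() {
    // Same set on every call, per the contract, until the labels change.
    return labels;
  }
}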
1,534
33.886364
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.recovery;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;

@Private
@Unstable
public abstract class NMStateStoreService extends AbstractService {

  public NMStateStoreService(String name) {
    super(name);
  }

  public static class RecoveredApplicationsState {
    List<ContainerManagerApplicationProto> applications;
    List<ApplicationId> finishedApplications;

    public List<ContainerManagerApplicationProto> getApplications() {
      return applications;
    }

    public List<ApplicationId> getFinishedApplications() {
      return finishedApplications;
    }
  }

  public enum RecoveredContainerStatus {
    REQUESTED,
    LAUNCHED,
    COMPLETED
  }

  public static class RecoveredContainerState {
    RecoveredContainerStatus status;
    int exitCode = ContainerExitStatus.INVALID;
    boolean killed = false;
    String diagnostics = "";
    StartContainerRequest startRequest;

    public RecoveredContainerStatus getStatus() {
      return status;
    }

    public int getExitCode() {
      return exitCode;
    }

    public boolean getKilled() {
      return killed;
    }

    public String getDiagnostics() {
      return diagnostics;
    }

    public StartContainerRequest getStartRequest() {
      return startRequest;
    }
  }

  public static class LocalResourceTrackerState {
    List<LocalizedResourceProto> localizedResources =
        new ArrayList<LocalizedResourceProto>();
    Map<LocalResourceProto, Path> inProgressResources =
        new HashMap<LocalResourceProto, Path>();

    public List<LocalizedResourceProto> getLocalizedResources() {
      return localizedResources;
    }

    public Map<LocalResourceProto, Path> getInProgressResources() {
      return inProgressResources;
    }

    public boolean isEmpty() {
      return localizedResources.isEmpty() && inProgressResources.isEmpty();
    }
  }

  public static class RecoveredUserResources {
    LocalResourceTrackerState privateTrackerState =
        new LocalResourceTrackerState();
    Map<ApplicationId, LocalResourceTrackerState> appTrackerStates =
        new HashMap<ApplicationId, LocalResourceTrackerState>();

    public LocalResourceTrackerState getPrivateTrackerState() {
      return privateTrackerState;
    }

    public Map<ApplicationId, LocalResourceTrackerState>
        getAppTrackerStates() {
      return appTrackerStates;
    }
  }

  public static class RecoveredLocalizationState {
    LocalResourceTrackerState publicTrackerState =
        new LocalResourceTrackerState();
    Map<String, RecoveredUserResources> userResources =
        new HashMap<String, RecoveredUserResources>();

    public LocalResourceTrackerState getPublicTrackerState() {
      return publicTrackerState;
    }

    public Map<String, RecoveredUserResources> getUserResources() {
      return userResources;
    }
  }

  public static class RecoveredDeletionServiceState {
    List<DeletionServiceDeleteTaskProto> tasks;

    public List<DeletionServiceDeleteTaskProto> getTasks() {
      return tasks;
    }
  }

  public static class RecoveredNMTokensState {
    MasterKey currentMasterKey;
    MasterKey previousMasterKey;
    Map<ApplicationAttemptId, MasterKey> applicationMasterKeys;

    public MasterKey getCurrentMasterKey() {
      return currentMasterKey;
    }

    public MasterKey getPreviousMasterKey() {
      return previousMasterKey;
    }

    public Map<ApplicationAttemptId, MasterKey> getApplicationMasterKeys() {
      return applicationMasterKeys;
    }
  }

  public static class RecoveredContainerTokensState {
    MasterKey currentMasterKey;
    MasterKey previousMasterKey;
    Map<ContainerId, Long> activeTokens;

    public MasterKey getCurrentMasterKey() {
      return currentMasterKey;
    }

    public MasterKey getPreviousMasterKey() {
      return previousMasterKey;
    }

    public Map<ContainerId, Long> getActiveTokens() {
      return activeTokens;
    }
  }

  public static class RecoveredLogDeleterState {
    Map<ApplicationId, LogDeleterProto> logDeleterMap;

    public Map<ApplicationId, LogDeleterProto> getLogDeleterMap() {
      return logDeleterMap;
    }
  }

  /** Initialize the state storage */
  @Override
  public void serviceInit(Configuration conf) throws IOException {
    initStorage(conf);
  }

  /** Start the state storage for use */
  @Override
  public void serviceStart() throws IOException {
    startStorage();
  }

  /** Shutdown the state storage. */
  @Override
  public void serviceStop() throws IOException {
    closeStorage();
  }

  public boolean canRecover() {
    return true;
  }

  public boolean isNewlyCreated() {
    return false;
  }

  /**
   * Load the state of applications
   * @return recovered state for applications
   * @throws IOException
   */
  public abstract RecoveredApplicationsState loadApplicationsState()
      throws IOException;

  /**
   * Record the start of an application
   * @param appId the application ID
   * @param p state to store for the application
   * @throws IOException
   */
  public abstract void storeApplication(ApplicationId appId,
      ContainerManagerApplicationProto p) throws IOException;

  /**
   * Record that an application has finished
   * @param appId the application ID
   * @throws IOException
   */
  public abstract void storeFinishedApplication(ApplicationId appId)
      throws IOException;

  /**
   * Remove records corresponding to an application
   * @param appId the application ID
   * @throws IOException
   */
  public abstract void removeApplication(ApplicationId appId)
      throws IOException;

  /**
   * Load the state of containers
   * @return recovered state for containers
   * @throws IOException
   */
  public abstract List<RecoveredContainerState> loadContainersState()
      throws IOException;

  /**
   * Record a container start request
   * @param containerId the container ID
   * @param startRequest the container start request
   * @throws IOException
   */
  public abstract void storeContainer(ContainerId containerId,
      StartContainerRequest startRequest) throws IOException;

  /**
   * Record that a container has been launched
   * @param containerId the container ID
   * @throws IOException
   */
  public abstract void storeContainerLaunched(ContainerId containerId)
      throws IOException;

  /**
   * Record that a container has completed
   * @param containerId the container ID
   * @param exitCode the exit code from the container
   * @throws IOException
   */
  public abstract void storeContainerCompleted(ContainerId containerId,
      int exitCode) throws IOException;

  /**
   * Record a request to kill a container
   * @param containerId the container ID
   * @throws IOException
   */
  public abstract void storeContainerKilled(ContainerId containerId)
      throws IOException;

  /**
   * Record diagnostics for a container
   * @param containerId the container ID
   * @param diagnostics the container diagnostics
   * @throws IOException
   */
  public abstract void storeContainerDiagnostics(ContainerId containerId,
      StringBuilder diagnostics) throws IOException;

  /**
   * Remove records corresponding to a container
   * @param containerId the container ID
   * @throws IOException
   */
  public abstract void removeContainer(ContainerId containerId)
      throws IOException;

  /**
   * Load the state of localized resources
   * @return recovered localized resource state
   * @throws IOException
   */
  public abstract RecoveredLocalizationState loadLocalizationState()
      throws IOException;

  /**
   * Record the start of localization for a resource
   * @param user the username or null if the resource is public
   * @param appId the application ID if the resource is app-specific or null
   * @param proto the resource request
   * @param localPath local filesystem path where the resource will be stored
   * @throws IOException
   */
  public abstract void startResourceLocalization(String user,
      ApplicationId appId, LocalResourceProto proto, Path localPath)
      throws IOException;

  /**
   * Record the completion of a resource localization
   * @param user the username or null if the resource is public
   * @param appId the application ID if the resource is app-specific or null
   * @param proto the serialized localized resource
   * @throws IOException
   */
  public abstract void finishResourceLocalization(String user,
      ApplicationId appId, LocalizedResourceProto proto) throws IOException;

  /**
   * Remove records related to a resource localization
   * @param user the username or null if the resource is public
   * @param appId the application ID if the resource is app-specific or null
   * @param localPath local filesystem path where the resource will be stored
   * @throws IOException
   */
  public abstract void removeLocalizedResource(String user,
      ApplicationId appId, Path localPath) throws IOException;

  /**
   * Load the state of the deletion service
   * @return recovered deletion service state
   * @throws IOException
   */
  public abstract RecoveredDeletionServiceState loadDeletionServiceState()
      throws IOException;

  /**
   * Record a deletion task
   * @param taskId the deletion task ID
   * @param taskProto the deletion task protobuf
   * @throws IOException
   */
  public abstract void storeDeletionTask(int taskId,
      DeletionServiceDeleteTaskProto taskProto) throws IOException;

  /**
   * Remove records corresponding to a deletion task
   * @param taskId the deletion task ID
   * @throws IOException
   */
  public abstract void removeDeletionTask(int taskId) throws IOException;

  /**
   * Load the state of NM tokens
   * @return recovered state of NM tokens
   * @throws IOException
   */
  public abstract RecoveredNMTokensState loadNMTokensState()
      throws IOException;

  /**
   * Record the current NM token master key
   * @param key the master key
   * @throws IOException
   */
  public abstract void storeNMTokenCurrentMasterKey(MasterKey key)
      throws IOException;

  /**
   * Record the previous NM token master key
   * @param key the previous master key
   * @throws IOException
   */
  public abstract void storeNMTokenPreviousMasterKey(MasterKey key)
      throws IOException;

  /**
   * Record a master key corresponding to an application
   * @param attempt the application attempt ID
   * @param key the master key
   * @throws IOException
   */
  public abstract void storeNMTokenApplicationMasterKey(
      ApplicationAttemptId attempt, MasterKey key) throws IOException;

  /**
   * Remove a master key corresponding to an application
   * @param attempt the application attempt ID
   * @throws IOException
   */
  public abstract void removeNMTokenApplicationMasterKey(
      ApplicationAttemptId attempt) throws IOException;

  /**
   * Load the state of container tokens
   * @return recovered state of container tokens
   * @throws IOException
   */
  public abstract RecoveredContainerTokensState loadContainerTokensState()
      throws IOException;

  /**
   * Record the current container token master key
   * @param key the master key
   * @throws IOException
   */
  public abstract void storeContainerTokenCurrentMasterKey(MasterKey key)
      throws IOException;

  /**
   * Record the previous container token master key
   * @param key the previous master key
   * @throws IOException
   */
  public abstract void storeContainerTokenPreviousMasterKey(MasterKey key)
      throws IOException;

  /**
   * Record the expiration time for a container token
   * @param containerId the container ID
   * @param expirationTime the container token expiration time
   * @throws IOException
   */
  public abstract void storeContainerToken(ContainerId containerId,
      Long expirationTime) throws IOException;

  /**
   * Remove records for a container token
   * @param containerId the container ID
   * @throws IOException
   */
  public abstract void removeContainerToken(ContainerId containerId)
      throws IOException;

  /**
   * Load the state of log deleters
   * @return recovered log deleter state
   * @throws IOException
   */
  public abstract RecoveredLogDeleterState loadLogDeleterState()
      throws IOException;

  /**
   * Store the state of a log deleter
   * @param appId the application ID for the log deleter
   * @param proto the serialized state of the log deleter
   * @throws IOException
   */
  public abstract void storeLogDeleter(ApplicationId appId,
      LogDeleterProto proto) throws IOException;

  /**
   * Remove the state of a log deleter
   * @param appId the application ID for the log deleter
   * @throws IOException
   */
  public abstract void removeLogDeleter(ApplicationId appId)
      throws IOException;

  protected abstract void initStorage(Configuration conf) throws IOException;

  protected abstract void startStorage() throws IOException;

  protected abstract void closeStorage() throws IOException;
}
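A sketch of how a recovery pass might consume the container state loaded from a store; the store argument and the handling logic are hypothetical and meant only to show how RecoveredContainerStatus is typically interpreted:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState;

public class RecoveryDemo {
  // 'store' would be a concrete NMStateStoreService implementation.
  static void recoverContainers(NMStateStoreService store) throws IOException {
    if (!store.canRecover()) {
      return; // nothing persisted, cold start
    }
    List<RecoveredContainerState> containers = store.loadContainersState();
    for (RecoveredContainerState rcs : containers) {
      switch (rcs.getStatus()) {
      case REQUESTED:
        // Start request was persisted but the container never launched.
        System.out.println("re-request: " + rcs.getStartRequest());
        break;
      case LAUNCHED:
        // The container process may still be alive; reacquire it.
        System.out.println("reacquire, killed=" + rcs.getKilled());
        break;
      case COMPLETED:
        System.out.println("completed, exit=" + rcs.getExitCode());
        break;
      }
    }
  }
}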
14,958
28.739563
105
java